2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Cost heuristics consulted by the inliner. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Give up inlining the current callee: optionally log the reason, then jump
 * to the shared inline_failure label so a plain call is emitted instead. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Abort IR generation if an exception has already been recorded on cfg. */ \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException (naming caller and callee) and bail out. */ \
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Record a FieldAccessException (naming field and caller) and bail out. */ \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Generic sharing cannot handle OPCODE: flag the cfg so the method is
 * recompiled as a normal (unshared) instantiation. */ \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Same idea, but for gsharedvt (valuetype-aware generic sharing). */ \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on the cfg and bail out. */ \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in the file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* Cached helper-method signatures, built by mono_create_helper_signatures () */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* First expansion of mini-ops.h: per-opcode dreg/sreg descriptor characters. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion of mini-ops.h: number of source registers per opcode. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* Initialize a variable-info record; 0xffff marks "no first use seen yet". */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set all three source registers of INS from the REGS array. */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Exported wrappers around the inline vreg allocators. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/* Map TYPE to the move opcode used to copy a value of that type between
 * vregs. (Sampled excerpt: several case labels and returns are not visible.) */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
267 type = mini_replace_type (type);
269 switch (type->type) {
272 case MONO_TYPE_BOOLEAN:
284 case MONO_TYPE_FNPTR:
286 case MONO_TYPE_CLASS:
287 case MONO_TYPE_STRING:
288 case MONO_TYPE_OBJECT:
289 case MONO_TYPE_SZARRAY:
290 case MONO_TYPE_ARRAY:
294 #if SIZEOF_REGISTER == 8
303 case MONO_TYPE_VALUETYPE:
/* Enums are moved as their underlying integral type. */
304 if (type->data.klass->enumtype) {
305 type = mono_class_enum_basetype (type->data.klass);
308 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
311 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are classified through their container class. */
313 case MONO_TYPE_GENERICINST:
314 type = &type->data.generic_class->container_class->byval_arg;
318 g_assert (cfg->generic_sharing_context);
319 if (mini_type_var_is_vt (cfg, type))
324 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print BB's in/out edges followed by all of its instructions. */
330 mono_print_bb (MonoBasicBlock *bb, const char *msg)
335 printf ("\n%s %d: [IN: ", msg, bb->block_num);
336 for (i = 0; i < bb->in_count; ++i)
337 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
339 for (i = 0; i < bb->out_count; ++i)
340 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
342 for (tree = bb->code; tree; tree = tree->next)
343 mono_print_ins_index (-1, tree);
/* Build the cached icall signatures stored in the helper_sig_* globals. */
347 mono_create_helper_signatures (void)
349 helper_sig_domain_get = mono_create_icall_signature ("ptr");
350 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
352 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
353 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
354 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
355 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
359 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
360 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method as failing verification; under gsharedvt, fall back to
 * compiling the concrete instantiation instead of failing outright. */
362 #define UNVERIFIED do { \
363 if (cfg->gsharedvt) { \
364 if (cfg->verbose_level > 2) \
365 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
366 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
367 goto exception_exit; \
369 if (mini_get_debug_options ()->break_on_unverified) \
/* Jump to the shared load_error label (or break into the debugger). */ \
375 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
377 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up (or lazily create and register) the bblock starting at IP. */
379 #define GET_BBLOCK(cfg,tblock,ip) do { \
380 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
382 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
383 NEW_BBLOCK (cfg, (tblock)); \
384 (tblock)->cil_code = (ip); \
385 ADD_BBLOCK (cfg, (tblock)); \
389 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm, into a fresh mp-vreg. */
390 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
391 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
392 (dest)->dreg = alloc_ireg_mp ((cfg)); \
393 (dest)->sreg1 = (sr1); \
394 (dest)->sreg2 = (sr2); \
395 (dest)->inst_imm = (imm); \
396 (dest)->backend.shift_amount = (shift); \
397 MONO_ADD_INS ((cfg)->cbb, (dest)); \
401 #if SIZEOF_REGISTER == 8
/* On 64-bit, sign-extend an I4 operand before mixing it with a PTR operand. */
402 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
403 /* FIXME: Need to add many more cases */ \
404 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
406 int dr = alloc_preg (cfg); \
407 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
408 (ins)->sreg2 = widen->dreg; \
412 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit the type-specialized binary op, push its result. */
415 #define ADD_BINOP(op) do { \
416 MONO_INST_NEW (cfg, ins, (op)); \
418 ins->sreg1 = sp [0]->dreg; \
419 ins->sreg2 = sp [1]->dreg; \
420 type_from_op (ins, sp [0], sp [1]); \
422 /* Have to insert a widening op */ \
423 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
424 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
425 MONO_ADD_INS ((cfg)->cbb, (ins)); \
426 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit the type-specialized unary op, push its result. */ \
429 #define ADD_UNOP(op) do { \
430 MONO_INST_NEW (cfg, ins, (op)); \
432 ins->sreg1 = sp [0]->dreg; \
433 type_from_op (ins, sp [0], NULL); \
435 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
436 MONO_ADD_INS ((cfg)->cbb, (ins)); \
437 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit compare + conditional branch, linking the true/false target bblocks
 * and flushing the remaining eval stack via handle_stack_args (). */ \
440 #define ADD_BINCOND(next_block) do { \
443 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
444 cmp->sreg1 = sp [0]->dreg; \
445 cmp->sreg2 = sp [1]->dreg; \
446 type_from_op (cmp, sp [0], sp [1]); \
448 type_from_op (ins, sp [0], sp [1]); \
449 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
450 GET_BBLOCK (cfg, tblock, target); \
451 link_bblock (cfg, bblock, tblock); \
452 ins->inst_true_bb = tblock; \
453 if ((next_block)) { \
454 link_bblock (cfg, bblock, (next_block)); \
455 ins->inst_false_bb = (next_block); \
456 start_new_bblock = 1; \
458 GET_BBLOCK (cfg, tblock, ip); \
459 link_bblock (cfg, bblock, tblock); \
460 ins->inst_false_bb = tblock; \
461 start_new_bblock = 2; \
463 if (sp != stack_start) { \
464 handle_stack_args (cfg, stack_start, sp - stack_start); \
465 CHECK_UNVERIFIABLE (cfg); \
467 MONO_ADD_INS (bblock, cmp); \
468 MONO_ADD_INS (bblock, ins); \
472 * link_bblock: Links two basic blocks
474 * links two basic blocks in the control flow graph, the 'from'
475 * argument is the starting block and the 'to' argument is the block
476 * the control flow ends to after 'from'.
479 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
481 MonoBasicBlock **newa;
/* Optional edge tracing for debugging. */
485 if (from->cil_code) {
487 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
489 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
492 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
494 printf ("edge from entry to exit\n");
/* Add 'to' to from->out_bb unless the edge already exists; the successor
 * array is grown by copying into a fresh mempool allocation. */
499 for (i = 0; i < from->out_count; ++i) {
500 if (to == from->out_bb [i]) {
506 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
507 for (i = 0; i < from->out_count; ++i) {
508 newa [i] = from->out_bb [i];
/* Symmetrically add 'from' to to->in_bb. */
516 for (i = 0; i < to->in_count; ++i) {
517 if (from == to->in_bb [i]) {
523 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
524 for (i = 0; i < to->in_count; ++i) {
525 newa [i] = to->in_bb [i];
/* Exported wrapper around link_bblock (). */
534 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
536 link_bblock (cfg, from, to);
540 * mono_find_block_region:
542 * We mark each basic block with a region ID. We use that to avoid BB
543 * optimizations when blocks are in different regions.
546 * A region token that encodes where this region is, and information
547 * about the clause owner for this block.
549 * The region encodes the try/catch/filter clause that owns this block
550 * as well as the type. -1 is a special value that represents a block
551 * that is in none of try/catch/filter.
554 mono_find_block_region (MonoCompile *cfg, int offset)
556 MonoMethodHeader *header = cfg->header;
557 MonoExceptionClause *clause;
560 for (i = 0; i < header->num_clauses; ++i) {
561 clause = &header->clauses [i];
/* Filter code runs from filter_offset up to the handler start. */
562 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
563 (offset < (clause->handler_offset)))
564 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
566 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
567 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
568 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
569 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
570 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
572 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise the offset may lie in the protected (try) range of the clause. */
575 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
576 return ((i + 1) << 8) | clause->flags;
/* Collect, in clause order, the exception clauses of kind TYPE that control
 * flow leaves when branching from IP to TARGET. */
583 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
585 MonoMethodHeader *header = cfg->header;
586 MonoExceptionClause *clause;
590 for (i = 0; i < header->num_clauses; ++i) {
591 clause = &header->clauses [i];
/* The branch exits the clause iff IP is inside it and TARGET is not. */
592 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
593 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
594 if (clause->flags == type)
595 res = g_list_append (res, clause);
/* Get or create the per-region spvar (int-sized local used by the exception
 * handling code), cached in cfg->spvars keyed by region token. */
602 mono_create_spvar_for_region (MonoCompile *cfg, int region)
606 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
610 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
611 /* prevent it from being register allocated */
612 var->flags |= MONO_INST_VOLATILE;
614 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler offset, or NULL. */
618 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
620 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler offset. */
624 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
628 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
632 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
633 /* prevent it from being register allocated */
634 var->flags |= MONO_INST_VOLATILE;
636 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
642 * Returns the type used in the eval stack when @type is loaded.
643 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
646 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
650 type = mini_replace_type (type);
651 inst->klass = klass = mono_class_from_mono_type (type);
653 inst->type = STACK_MP;
658 switch (type->type) {
660 inst->type = STACK_INV;
664 case MONO_TYPE_BOOLEAN:
670 inst->type = STACK_I4;
675 case MONO_TYPE_FNPTR:
676 inst->type = STACK_PTR;
678 case MONO_TYPE_CLASS:
679 case MONO_TYPE_STRING:
680 case MONO_TYPE_OBJECT:
681 case MONO_TYPE_SZARRAY:
682 case MONO_TYPE_ARRAY:
683 inst->type = STACK_OBJ;
687 inst->type = STACK_I8;
691 inst->type = STACK_R8;
693 case MONO_TYPE_VALUETYPE:
/* Enums are loaded as their underlying integral type. */
694 if (type->data.klass->enumtype) {
695 type = mono_class_enum_basetype (type->data.klass);
699 inst->type = STACK_VTYPE;
702 case MONO_TYPE_TYPEDBYREF:
703 inst->klass = mono_defaults.typed_reference_class;
704 inst->type = STACK_VTYPE;
706 case MONO_TYPE_GENERICINST:
707 type = &type->data.generic_class->container_class->byval_arg;
711 g_assert (cfg->generic_sharing_context);
/* Under gsharedvt, gsharedvt-typed values live on the stack as vtypes. */
712 if (mini_is_gsharedvt_type (cfg, type)) {
713 g_assert (cfg->gsharedvt);
714 inst->type = STACK_VTYPE;
716 inst->type = STACK_OBJ;
720 g_error ("unknown type 0x%02x in eval stack type", type->type);
725 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops for each (src1, src2) pair;
 * STACK_INV marks an illegal combination. */
728 bin_num_table [STACK_MAX] [STACK_MAX] = {
729 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Row of result types for unary negation (used as neg_table in type_from_op;
 * the declaring line is not visible in this excerpt). */
741 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
744 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
746 bin_int_table [STACK_MAX] [STACK_MAX] = {
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility: 0 means the pair cannot be compared; nonzero
 * values encode allowed (and, for >1, questionable-but-tolerated) pairs. */
758 bin_comp_table [STACK_MAX] [STACK_MAX] = {
759 /* Inv i L p F & O vt */
761 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
762 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
763 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
764 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
765 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
766 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
767 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
770 /* reduce the size of this table */
/* Result type of shift operations, indexed by (value, shift-count) types. */
772 shift_table [STACK_MAX] [STACK_MAX] = {
773 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
784 * Tables to map from the non-specific opcode to the matching
785 * type-specific opcode.
787 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
789 binops_op_map [STACK_MAX] = {
790 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
793 /* handles from CEE_NEG to CEE_CONV_U8 */
795 unops_op_map [STACK_MAX] = {
796 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
799 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
801 ovfops_op_map [STACK_MAX] = {
802 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
805 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
807 ovf2ops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
811 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
813 ovf3ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
817 /* handles from CEE_BEQ to CEE_BLT_UN */
819 beqops_op_map [STACK_MAX] = {
820 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
823 /* handles from CEE_CEQ to CEE_CLT_UN */
825 ceqops_op_map [STACK_MAX] = {
826 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
830 * Sets ins->type (the type on the eval stack) according to the
831 * type of the opcode and the arguments to it.
832 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
834 * FIXME: this function sets ins->type unconditionally in some cases, but
835 * it should set it to invalid for some types (a conv.x on an object)
838 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
840 switch (ins->opcode) {
847 /* FIXME: check unverifiable args for STACK_MP */
/* Numeric binops: validate the operand pair, then specialize the generic
 * opcode with the per-result-type offset from binops_op_map. */
848 ins->type = bin_num_table [src1->type] [src2->type];
849 ins->opcode += binops_op_map [ins->type];
856 ins->type = bin_int_table [src1->type] [src2->type];
857 ins->opcode += binops_op_map [ins->type];
862 ins->type = shift_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the compare width from the first operand; pointer-sized
 * values use the 64-bit compare on 64-bit targets. */
868 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
869 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
870 ins->opcode = OP_LCOMPARE;
871 else if (src1->type == STACK_R8)
872 ins->opcode = OP_FCOMPARE;
874 ins->opcode = OP_ICOMPARE;
876 case OP_ICOMPARE_IMM:
877 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
878 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
879 ins->opcode = OP_LCOMPARE_IMM;
891 ins->opcode += beqops_op_map [src1->type];
894 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
895 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares only allow the strictly-valid (bit 0) pairs. */
901 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
902 ins->opcode += ceqops_op_map [src1->type];
906 ins->type = neg_table [src1->type];
907 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are legal. */
910 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
911 ins->type = src1->type;
913 ins->type = STACK_INV;
914 ins->opcode += unops_op_map [ins->type];
920 ins->type = STACK_I4;
921 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: result is R8; source selects the specific conversion. */
924 ins->type = STACK_R8;
925 switch (src1->type) {
928 ins->opcode = OP_ICONV_TO_R_UN;
931 ins->opcode = OP_LCONV_TO_R_UN;
935 case CEE_CONV_OVF_I1:
936 case CEE_CONV_OVF_U1:
937 case CEE_CONV_OVF_I2:
938 case CEE_CONV_OVF_U2:
939 case CEE_CONV_OVF_I4:
940 case CEE_CONV_OVF_U4:
941 ins->type = STACK_I4;
942 ins->opcode += ovf3ops_op_map [src1->type];
944 case CEE_CONV_OVF_I_UN:
945 case CEE_CONV_OVF_U_UN:
946 ins->type = STACK_PTR;
947 ins->opcode += ovf2ops_op_map [src1->type];
949 case CEE_CONV_OVF_I1_UN:
950 case CEE_CONV_OVF_I2_UN:
951 case CEE_CONV_OVF_I4_UN:
952 case CEE_CONV_OVF_U1_UN:
953 case CEE_CONV_OVF_U2_UN:
954 case CEE_CONV_OVF_U4_UN:
955 ins->type = STACK_I4;
956 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native-unsigned; opcode depends on source and on the
 * native pointer size. */
959 ins->type = STACK_PTR;
960 switch (src1->type) {
962 ins->opcode = OP_ICONV_TO_U;
966 #if SIZEOF_VOID_P == 8
967 ins->opcode = OP_LCONV_TO_U;
969 ins->opcode = OP_MOVE;
973 ins->opcode = OP_LCONV_TO_U;
976 ins->opcode = OP_FCONV_TO_U;
982 ins->type = STACK_I8;
983 ins->opcode += unops_op_map [src1->type];
985 case CEE_CONV_OVF_I8:
986 case CEE_CONV_OVF_U8:
987 ins->type = STACK_I8;
988 ins->opcode += ovf3ops_op_map [src1->type];
990 case CEE_CONV_OVF_U8_UN:
991 case CEE_CONV_OVF_I8_UN:
992 ins->type = STACK_I8;
993 ins->opcode += ovf2ops_op_map [src1->type];
997 ins->type = STACK_R8;
998 ins->opcode += unops_op_map [src1->type];
1001 ins->type = STACK_R8;
1005 ins->type = STACK_I4;
1006 ins->opcode += ovfops_op_map [src1->type];
1009 case CEE_CONV_OVF_I:
1010 case CEE_CONV_OVF_U:
1011 ins->type = STACK_PTR;
1012 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1015 case CEE_ADD_OVF_UN:
1017 case CEE_MUL_OVF_UN:
1019 case CEE_SUB_OVF_UN:
1020 ins->type = bin_num_table [src1->type] [src2->type];
1021 ins->opcode += ovfops_op_map [src1->type];
1022 if (ins->type == STACK_R8)
1023 ins->type = STACK_INV;
1025 case OP_LOAD_MEMBASE:
1026 ins->type = STACK_PTR;
1028 case OP_LOADI1_MEMBASE:
1029 case OP_LOADU1_MEMBASE:
1030 case OP_LOADI2_MEMBASE:
1031 case OP_LOADU2_MEMBASE:
1032 case OP_LOADI4_MEMBASE:
1033 case OP_LOADU4_MEMBASE:
1034 ins->type = STACK_PTR;
1036 case OP_LOADI8_MEMBASE:
1037 ins->type = STACK_I8;
1039 case OP_LOADR4_MEMBASE:
1040 case OP_LOADR8_MEMBASE:
1041 ins->type = STACK_R8;
1044 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a generic klass. */
1048 if (ins->type == STACK_MP)
1049 ins->klass = mono_defaults.object_class;
/* Row of stack-type values belonging to a lookup table whose declaring line
 * is not visible in this excerpt — NOTE(review): verify against full source. */
1054 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Parameter-compatibility table; only referenced from the commented-out
 * check at the bottom of check_values_to_signature (). */
1060 param_table [STACK_MAX] [STACK_MAX] = {
/* Sanity-check that the argument instructions ARGS are compatible with SIG
 * (byref-ness, reference vs. value, float-ness). Appears to report a
 * mismatch via its return value — parts of the body are missing here. */
1065 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1069 switch (args->type) {
1079 for (i = 0; i < sig->param_count; ++i) {
1080 switch (args [i].type) {
1084 if (!sig->params [i]->byref)
1088 if (sig->params [i]->byref)
1090 switch (sig->params [i]->type) {
1091 case MONO_TYPE_CLASS:
1092 case MONO_TYPE_STRING:
1093 case MONO_TYPE_OBJECT:
1094 case MONO_TYPE_SZARRAY:
1095 case MONO_TYPE_ARRAY:
1102 if (sig->params [i]->byref)
1104 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1113 /*if (!param_table [args [i].type] [sig->params [i]->type])
1121 * When we need a pointer to the current domain many times in a method, we
1122 * call mono_domain_get() once and we store the result in a local variable.
1123 * This function returns the variable that represents the MonoDomain*.
1125 inline static MonoInst *
1126 mono_get_domainvar (MonoCompile *cfg)
1128 if (!cfg->domainvar)
1129 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1130 return cfg->domainvar;
1134 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create got_var; only relevant when the architecture needs an
 * explicit GOT variable and we are compiling AOT. */
1138 mono_get_got_var (MonoCompile *cfg)
1140 #ifdef MONO_ARCH_NEED_GOT_VAR
1141 if (!cfg->compile_aot)
1143 if (!cfg->got_var) {
1144 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1146 return cfg->got_var;
/* Lazily create the rgctx variable used by shared generic code. */
1153 mono_get_vtable_var (MonoCompile *cfg)
1155 g_assert (cfg->generic_sharing_context);
1157 if (!cfg->rgctx_var) {
1158 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1159 /* force the var to be stack allocated */
1160 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1163 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType suitable for creating variables. */
1167 type_from_stack_type (MonoInst *ins) {
1168 switch (ins->type) {
1169 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1170 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1171 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1172 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1174 return &ins->klass->this_arg;
1175 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1176 case STACK_VTYPE: return &ins->klass->byval_arg;
1178 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Classify a MonoType into an eval-stack (STACK_*) type.
 * (Sampled excerpt: several case labels and return statements are missing.) */
1183 static G_GNUC_UNUSED int
1184 type_to_stack_type (MonoType *t)
1186 t = mono_type_get_underlying_type (t);
1190 case MONO_TYPE_BOOLEAN:
1193 case MONO_TYPE_CHAR:
1200 case MONO_TYPE_FNPTR:
1202 case MONO_TYPE_CLASS:
1203 case MONO_TYPE_STRING:
1204 case MONO_TYPE_OBJECT:
1205 case MONO_TYPE_SZARRAY:
1206 case MONO_TYPE_ARRAY:
1214 case MONO_TYPE_VALUETYPE:
1215 case MONO_TYPE_TYPEDBYREF:
1217 case MONO_TYPE_GENERICINST:
1218 if (mono_type_generic_inst_is_valuetype (t))
1224 g_assert_not_reached ();
/* Map an ldelem/stelem CIL opcode to the element class it accesses.
 * (Some case labels are not visible in this excerpt.) */
1231 array_access_to_klass (int opcode)
1235 return mono_defaults.byte_class;
1237 return mono_defaults.uint16_class;
1240 return mono_defaults.int_class;
1243 return mono_defaults.sbyte_class;
1246 return mono_defaults.int16_class;
1249 return mono_defaults.int32_class;
1251 return mono_defaults.uint32_class;
1254 return mono_defaults.int64_class;
1257 return mono_defaults.single_class;
1260 return mono_defaults.double_class;
1261 case CEE_LDELEM_REF:
1262 case CEE_STELEM_REF:
1263 return mono_defaults.object_class;
1265 g_assert_not_reached ();
1271 * We try to share variables when possible
/* Return a shared "interface" variable for stack SLOT holding the type of
 * INS, creating and caching it in cfg->intvars on first use. */
1274 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1279 /* inlining can result in deeper stacks */
1280 if (slot >= cfg->header->max_stack)
1281 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1283 pos = ins->type - 1 + slot * STACK_MAX;
1285 switch (ins->type) {
1292 if ((vnum = cfg->intvars [pos]))
1293 return cfg->varinfo [vnum];
1294 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1295 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1298 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* For AOT compilation: remember the image/token a reference came from, so
 * the AOT compiler can later resolve it from image+token alone. */
1304 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1307 * Don't use this if a generic_context is set, since that means AOT can't
1308 * look up the method using just the image+token.
1309 * table == 0 means this is a reference made from a wrapper.
1311 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single joint point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the joint point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time values leave this bb: find or allocate the spill variables. */
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack as our out_stack. */
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh spill variables. */
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpose, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every non-handler successor;
 * a mismatching in_scount at a join point makes the method unverifiable. */
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
/* Spill each stack item into its variable and replace it on the stack. */
1400 locals = bb->out_stack;
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
/* Store the values again, into this successor's distinct variable set. */
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Load into INTF_BIT_REG a nonzero value iff bit klass->interface_id is set
 *   in the interface bitmap found at BASE_REG + OFFSET. Two layouts exist:
 *   with COMPRESSED_INTERFACE_BITMAP the test is delegated to the
 *   mono_class_interface_match icall; otherwise the bit is tested inline
 *   (byte index = iid >> 3, bit mask = 1 << (iid & 7)).
 */
1465 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1467 int ibitmap_reg = alloc_preg (cfg);
1468 #ifdef COMPRESSED_INTERFACE_BITMAP
1470 MonoInst *res, *ins;
1471 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1472 MONO_ADD_INS (cfg->cbb, ins);
/* args [1] = interface id; patchable constant under AOT */
1474 if (cfg->compile_aot)
1475 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1477 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1478 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1479 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1481 int ibitmap_byte_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1485 if (cfg->compile_aot) {
/* iid not known at AOT time: compute byte index and bit mask at run time */
1486 int iid_reg = alloc_preg (cfg);
1487 int shifted_iid_reg = alloc_preg (cfg);
1488 int ibitmap_byte_address_reg = alloc_preg (cfg);
1489 int masked_iid_reg = alloc_preg (cfg);
1490 int iid_one_bit_reg = alloc_preg (cfg);
1491 int iid_bit_reg = alloc_preg (cfg);
1492 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1494 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7)
1497 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: byte index and mask are compile-time constants */
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1508 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1509 * stored in "klass_reg" implements the interface "klass".
1512 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1514 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1518 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1519 * stored in "vtable_reg" implements the interface "klass".
1522 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1524 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1528 * Emit code which checks whether the interface id of @klass is smaller than
1529 * the value given by max_iid_reg.
1532 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1533 MonoBasicBlock *false_target)
1535 if (cfg->compile_aot) {
1536 int iid_reg = alloc_preg (cfg);
1537 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1538 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1545 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1548 /* Same as above, but obtains max_iid from a vtable */
1550 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1551 MonoBasicBlock *false_target)
1553 int max_iid_reg = alloc_preg (cfg);
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1556 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1559 /* Same as above, but obtains max_iid from a klass */
1561 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1562 MonoBasicBlock *false_target)
1564 int max_iid_reg = alloc_preg (cfg);
1566 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1567 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit a supertypes-table based "is instance" test: branch to TRUE_TARGET
 *   when the class in KLASS_REG derives from KLASS (or, when KLASS_INS is
 *   non-NULL, from the class whose pointer is in klass_ins->dreg). The test
 *   first rejects classes whose inheritance depth is too small, then compares
 *   supertypes [klass->idepth - 1] against the target class.
 */
1571 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1573 int idepth_reg = alloc_preg (cfg);
1574 int stypes_reg = alloc_preg (cfg);
1575 int stype = alloc_preg (cfg);
1577 mono_class_setup_supertypes (klass);
/* Depth check only needed when idepth exceeds the statically-sized supertable. */
1579 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare against the target class. */
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1587 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1588 } else if (cfg->compile_aot) {
1589 int const_reg = alloc_preg (cfg);
1590 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1591 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1599 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1601 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1605 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 int intf_reg = alloc_preg (cfg);
1609 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1610 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1613 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1615 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1619 * Variant of the above that takes a register to the class, not the vtable.
1622 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1624 int intf_bit_reg = alloc_preg (cfg);
1626 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1627 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1632 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1636 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1640 } else if (cfg->compile_aot) {
1641 int const_reg = alloc_preg (cfg);
1642 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1643 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1647 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1651 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1653 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1657 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1659 if (cfg->compile_aot) {
1660 int const_reg = alloc_preg (cfg);
1661 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1662 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1670 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a full castclass check for the class in KLASS_REG against KLASS
 *   (array element-type handling included); throws InvalidCastException on
 *   mismatch. OBJ_REG == -1 skips the vector (SZARRAY bounds == NULL) check.
 *   KLASS_INST optionally supplies the target class dynamically.
 */
1673 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1676 int rank_reg = alloc_preg (cfg);
1677 int eclass_reg = alloc_preg (cfg);
1679 g_assert (!klass_inst);
/* Array case: ranks must match, then the element classes are compared. */
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1682 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1683 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum, which
 * have non-trivial array-covariance rules. */
1685 if (klass->cast_class == mono_defaults.object_class) {
1686 int parent_reg = alloc_preg (cfg);
1687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1688 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1689 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1690 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1691 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class == mono_defaults.enum_class) {
1694 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1695 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1696 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1698 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1699 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1702 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1703 /* Check that the object is a vector too */
1704 int bounds_reg = alloc_preg (cfg);
1705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1707 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertypes-table subtype check, throwing on failure. */
1710 int idepth_reg = alloc_preg (cfg);
1711 int stypes_reg = alloc_preg (cfg);
1712 int stype = alloc_preg (cfg);
1714 mono_class_setup_supertypes (klass);
1716 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1719 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1723 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1728 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1730 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline stores setting SIZE bytes at DESTREG + OFFSET to VAL
 *   (only val == 0 is supported, see the assert), using the widest store
 *   width ALIGN permits: immediate stores for small sizes, otherwise a
 *   zero-filled register written back in descending widths.
 */
1734 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1738 g_assert (val == 0);
/* Small, aligned case: a single (or few) immediate stores suffice. */
1743 if ((size <= 4) && (size <= align)) {
1746 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1749 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1754 #if SIZEOF_REGISTER == 8
1756 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL in a register once, then store repeatedly. */
1762 val_reg = alloc_preg (cfg);
1764 if (SIZEOF_REGISTER == 8)
1765 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1767 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until alignment is reached. */
1770 /* This could be optimized further if necessary */
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1779 #if !NO_UNALIGNED_ACCESS
1780 if (SIZEOF_REGISTER == 8) {
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1787 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: descending store widths (4, 2, 1 bytes). */
1795 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1800 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1805 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit an inline copy of SIZE bytes from SRCREG + SOFFSET to
 *   DESTREG + DOFFSET through a scratch register, using the widest
 *   load/store width ALIGN permits (8/4/2/1 bytes).
 */
1812 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1819 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1820 g_assert (size < 10000);
/* Unaligned prefix: byte copies until alignment is reached. */
1823 /* This could be optimized further if necessary */
1825 cur_reg = alloc_preg (cfg);
1826 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1834 #if !NO_UNALIGNED_ACCESS
1835 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets allowing unaligned access. */
1837 cur_reg = alloc_preg (cfg);
1838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: descending copy widths (4, 2, 1 bytes). */
1848 cur_reg = alloc_preg (cfg);
1849 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1850 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1856 cur_reg = alloc_preg (cfg);
1857 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1864 cur_reg = alloc_preg (cfg);
1865 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1874 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1878 if (cfg->compile_aot) {
1879 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1880 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1882 ins->sreg2 = c->dreg;
1883 MONO_ADD_INS (cfg->cbb, ins);
1885 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1887 ins->inst_offset = mini_get_tls_offset (tls_key);
1888 MONO_ADD_INS (cfg->cbb, ins);
1895 * Emit IR to push the current LMF onto the LMF stack.
1898 emit_push_lmf (MonoCompile *cfg)
1901 * Emit IR to push the LMF:
1902 * lmf_addr = <lmf_addr from tls>
1903 * lmf->lmf_addr = lmf_addr
1904 * lmf->prev_lmf = *lmf_addr
1907 int lmf_reg, prev_lmf_reg;
1908 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS and the arch has a TLS intrinsic. */
1913 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1914 /* Load current lmf */
1915 lmf_ins = mono_get_lmf_intrinsic (cfg);
1917 MONO_ADD_INS (cfg->cbb, lmf_ins);
1918 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1919 lmf_reg = ins->dreg;
1920 /* Save previous_lmf */
1921 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make our frame's LMF the TLS-current one. */
1923 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1926 * Store lmf_addr in a variable, so it can be allocated to a global register.
1928 if (!cfg->lmf_addr_var)
1929 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr via the arch intrinsic when available, else via icall. */
1931 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1933 MONO_ADD_INS (cfg->cbb, lmf_ins);
1935 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1936 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1938 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1939 lmf_reg = ins->dreg;
1941 prev_lmf_reg = alloc_preg (cfg);
1942 /* Save previous_lmf */
1943 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1944 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = our LMF, i.e. link it onto the LMF stack. */
1946 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1953 * Emit IR to pop the current LMF from the LMF stack.
1956 emit_pop_lmf (MonoCompile *cfg)
1958 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1964 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1965 lmf_reg = ins->dreg;
/* TLS-resident LMF: restore the saved previous_lmf straight into TLS. */
1967 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1968 /* Load previous_lmf */
1969 prev_lmf_reg = alloc_preg (cfg);
1970 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1972 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1975 * Emit IR to pop the LMF:
1976 * *(lmf->lmf_addr) = lmf->prev_lmf
1978 /* This could be called before emit_push_lmf () */
1979 if (!cfg->lmf_addr_var)
1980 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1981 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1983 prev_lmf_reg = alloc_preg (cfg);
1984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1985 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a return type TYPE to the matching call opcode, choosing the _REG
 *   (indirect/calli), _MEMBASE (virtual) or plain variant, and the
 *   VOID/I/L/F/V-call family according to the type.
 */
1990 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, handled as a plain CALL. */
1993 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1996 type = mini_get_basic_type_from_generic (gsctx, type);
1997 switch (type->type) {
1998 case MONO_TYPE_VOID:
1999 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2002 case MONO_TYPE_BOOLEAN:
2005 case MONO_TYPE_CHAR:
2008 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2012 case MONO_TYPE_FNPTR:
2013 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2014 case MONO_TYPE_CLASS:
2015 case MONO_TYPE_STRING:
2016 case MONO_TYPE_OBJECT:
2017 case MONO_TYPE_SZARRAY:
2018 case MONO_TYPE_ARRAY:
2019 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2022 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2025 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2026 case MONO_TYPE_VALUETYPE:
/* Enums are classified by their underlying integral type. */
2027 if (type->data.klass->enumtype) {
2028 type = mono_class_enum_basetype (type->data.klass);
2031 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2032 case MONO_TYPE_TYPEDBYREF:
2033 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2034 case MONO_TYPE_GENERICINST:
/* Re-classify using the generic container class. */
2035 type = &type->data.generic_class->container_class->byval_arg;
2038 case MONO_TYPE_MVAR:
2040 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2042 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2048 * target_type_is_incompatible:
2049 * @cfg: MonoCompile context
2051 * Check that the item @arg on the evaluation stack can be stored
2052 * in the target type (can be a local, or field, etc).
2053 * The cfg arg can be used to check if we need verification or just
2056 * Returns: non-0 value if arg can't be stored on a target.
2059 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2061 MonoType *simple_type;
2064 target = mini_replace_type (target);
2065 if (target->byref) {
2066 /* FIXME: check that the pointed to types match */
2067 if (arg->type == STACK_MP)
2068 return arg->klass != mono_class_from_mono_type (target);
2069 if (arg->type == STACK_PTR)
/* Non-byref: classify by the underlying (enum-resolved) type. */
2074 simple_type = mono_type_get_underlying_type (target);
2075 switch (simple_type->type) {
2076 case MONO_TYPE_VOID:
2080 case MONO_TYPE_BOOLEAN:
2083 case MONO_TYPE_CHAR:
2086 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2090 /* STACK_MP is needed when setting pinned locals */
2091 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2096 case MONO_TYPE_FNPTR:
2098 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2099 * in native int. (#688008).
2101 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2104 case MONO_TYPE_CLASS:
2105 case MONO_TYPE_STRING:
2106 case MONO_TYPE_OBJECT:
2107 case MONO_TYPE_SZARRAY:
2108 case MONO_TYPE_ARRAY:
2109 if (arg->type != STACK_OBJ)
2111 /* FIXME: check type compatibility */
2115 if (arg->type != STACK_I8)
2120 if (arg->type != STACK_R8)
/* Value types additionally require the exact MonoClass to match. */
2123 case MONO_TYPE_VALUETYPE:
2124 if (arg->type != STACK_VTYPE)
2126 klass = mono_class_from_mono_type (simple_type);
2127 if (klass != arg->klass)
2130 case MONO_TYPE_TYPEDBYREF:
2131 if (arg->type != STACK_VTYPE)
2133 klass = mono_class_from_mono_type (simple_type);
2134 if (klass != arg->klass)
2137 case MONO_TYPE_GENERICINST:
2138 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2139 if (arg->type != STACK_VTYPE)
2141 klass = mono_class_from_mono_type (simple_type);
2142 if (klass != arg->klass)
2146 if (arg->type != STACK_OBJ)
2148 /* FIXME: check type compatibility */
/* Type variables only reach here under generic sharing. */
2152 case MONO_TYPE_MVAR:
2153 g_assert (cfg->generic_sharing_context);
2154 if (mini_type_var_is_vt (cfg, simple_type)) {
2155 if (arg->type != STACK_VTYPE)
2158 if (arg->type != STACK_OBJ)
2163 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2169 * Prepare arguments for passing to a function call.
2170 * Return a non-zero value if the arguments can't be passed to the given
2172 * The type checks are not yet complete and some conversions may need
2173 * casts on 32 or 64 bit architectures.
2175 * FIXME: implement this using target_type_is_incompatible ()
2178 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2180 MonoType *simple_type;
/* Implicit 'this' must be an object, managed pointer or native pointer. */
2184 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check every declared parameter against its stack-type classification. */
2188 for (i = 0; i < sig->param_count; ++i) {
2189 if (sig->params [i]->byref) {
2190 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2194 simple_type = sig->params [i];
2195 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2197 switch (simple_type->type) {
2198 case MONO_TYPE_VOID:
2203 case MONO_TYPE_BOOLEAN:
2206 case MONO_TYPE_CHAR:
2209 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2215 case MONO_TYPE_FNPTR:
2216 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2219 case MONO_TYPE_CLASS:
2220 case MONO_TYPE_STRING:
2221 case MONO_TYPE_OBJECT:
2222 case MONO_TYPE_SZARRAY:
2223 case MONO_TYPE_ARRAY:
2224 if (args [i]->type != STACK_OBJ)
2229 if (args [i]->type != STACK_I8)
2234 if (args [i]->type != STACK_R8)
2237 case MONO_TYPE_VALUETYPE:
/* Enums are classified by their underlying integral type. */
2238 if (simple_type->data.klass->enumtype) {
2239 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2242 if (args [i]->type != STACK_VTYPE)
2245 case MONO_TYPE_TYPEDBYREF:
2246 if (args [i]->type != STACK_VTYPE)
2249 case MONO_TYPE_GENERICINST:
2250 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2253 case MONO_TYPE_MVAR:
2255 if (args [i]->type != STACK_VTYPE)
2259 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual call) opcode to its direct-call counterpart;
 *   asserts on anything else.
 */
2267 callvirt_to_call (int opcode)
2270 case OP_CALL_MEMBASE:
2272 case OP_VOIDCALL_MEMBASE:
2274 case OP_FCALL_MEMBASE:
2276 case OP_VCALL_MEMBASE:
2278 case OP_LCALL_MEMBASE:
2281 g_assert_not_reached ();
2287 #ifdef MONO_ARCH_HAVE_IMT
2288 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (a method pointer identifying the interface
 *   method being called) into a vreg and attach it to CALL, either in the
 *   arch-specific IMT register or via mono_arch_emit_imt_argument ().
 */
2290 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: always pass through imt_arg_reg. */
2294 if (COMPILE_LLVM (cfg)) {
2295 method_reg = alloc_preg (cfg);
2298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2299 } else if (cfg->compile_aot) {
2300 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2303 MONO_INST_NEW (cfg, ins, OP_PCONST);
2304 ins->inst_p0 = method;
2305 ins->dreg = method_reg;
2306 MONO_ADD_INS (cfg->cbb, ins);
2310 call->imt_arg_reg = method_reg;
2312 #ifdef MONO_ARCH_IMT_REG
2313 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2315 /* Need this to keep the IMT arg alive */
2316 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: use the dedicated IMT register when the arch has one. */
2321 #ifdef MONO_ARCH_IMT_REG
2322 method_reg = alloc_preg (cfg);
2325 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2326 } else if (cfg->compile_aot) {
2327 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2330 MONO_INST_NEW (cfg, ins, OP_PCONST);
2331 ins->inst_p0 = method;
2332 ins->dreg = method_reg;
2333 MONO_ADD_INS (cfg->cbb, ins);
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: delegate to the arch-specific implementation. */
2338 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo record from the mempool MP describing a patch of
 *   kind TYPE at IP, pointing at TARGET.
 */
2343 static MonoJumpInfo *
2344 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2346 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2350 ji->data.target = target;
2356 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2358 if (cfg->generic_sharing_context)
2359 return mono_class_check_context_used (klass);
2365 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2367 if (cfg->generic_sharing_context)
2368 return mono_method_check_context_used (method);
2374 * check_method_sharing:
2376 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2379 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2381 gboolean pass_vtable = FALSE;
2382 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2384 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2385 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2386 gboolean sharable = FALSE;
2388 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2391 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2392 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2393 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2395 sharable = sharing_enabled && context_sharable;
2399 * Pass vtable iff target method might
2400 * be shared, which means that sharing
2401 * is enabled for its class and its
2402 * context is sharable (and it's not a
2405 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) need an mrgctx instead. */
2409 if (mini_method_get_context (cmethod) &&
2410 mini_method_get_context (cmethod)->method_inst) {
2411 g_assert (!pass_vtable);
2413 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2416 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2417 MonoGenericContext *context = mini_method_get_context (cmethod);
2418 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2420 if (sharing_enabled && context_sharable)
2422 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* Write results through the optional out parameters. */
2427 if (out_pass_vtable)
2428 *out_pass_vtable = pass_vtable;
2429 if (out_pass_mrgctx)
2430 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for SIG with arguments ARGS: picks the call opcode
 *   (direct/calli/virtual/tailcall), sets up the vtype return (vret_var or
 *   OP_OUTARG_VTRETADDR), applies the soft-float r8->r4 argument fixup, and
 *   lowers the call via the LLVM or native backend.
 */
2433 inline static MonoCallInst *
2434 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2435 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2439 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2444 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2446 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2449 call->signature = sig;
2450 call->rgctx_reg = rgctx;
2451 sig_ret = mini_replace_type (sig->ret);
2453 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp whose
 * address is produced lazily through OP_OUTARG_VTRETADDR. */
2456 if (mini_type_is_vtype (cfg, sig_ret)) {
2457 call->vret_var = cfg->vret_addr;
2458 //g_assert_not_reached ();
2460 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2461 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2464 temp->backend.is_pinvoke = sig->pinvoke;
2467 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2468 * address of return value to increase optimization opportunities.
2469 * Before vtype decomposition, the dreg of the call ins itself represents the
2470 * fact the call modifies the return value. After decomposition, the call will
2471 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2472 * will be transformed into an LDADDR.
2474 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2475 loada->dreg = alloc_preg (cfg);
2476 loada->inst_p0 = temp;
2477 /* We reference the call too since call->dreg could change during optimization */
2478 loada->inst_p1 = call;
2479 MONO_ADD_INS (cfg->cbb, loada);
2481 call->inst.dreg = temp->dreg;
2483 call->vret_var = loada;
2484 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2485 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2487 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2488 if (COMPILE_SOFT_FLOAT (cfg)) {
2490 * If the call has a float argument, we would need to do an r8->r4 conversion using
2491 * an icall, but that cannot be done during the call sequence since it would clobber
2492 * the call registers + the stack. So we do it before emitting the call.
2494 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2496 MonoInst *in = call->args [i];
2498 if (i >= sig->hasthis)
2499 t = sig->params [i - sig->hasthis];
2501 t = &mono_defaults.int_class->byval_arg;
2502 t = mono_type_get_underlying_type (t);
2504 if (!t->byref && t->type == MONO_TYPE_R4) {
2505 MonoInst *iargs [1];
2509 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2511 /* The result will be in an int vreg */
2512 call->args [i] = conv;
2518 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the call through whichever backend is compiling this method. */
2521 if (COMPILE_LLVM (cfg))
2522 mono_llvm_emit_call (cfg, call);
2524 mono_arch_emit_call (cfg, call);
2526 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2529 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2530 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value held in RGCTX_REG to CALL: either in the
 * architecture's dedicated rgctx register (MONO_ARCH_RGCTX_REG) or,
 * otherwise, recorded as an explicit argument register.
 */
2536 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2538 #ifdef MONO_ARCH_RGCTX_REG
2539 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2540 cfg->uses_rgctx_reg = TRUE;
2541 call->rgctx_reg = TRUE;
2543 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with optional IMT and rgctx
 * arguments. Returns the emitted call instruction as a MonoInst*.
 */
2550 inline static MonoInst*
2551 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh preg so it is live at the call site. */
2557 rgctx_reg = mono_alloc_preg (cfg);
2558 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* calli=TRUE, non-virtual, non-tail; rgctx flag mirrors rgctx_arg presence. */
2561 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2563 call->inst.sreg1 = addr->dreg;
2566 emit_imt_argument (cfg, call, NULL, imt_arg);
2568 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2571 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2573 return (MonoInst*)call;
2577 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2580 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2582 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly tail) call to METHOD with ARGS.
 * Handles remoting proxies, string ctors, delegate Invoke fast paths,
 * devirtualization of final methods, and IMT/vtable dispatch.
 * NOTE(review): interior lines are elided in this view, so several
 * branches appear to lack their braces/else arms.
 */
2585 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2586 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2588 #ifndef DISABLE_REMOTING
2589 gboolean might_be_remote = FALSE;
/* A non-NULL 'this' marks the call as virtual dispatch. */
2591 gboolean virtual = this != NULL;
2592 gboolean enable_for_aot = TRUE;
2596 gboolean need_unbox_trampoline;
2599 sig = mono_method_signature (method);
/* Keep the rgctx value alive across the call setup. */
2602 rgctx_reg = mono_alloc_preg (cfg);
2603 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are special: their metadata signature returns void, but
 * the call actually produces the new string. */
2606 if (method->string_ctor) {
2607 /* Create the real signature */
2608 /* FIXME: Cache these */
2609 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2610 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2615 context_used = mini_method_check_context_used (cfg, method);
2617 #ifndef DISABLE_REMOTING
/* Receiver may be a transparent proxy: MarshalByRef (or object) target,
 * non-virtual call, and 'this' cannot be proven local. */
2618 might_be_remote = this && sig->hasthis &&
2619 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2620 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2622 if (might_be_remote && context_used) {
2625 g_assert (cfg->generic_sharing_context);
/* Shared code cannot use a wrapper; call indirectly through an
 * rgctx-resolved remoting-invoke-with-check address instead. */
2627 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2629 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2633 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2635 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2637 #ifndef DISABLE_REMOTING
2638 if (might_be_remote)
2639 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2642 call->method = method;
2643 call->inst.flags |= MONO_INST_HAS_METHOD;
2644 call->inst.inst_left = this;
2645 call->tail_call = tail;
2648 int vtable_reg, slot_reg, this_reg;
2651 this_reg = this->dreg;
/* Fast path: MulticastDelegate.Invoke calls go through delegate->invoke_impl. */
2653 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2654 MonoInst *dummy_use;
2656 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2658 /* Make a call to delegate->invoke_impl */
2659 call->inst.inst_basereg = this_reg;
2660 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2661 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2663 /* We must emit a dummy use here because the delegate trampoline will
2664 replace the 'this' argument with the delegate target making this activation
2665 no longer a root for the delegate.
2666 This is an issue for delegates that target collectible code such as dynamic
2667 methods of GC'able assemblies.
2669 For a test case look into #667921.
2671 FIXME: a dummy use is not the best way to do it as the local register allocator
2672 will put it on a caller save register and spil it around the call.
2673 Ideally, we would either put it on a callee save register or only do the store part.
2675 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2677 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods outside the
 * remoting-wrapper / shared-MBR cases, can be called directly. */
2680 if ((!cfg->compile_aot || enable_for_aot) &&
2681 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2682 (MONO_METHOD_IS_FINAL (method) &&
2683 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2684 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2686 * the method is not virtual, we just need to ensure this is not null
2687 * and then we can call the method directly.
2689 #ifndef DISABLE_REMOTING
2690 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2692 * The check above ensures method is not gshared, this is needed since
2693 * gshared methods can't have wrappers.
2695 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2699 if (!method->string_ctor)
2700 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2702 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2703 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2705 * the method is virtual, but we can statically dispatch since either
2706 * it's class or the method itself are sealed.
2707 * But first we need to ensure it's not a null reference.
2709 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2711 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'),
 * then pick an IMT or vtable slot. */
2713 vtable_reg = alloc_preg (cfg);
2714 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2715 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2717 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets from the vtable pointer. */
2719 guint32 imt_slot = mono_method_get_imt_slot (method);
2720 emit_imt_argument (cfg, call, call->method, imt_arg);
2721 slot_reg = vtable_reg;
2722 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2725 if (slot_reg == -1) {
2726 slot_reg = alloc_preg (cfg);
2727 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2728 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2731 slot_reg = vtable_reg;
2732 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2733 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2734 #ifdef MONO_ARCH_HAVE_IMT
2736 g_assert (mono_method_signature (method)->generic_param_count);
2737 emit_imt_argument (cfg, call, call->method, imt_arg);
2742 call->inst.sreg1 = slot_reg;
2743 call->inst.inst_offset = offset;
2744 call->virtual = TRUE;
2748 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2751 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2753 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no IMT or rgctx arguments.
 */
2757 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2759 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * Non-calli, non-virtual, non-tail, no rgctx, no unbox trampoline.
 */
2763 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2770 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2773 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2775 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for FUNC, looked up by
 * address and dispatched through its wrapper.
 */
2779 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2781 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2785 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2789 * mono_emit_abs_call:
2791 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2793 inline static MonoInst*
2794 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2795 MonoMethodSignature *sig, MonoInst **args)
2797 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2801 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the ABS resolver can map the address back. */
2804 if (cfg->abs_patches == NULL)
2805 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2806 g_hash_table_insert (cfg->abs_patches, ji, ji);
2807 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes don't treat it as a real address. */
2808 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend small integer results of pinvoke (or LLVM) calls,
 * since native code might leave the upper bits of the register
 * uninitialized. NOTE(review): the 'break' statements between switch
 * cases were elided from this view.
 */
2813 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2815 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2816 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2820 * Native code might return non register sized integers
2821 * without initializing the upper bits.
/* Pick the widening conversion matching the result's load size. */
2823 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2824 case OP_LOADI1_MEMBASE:
2825 widen_op = OP_ICONV_TO_I1;
2827 case OP_LOADU1_MEMBASE:
2828 widen_op = OP_ICONV_TO_U1;
2830 case OP_LOADI2_MEMBASE:
2831 widen_op = OP_ICONV_TO_I2;
2833 case OP_LOADU2_MEMBASE:
2834 widen_op = OP_ICONV_TO_U2;
2840 if (widen_op != -1) {
2841 int dreg = alloc_preg (cfg);
2844 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2845 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the corlib string-class "memcpy" helper, cached in a static.
 * Aborts with g_error if the method is missing (old corlib).
 */
2855 get_memcpy_method (void)
2857 static MonoMethod *memcpy_method = NULL;
2858 if (!memcpy_method) {
2859 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2861 g_error ("Old corlib found. Install a new one");
2863 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting
 * at OFFSET) that holds a reference, recursing into embedded valuetypes
 * with references. Static fields are skipped.
 */
2867 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2869 MonoClassField *field;
2870 gpointer iter = NULL;
2872 while ((field = mono_class_get_fields (klass, &iter))) {
2875 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2877 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2878 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2879 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2880 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2882 MonoClass *field_class = mono_class_from_mono_type (field->type);
2883 if (field_class->has_references)
2884 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 * order: the arch OP_CARD_TABLE_WBARRIER opcode, inline card-table
 * marking, or a call to the GC's write barrier method. No-op when
 * cfg->gen_write_barriers is off.
 */
2890 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2892 int card_table_shift_bits;
2893 gpointer card_table_mask;
2895 MonoInst *dummy_use;
2896 int nursery_shift_bits;
2897 size_t nursery_size;
2898 gboolean has_card_table_wb = FALSE;
2900 if (!cfg->gen_write_barriers)
2903 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2905 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2907 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2908 has_card_table_wb = TRUE;
/* Fast path: dedicated arch opcode (JIT-only; not for AOT or LLVM). */
2911 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2914 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2915 wbarrier->sreg1 = ptr->dreg;
2916 wbarrier->sreg2 = value->dreg;
2917 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = table[ptr >> shift], set to 1. */
2918 } else if (card_table) {
2919 int offset_reg = alloc_preg (cfg);
2920 int card_reg = alloc_preg (cfg);
2923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2924 if (card_table_mask)
2925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2927 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2928 * IMM's larger than 32bits.
2930 if (cfg->compile_aot) {
2931 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2933 MONO_INST_NEW (cfg, ins, OP_PCONST);
2934 ins->inst_p0 = card_table;
2935 ins->dreg = card_reg;
2936 MONO_ADD_INS (cfg->cbb, ins);
2939 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2940 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the managed write barrier method. */
2942 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2943 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive so the barrier's store target stays a GC root. */
2946 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Copy a valuetype of KLASS with write barriers for its reference
 * slots. Small copies (<= 5 words) are unrolled inline with per-slot
 * barriers; larger copies go through the
 * mono_gc_wbarrier_value_copy_bitmap icall with a precomputed bitmap.
 * NOTE(review): the early-return lines and tail of the function are
 * elided from this view.
 */
2950 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2952 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2953 unsigned need_wb = 0;
2958 /*types with references can't have alignment smaller than sizeof(void*) */
2959 if (align < SIZEOF_VOID_P)
2962 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2963 if (size > 32 * SIZEOF_VOID_P)
2966 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2968 /* We don't unroll more than 5 stores to avoid code bloat. */
2969 if (size > 5 * SIZEOF_VOID_P) {
2970 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2971 size += (SIZEOF_VOID_P - 1);
2972 size &= ~(SIZEOF_VOID_P - 1);
2974 EMIT_NEW_ICONST (cfg, iargs [2], size);
2975 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2976 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2980 destreg = iargs [0]->dreg;
2981 srcreg = iargs [1]->dreg;
2984 dest_ptr_reg = alloc_preg (cfg);
2985 tmp_reg = alloc_preg (cfg);
/* Walk a running destination pointer so iargs[0] tracks the slot being stored. */
2988 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2990 while (size >= SIZEOF_VOID_P) {
2991 MonoInst *load_inst;
2992 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2993 load_inst->dreg = tmp_reg;
2994 load_inst->inst_basereg = srcreg;
2995 load_inst->inst_offset = offset;
2996 MONO_ADD_INS (cfg->cbb, load_inst);
2998 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard elided in view). */
3001 emit_write_barrier (cfg, iargs [0], load_inst);
3003 offset += SIZEOF_VOID_P;
3004 size -= SIZEOF_VOID_P;
3007 /*tmp += sizeof (void*)*/
3008 if (size >= SIZEOF_VOID_P) {
3009 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3010 MONO_ADD_INS (cfg->cbb, iargs [0]);
3014 /* Those cannot be references since size < sizeof (void*) */
3016 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3017 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3023 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3030 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3031 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3040 * Emit code to copy a valuetype of type @klass whose address is stored in
3041 * @src->dreg to memory whose address is stored at @dest->dreg.
3044 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3046 MonoInst *iargs [4];
3047 int context_used, n;
3049 MonoMethod *memcpy_method;
3050 MonoInst *size_ins = NULL;
3051 MonoInst *memcpy_ins = NULL;
3055 * This check breaks with spilled vars... need to handle it during verification anyway.
3056 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime, fetch
 * them from the gsharedvt info. */
3059 if (mini_is_gsharedvt_klass (cfg, klass)) {
3061 context_used = mini_class_check_context_used (cfg, klass);
3062 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3063 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3067 n = mono_class_native_size (klass, &align);
3069 n = mono_class_value_size (klass, &align);
3071 /* if native is true there should be no references in the struct */
3072 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3073 /* Avoid barriers when storing to the stack */
3074 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3075 (dest->opcode == OP_LDADDR))) {
3081 context_used = mini_class_check_context_used (cfg, klass);
3083 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3084 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3086 } else if (context_used) {
3087 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3089 if (cfg->compile_aot) {
3090 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3092 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3093 mono_class_compute_gc_descriptor (klass);
3098 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3100 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline small copies, else call the memcpy helper. */
3105 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3106 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3107 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3112 iargs [2] = size_ins;
3114 EMIT_NEW_ICONST (cfg, iargs [2], n);
3116 memcpy_method = get_memcpy_method ();
3118 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3120 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the corlib string-class "memset" helper, cached in a static.
 * Aborts with g_error if the method is missing (old corlib).
 */
3125 get_memset_method (void)
3127 static MonoMethod *memset_method = NULL;
3128 if (!memset_method) {
3129 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3131 g_error ("Old corlib found. Install a new one");
3133 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at DEST->dreg.
 * gsharedvt types use a runtime size + bzero routine; small known sizes
 * are inlined via mini_emit_memset; the rest call the corlib memset.
 */
3137 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3139 MonoInst *iargs [3];
3140 int n, context_used;
3142 MonoMethod *memset_method;
3143 MonoInst *size_ins = NULL;
3144 MonoInst *bzero_ins = NULL;
3145 static MonoMethod *bzero_method;
3147 /* FIXME: Optimize this for the case when dest is an LDADDR */
3149 mono_class_init (klass);
/* gsharedvt: size/bzero are resolved at runtime through the info var. */
3150 if (mini_is_gsharedvt_klass (cfg, klass)) {
3151 context_used = mini_class_check_context_used (cfg, klass);
3152 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3153 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3155 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3156 g_assert (bzero_method);
3158 iargs [1] = size_ins;
3159 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3163 n = mono_class_value_size (klass, &align);
/* Small fixed-size types: inline the memset. */
3165 if (n <= sizeof (gpointer) * 5) {
3166 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3169 memset_method = get_memset_method ();
3171 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3172 EMIT_NEW_ICONST (cfg, iargs [2], n);
3173 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on
 * CONTEXT_USED and the method's shape this is the mrgctx variable, the
 * vtable variable (possibly via the mrgctx), or the vtable loaded from
 * 'this'. NOTE(review): several branch bodies are elided from this view.
 */
3178 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3180 MonoInst *this = NULL;
3182 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types without method-context sharing
 * can reach the context through 'this'. */
3184 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3185 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3186 !method->klass->valuetype)
3187 EMIT_NEW_ARGLOAD (cfg, this, 0);
3189 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3190 MonoInst *mrgctx_loc, *mrgctx_var;
3193 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3195 mrgctx_loc = mono_get_vtable_var (cfg);
3196 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3199 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3200 MonoInst *vtable_loc, *vtable_var;
3204 vtable_loc = mono_get_vtable_var (cfg);
3205 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3207 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3208 MonoInst *mrgctx_var = vtable_var;
/* The vtable var actually holds an mrgctx here; load its class_vtable. */
3211 vtable_reg = alloc_preg (cfg);
3212 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3213 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from the 'this' object header. */
3221 vtable_reg = alloc_preg (cfg);
3222 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) a new rgctx fetch descriptor: which method's
 * context, whether it is an mrgctx, and the patch-info payload
 * identifying the data to fetch. NOTE(review): the return statement is
 * elided from this view.
 */
3227 static MonoJumpInfoRgctxEntry *
3228 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3230 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3231 res->method = method;
3232 res->in_mrgctx = in_mrgctx;
3233 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3234 res->data->type = patch_type;
3235 res->data->data.target = patch_data;
3236 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline for ENTRY, passing
 * the rgctx value as the single argument.
 */
3241 static inline MonoInst*
3242 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3244 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for KLASS from the runtime
 * generic context of the current method.
 */
3248 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3249 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3251 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3252 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3254 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for signature SIG from the
 * runtime generic context of the current method.
 */
3258 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3259 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3261 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3262 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3264 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for a gsharedvt call
 * described by SIG+CMETHOD from the current method's runtime generic
 * context.
 */
3268 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3269 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3271 MonoJumpInfoGSharedVtCall *call_info;
3272 MonoJumpInfoRgctxEntry *entry;
3275 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3276 call_info->sig = sig;
3277 call_info->method = cmethod;
3279 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3280 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3282 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR that fetches the gsharedvt method info descriptor for
 * CMETHOD/INFO from the current method's runtime generic context.
 */
3287 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3288 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3290 MonoJumpInfoRgctxEntry *entry;
3293 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3294 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3296 return emit_rgctx_fetch (cfg, rgctx, entry);
3300 * emit_get_rgctx_method:
3302 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3303 * normal constants, else emit a load from the rgctx.
3306 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3307 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in play: the value is a compile-time constant. */
3309 if (!context_used) {
3312 switch (rgctx_type) {
3313 case MONO_RGCTX_INFO_METHOD:
3314 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3316 case MONO_RGCTX_INFO_METHOD_RGCTX:
3317 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3320 g_assert_not_reached ();
/* Shared code: fetch through the rgctx trampoline. */
3323 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3324 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3326 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for FIELD from the runtime
 * generic context of the current method.
 */
3331 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3332 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3334 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3335 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3337 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in cfg->gsharedvt_info->entries holding the
 * (DATA, RGCTX_TYPE) template, reusing an existing matching entry when
 * possible (except for MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets
 * a fresh slot). NOTE(review): the early-return and final return lines
 * are elided from this view.
 */
3341 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3343 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3344 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry with the same type and data. */
3349 for (i = 0; i < info->entries->len; ++i) {
3350 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3352 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Not found: append a new template and use its index. */
3356 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3357 template->info_type = rgctx_type;
3358 template->data = data;
3360 idx = info->entries->len;
3362 g_ptr_array_add (info->entries, template);
3368 * emit_get_gsharedvt_info:
3370 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3373 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3378 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3379 /* Load info->entries [idx] */
3380 dreg = alloc_preg (cfg);
3381 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type.
 */
3387 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3389 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3393 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing
 * the vtable either via rgctx (shared code) or as a constant.
 */
3396 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3398 MonoInst *vtable_arg;
3402 context_used = mini_class_check_context_used (cfg, klass);
3405 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3406 klass, MONO_RGCTX_INFO_VTABLE);
3408 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3412 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3415 if (COMPILE_LLVM (cfg))
3416 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3418 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3419 #ifdef MONO_ARCH_VTABLE_REG
3420 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3421 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IP, but only when sequence points
 * are enabled and we are compiling METHOD itself (not an inlinee).
 */
3428 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3432 if (cfg->gen_seq_points && cfg->method == method) {
3433 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3435 ins->flags |= MONO_INST_NONEMPTY_STACK;
3436 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source and target classes
 * of an upcoming cast in the JIT TLS data so a failing cast can report
 * both types. NULL_CHECK guards the object load; *OUT_BBLOCK (if given)
 * receives the current bblock after the emitted control flow.
 */
3441 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3443 if (mini_get_debug_options ()->better_cast_details) {
3444 int to_klass_reg = alloc_preg (cfg);
3445 int vtable_reg = alloc_preg (cfg);
3446 int klass_reg = alloc_preg (cfg);
3447 MonoBasicBlock *is_null_bb = NULL;
/* Skip the detail recording entirely for a null object. */
3451 NEW_BBLOCK (cfg, is_null_bb);
3453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3457 tls_get = mono_get_jit_tls_intrinsic (cfg);
3459 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3463 MONO_ADD_INS (cfg->cbb, tls_get);
3464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3467 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3468 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3469 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3472 MONO_START_BB (cfg, is_null_bb);
3474 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-detail record written by save_cast_details, so stale
 * data is not reported for a later unrelated failure.
 */
3480 reset_cast_details (MonoCompile *cfg)
3482 /* Reset the variables holding the cast details */
3483 if (mini_get_debug_options ()->better_cast_details) {
3484 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3486 MONO_ADD_INS (cfg->cbb, tls_get);
3487 /* It is enough to reset the from field */
3488 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3493 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ's dynamic type is exactly ARRAY_CLASS, raising
 * ArrayTypeMismatchException otherwise. The comparison strategy depends
 * on MONO_OPT_SHARED, generic sharing, and AOT mode.
 */
3496 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3498 int vtable_reg = alloc_preg (cfg);
3501 context_used = mini_class_check_context_used (cfg, array_class);
3503 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also serves as the null check on obj. */
3505 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3507 if (cfg->opt & MONO_OPT_SHARED) {
3508 int class_reg = alloc_preg (cfg);
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3510 if (cfg->compile_aot) {
3511 int klass_reg = alloc_preg (cfg);
3512 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3513 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: compare against the rgctx-resolved vtable. */
3517 } else if (context_used) {
3518 MonoInst *vtable_ins;
3520 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3521 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3523 if (cfg->compile_aot) {
3527 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3529 vt_reg = alloc_preg (cfg);
3530 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3531 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3534 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3540 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3542 reset_cast_details (cfg);
3546 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3547 * generic code is generated.
3550 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3552 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3555 MonoInst *rgctx, *addr;
3557 /* FIXME: What if the class is shared? We might not
3558 have to get the address of the method from the
/* Shared code: resolve Nullable<T>.Unbox's address at runtime and call
 * it indirectly with the rgctx as hidden argument. */
3560 addr = emit_get_rgctx_method (cfg, context_used, method,
3561 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3563 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3565 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: call Unbox directly, passing the vtable if required. */
3567 gboolean pass_vtable, pass_mrgctx;
3568 MonoInst *rgctx_arg = NULL;
3570 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3571 g_assert (!pass_mrgctx);
3574 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3577 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3580 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR that unboxes the object on top of the stack (sp [0]) as KLASS.
 * Performs the type check against the object's vtable/class, then computes
 * the address of the value payload (obj + sizeof (MonoObject)) and returns
 * it as a STACK_MP instruction (the `add` inst).
 *   With context_used != 0 the element class is fetched from the RGCTX so
 * the same code works for shared generic instantiations.
 */
3585 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3589 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3590 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3591 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3592 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3594 obj_reg = sp [0]->dreg;
/* load the vtable; the _FAULT variant makes a null obj fault here */
3595 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3598 /* FIXME: generics */
3599 g_assert (klass->rank == 0);
/* arrays can never unbox to a value type: rank must be 0 */
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3603 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* shared-generics path: compare against the RGCTX-resolved element class */
3609 MonoInst *element_class;
3611 /* This assertion is from the unboxcast insn */
3612 g_assert (klass->rank == 0);
3614 element_class = emit_get_rgctx_klass (cfg, context_used,
3615 klass->element_class, MONO_RGCTX_INFO_KLASS);
3617 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3618 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-shared path: plain class check with cast details for diagnostics */
3620 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3621 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3622 reset_cast_details (cfg);
/* result = address of the unboxed payload, right after the MonoObject header */
3625 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3626 MONO_ADD_INS (cfg->cbb, add);
3627 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR that unboxes OBJ when KLASS is a gsharedvt (generic-shared over
 * value types) type whose concrete layout is only known at runtime. The
 * RGCTX entry MONO_RGCTX_INFO_CLASS_BOX_TYPE tells at runtime whether the
 * instantiation is a reference type (1), a Nullable (2) or a plain vtype,
 * and the code branches to one of three blocks accordingly. In all cases
 * addr_reg ends up holding the address of the value, which is then loaded.
 *   *out_cbb receives the basic block that is current after the emitted
 * control flow (end_bb).
 */
3634 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3636 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3637 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3641 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* runtime castclass check (unbox flavour) against the runtime class */
3647 args [1] = klass_inst;
3650 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3652 NEW_BBLOCK (cfg, is_ref_bb);
3653 NEW_BBLOCK (cfg, is_nullable_bb);
3654 NEW_BBLOCK (cfg, end_bb);
/* dispatch on the box-type info: 1 == reference type, 2 == Nullable */
3655 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3657 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3662 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3663 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype case: address of the payload right after the object header */
3667 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3668 MONO_ADD_INS (cfg->cbb, addr);
3670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* reference-type case */
3673 MONO_START_BB (cfg, is_ref_bb);
3675 /* Save the ref to a temporary */
3676 dreg = alloc_ireg (cfg);
3677 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3678 addr->dreg = addr_reg;
3679 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Unbox through an RGCTX-resolved address */
3683 MONO_START_BB (cfg, is_nullable_bb);
3686 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3687 MonoInst *unbox_call;
3688 MonoMethodSignature *unbox_sig;
3691 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* build a one-arg signature (object) -> T by hand, since the generic
 * method cannot be constructed at JIT time for gsharedvt */
3693 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3694 unbox_sig->ret = &klass->byval_arg;
3695 unbox_sig->param_count = 1;
3696 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3697 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3699 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3700 addr->dreg = addr_reg;
3703 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* join point: load the value from whichever address was produced */
3706 MONO_START_BB (cfg, end_bb);
3709 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3711 *out_cbb = cfg->cbb;
3717 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of KLASS and return the MonoInst
 * holding the reference. FOR_BOX selects the boxing-specialized managed
 * allocator when one is available.
 *   Paths visible below: shared-generics (context_used, RGCTX-resolved
 * klass/vtable), MONO_OPT_SHARED (domain + class passed to mono_object_new),
 * an AOT out-of-line fast path specialized to mscorlib, a GC managed
 * allocator, and the generic allocation function chosen by
 * mono_class_get_allocation_ftn ().
 */
3720 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3722 MonoInst *iargs [2];
/* --- shared-generics path: resolve klass/vtable through the RGCTX --- */
3728 MonoInst *iargs [2];
3730 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3732 if (cfg->opt & MONO_OPT_SHARED)
3733 rgctx_info = MONO_RGCTX_INFO_KLASS;
3735 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3736 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3738 if (cfg->opt & MONO_OPT_SHARED) {
/* mono_object_new () takes (domain, klass) */
3739 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3741 alloc_ftn = mono_object_new;
/* mono_object_new_specific () takes just the vtable */
3744 alloc_ftn = mono_object_new_specific;
3747 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3748 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3750 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared paths --- */
3753 if (cfg->opt & MONO_OPT_SHARED) {
3754 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3755 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3757 alloc_ftn = mono_object_new;
3758 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3759 /* This happens often in argument checking code, eg. throw new FooException... */
3760 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3761 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3762 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3764 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3765 MonoMethod *managed_alloc = NULL;
/* vtable creation can fail: report a TypeLoadException via the cfg */
3769 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3770 cfg->exception_ptr = klass;
3774 #ifndef MONO_CROSS_COMPILE
3775 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3778 if (managed_alloc) {
3779 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3780 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3782 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator wants the instance size in machine words */
3784 guint32 lw = vtable->klass->instance_size;
3785 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3786 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3787 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3790 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3794 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3798 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL (a value of KLASS) into a heap object and return
 * the MonoInst holding the resulting reference. *out_cbb receives the basic
 * block current after the emitted code.
 *   Nullable<T> is boxed by calling the managed Nullable<T>.Box () helper
 * (through the RGCTX when context_used != 0). Gsharedvt types dispatch at
 * runtime on MONO_RGCTX_INFO_CLASS_BOX_TYPE (1 == ref type, 2 == Nullable).
 * The plain case allocates via handle_alloc () and stores the vtype payload
 * after the object header.
 */
3801 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3803 MonoInst *alloc, *ins;
3805 *out_cbb = cfg->cbb;
3807 if (mono_class_is_nullable (klass)) {
3808 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3811 /* FIXME: What if the class is shared? We might not
3812 have to get the method address from the RGCTX. */
3813 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3814 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3815 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3817 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* non-shared Nullable path: direct call, optionally passing the vtable */
3819 gboolean pass_vtable, pass_mrgctx;
3820 MonoInst *rgctx_arg = NULL;
3822 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3823 g_assert (!pass_mrgctx);
3826 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3829 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3832 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* --- gsharedvt path: runtime dispatch on the instantiation's box type --- */
3836 if (mini_is_gsharedvt_klass (cfg, klass)) {
3837 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3838 MonoInst *res, *is_ref, *src_var, *addr;
3841 dreg = alloc_ireg (cfg);
3843 NEW_BBLOCK (cfg, is_ref_bb);
3844 NEW_BBLOCK (cfg, is_nullable_bb);
3845 NEW_BBLOCK (cfg, end_bb);
3846 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3848 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3851 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the payload into the new object */
3854 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3857 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3858 ins->opcode = OP_STOREV_MEMBASE;
3860 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3861 res->type = STACK_OBJ;
3863 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* ref-type case: boxing a reference is the identity, just reload it */
3866 MONO_START_BB (cfg, is_ref_bb);
3867 addr_reg = alloc_ireg (cfg);
3869 /* val is a vtype, so has to load the value manually */
3870 src_var = get_vreg_to_inst (cfg, val->dreg);
3872 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3873 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3874 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: indirect call to Nullable<T>.Box via the RGCTX */
3878 MONO_START_BB (cfg, is_nullable_bb);
3881 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3882 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3884 MonoMethodSignature *box_sig;
3887 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3888 * construct that method at JIT time, so have to do things by hand.
3890 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3891 box_sig->ret = &mono_defaults.object_class->byval_arg;
3892 box_sig->param_count = 1;
3893 box_sig->params [0] = &klass->byval_arg;
3894 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3895 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3896 res->type = STACK_OBJ;
3900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3902 MONO_START_BB (cfg, end_bb);
3904 *out_cbb = cfg->cbb;
/* --- plain (non-Nullable, non-gsharedvt) box: alloc + store payload --- */
3908 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3912 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instantiation (or, with context_used,
 * an open generic) that has at least one variant/covariant type parameter
 * instantiated with a reference type. Such casts need the slower
 * variance-aware cast helpers rather than a direct class comparison.
 */
3919 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3922 MonoGenericContainer *container;
3923 MonoGenericInst *ginst;
3925 if (klass->generic_class) {
/* closed instantiation: params come from the generic type definition */
3926 container = klass->generic_class->container_class->generic_container;
3927 ginst = klass->generic_class->context.class_inst;
3928 } else if (klass->generic_container && context_used) {
/* open generic in shared code: use the container's own context */
3929 container = klass->generic_container;
3930 ginst = container->context.class_inst;
3935 for (i = 0; i < container->type_argc; ++i) {
/* skip invariant type parameters */
3937 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3939 type = ginst->type_argv [i];
3940 if (mini_type_is_reference (cfg, type))
3946 // FIXME: This doesn't work yet (class libs tests fail?)
/* is_complex_isinst:
 *   True when KLASS needs the full (cache-based) isinst/castclass helper
 * instead of inline checks: interfaces, arrays, Nullable, MarshalByRef,
 * sealed classes and open type variables. NOTE: the leading `TRUE ||`
 * currently forces the complex path for ALL classes — see the FIXME above. */
3947 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3950 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL `castclass` opcode: check that SRC is null or an
 * instance of KLASS, throwing InvalidCastException (or
 * ArrayTypeMismatchException via the helpers) otherwise, and return the
 * (unchanged) reference.
 *   Complex cases (see is_complex_isinst) go through the cached
 * mono_castclass_with_cache managed helper; simple cases emit inline
 * vtable/klass comparisons.
 */
3953 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3955 MonoBasicBlock *is_null_bb;
3956 int obj_reg = src->dreg;
3957 int vtable_reg = alloc_preg (cfg);
3958 MonoInst *klass_inst = NULL;
/* slow path: call the cache-based castclass wrapper */
3963 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3964 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3965 MonoInst *cache_ins;
3967 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3972 /* klass - it's the second element of the cache entry*/
3973 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3976 args [2] = cache_ins;
3978 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3981 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* fast path: null always passes castclass */
3984 NEW_BBLOCK (cfg, is_null_bb);
3986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3989 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
3991 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3993 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3995 int klass_reg = alloc_preg (cfg);
3997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* sealed non-array class: a single vtable/klass pointer compare suffices */
3999 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4000 /* the remoting code is broken, access the class for now */
4001 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4002 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4004 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4005 cfg->exception_ptr = klass;
4008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4013 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* general case: walk the class hierarchy in mini_emit_castclass_inst () */
4015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4016 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4020 MONO_START_BB (cfg, is_null_bb);
4022 reset_cast_details (cfg);
4028 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL `isinst` opcode: return SRC if it is null or an
 * instance of KLASS, NULL otherwise (no exception thrown). res_reg is
 * assigned the input up front so the false branch only has to clear it.
 *   Complex cases use the cached mono_isinst_with_cache managed helper;
 * arrays, Nullable, enums and sealed classes each get specialized inline
 * check sequences.
 */
4031 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4034 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4035 int obj_reg = src->dreg;
4036 int vtable_reg = alloc_preg (cfg);
4037 int res_reg = alloc_ireg_ref (cfg);
4038 MonoInst *klass_inst = NULL;
/* slow path: cache-based isinst wrapper */
4043 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4044 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4045 MonoInst *cache_ins;
4047 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4052 /* klass - it's the second element of the cache entry*/
4053 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4056 args [2] = cache_ins;
4058 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4061 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4064 NEW_BBLOCK (cfg, is_null_bb);
4065 NEW_BBLOCK (cfg, false_bb);
4066 NEW_BBLOCK (cfg, end_bb);
4068 /* Do the assignment at the beginning, so the other assignment can be if converted */
4069 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4070 ins->type = STACK_OBJ;
/* null input -> result stays the input (null) */
4073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4078 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4079 g_assert (!context_used);
4080 /* the is_null_bb target simply copies the input register to the output */
4081 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4083 int klass_reg = alloc_preg (cfg);
/* array case: check rank first, then the element (cast) class */
4086 int rank_reg = alloc_preg (cfg);
4087 int eclass_reg = alloc_preg (cfg);
4089 g_assert (!context_used);
4090 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[]: almost anything matches, but enums need their own handling */
4095 if (klass->cast_class == mono_defaults.object_class) {
4096 int parent_reg = alloc_preg (cfg);
4097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4098 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4099 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4101 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4102 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4103 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4105 } else if (klass->cast_class == mono_defaults.enum_class) {
4106 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4108 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4109 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4111 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4112 /* Check that the object is a vector too */
4113 int bounds_reg = alloc_preg (cfg);
4114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4119 /* the is_null_bb target simply copies the input register to the output */
4120 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4122 } else if (mono_class_is_nullable (klass)) {
4123 g_assert (!context_used);
4124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4125 /* the is_null_bb target simply copies the input register to the output */
4126 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* sealed class: single pointer compare */
4128 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4129 g_assert (!context_used);
4130 /* the remoting code is broken, access the class for now */
4131 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4132 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4135 cfg->exception_ptr = klass;
4138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* general case: hierarchy walk */
4146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4147 /* the is_null_bb target simply copies the input register to the output */
4148 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false branch: result is NULL */
4153 MONO_START_BB (cfg, false_bb);
4155 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4158 MONO_START_BB (cfg, is_null_bb);
4160 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal OP_CISINST check used by remoting-aware casts.
 * Produces an integer (STACK_I4) result in dreg, see the encoding below.
 * When remoting is compiled out (DISABLE_REMOTING) only the 0/1 results are
 * possible and hitting a proxy aborts with g_error ().
 */
4166 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4168 /* This opcode takes as input an object reference and a class, and returns:
4169 0) if the object is an instance of the class,
4170 1) if the object is not instance of the class,
4171 2) if the object is a proxy whose type cannot be determined */
4174 #ifndef DISABLE_REMOTING
4175 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4177 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4179 int obj_reg = src->dreg;
4180 int dreg = alloc_ireg (cfg);
4182 #ifndef DISABLE_REMOTING
4183 int klass_reg = alloc_preg (cfg);
4186 NEW_BBLOCK (cfg, true_bb);
4187 NEW_BBLOCK (cfg, false_bb);
4188 NEW_BBLOCK (cfg, end_bb);
4189 #ifndef DISABLE_REMOTING
4190 NEW_BBLOCK (cfg, false2_bb);
4191 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
4194 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4195 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4197 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4198 #ifndef DISABLE_REMOTING
4199 NEW_BBLOCK (cfg, interface_fail_bb);
4202 tmp_reg = alloc_preg (cfg);
4203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4204 #ifndef DISABLE_REMOTING
/* interface miss: may still be a transparent proxy with custom type info */
4205 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4206 MONO_START_BB (cfg, interface_fail_bb);
4207 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4209 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4211 tmp_reg = alloc_preg (cfg);
4212 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4213 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4214 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4216 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* non-interface class: proxy detection, then class hierarchy check */
4219 #ifndef DISABLE_REMOTING
4220 tmp_reg = alloc_preg (cfg);
4221 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4224 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* proxy: check against the remote class's proxy_class instead */
4225 tmp_reg = alloc_preg (cfg);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4227 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4229 tmp_reg = alloc_preg (cfg);
4230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4232 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4234 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4237 MONO_START_BB (cfg, no_proxy_bb);
4239 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4241 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* result blocks: 1 = not an instance, 2 = undeterminable proxy, 0 = match */
4245 MONO_START_BB (cfg, false_bb);
4247 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4248 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4250 #ifndef DISABLE_REMOTING
4251 MONO_START_BB (cfg, false2_bb);
4253 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4254 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4257 MONO_START_BB (cfg, true_bb);
4259 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4261 MONO_START_BB (cfg, end_bb);
/* wrap dreg in an OP_ICONST-typed result inst (STACK_I4) */
4264 MONO_INST_NEW (cfg, ins, OP_ICONST);
4266 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal OP_CCASTCLASS check used by remoting-aware
 * castclass. Result encoding and exception behavior are described in the
 * comment below. When remoting is compiled out (DISABLE_REMOTING), hitting
 * a proxy aborts with g_error ().
 */
4272 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4274 /* This opcode takes as input an object reference and a class, and returns:
4275 0) if the object is an instance of the class,
4276 1) if the object is a proxy whose type cannot be determined
4277 an InvalidCastException exception is thrown otherwhise*/
4280 #ifndef DISABLE_REMOTING
4281 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4283 MonoBasicBlock *ok_result_bb;
4285 int obj_reg = src->dreg;
4286 int dreg = alloc_ireg (cfg);
4287 int tmp_reg = alloc_preg (cfg);
4289 #ifndef DISABLE_REMOTING
4290 int klass_reg = alloc_preg (cfg);
4291 NEW_BBLOCK (cfg, end_bb);
4294 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0) */
4296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4299 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4301 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4302 #ifndef DISABLE_REMOTING
4303 NEW_BBLOCK (cfg, interface_fail_bb);
4305 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* interface miss: only a proxy with custom type info may still succeed */
4306 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4307 MONO_START_BB (cfg, interface_fail_bb);
4308 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4310 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4312 tmp_reg = alloc_preg (cfg);
4313 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4315 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* proxy with custom type info: type cannot be determined here (result 1) */
4317 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4320 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4321 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* non-interface class: proxy detection, then castclass */
4325 #ifndef DISABLE_REMOTING
4326 NEW_BBLOCK (cfg, no_proxy_bb);
4328 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4330 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4332 tmp_reg = alloc_preg (cfg);
4333 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4336 tmp_reg = alloc_preg (cfg);
4337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4339 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4341 NEW_BBLOCK (cfg, fail_1_bb);
4343 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4345 MONO_START_BB (cfg, fail_1_bb);
4347 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4350 MONO_START_BB (cfg, no_proxy_bb);
4352 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4354 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4358 MONO_START_BB (cfg, ok_result_bb);
4360 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4362 #ifndef DISABLE_REMOTING
4363 MONO_START_BB (cfg, end_bb);
/* wrap dreg in an OP_ICONST-typed result inst (STACK_I4) */
4367 MONO_INST_NEW (cfg, ins, OP_ICONST);
4369 ins->type = STACK_I4;
4375 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of KLASS
 * and initialize its target, method, method_code and invoke_impl fields,
 * returning the MonoInst holding the new delegate. TARGET is the first
 * argument of the delegate constructor; METHOD is the delegate's target
 * method. Write barriers are emitted for the reference stores when
 * cfg->gen_write_barriers is set.
 */
4377 static G_GNUC_UNUSED MonoInst*
4378 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4382 gpointer *trampoline;
4383 MonoInst *obj, *method_ins, *tramp_ins;
4387 obj = handle_alloc (cfg, klass, FALSE, 0);
4391 /* Inline the contents of mono_delegate_ctor */
4393 /* Set target field */
4394 /* Optimize away setting of NULL target */
4395 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4396 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4397 if (cfg->gen_write_barriers) {
4398 dreg = alloc_preg (cfg);
4399 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4400 emit_write_barrier (cfg, ptr, target);
4404 /* Set method field */
4405 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4406 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4407 if (cfg->gen_write_barriers) {
4408 dreg = alloc_preg (cfg);
4409 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4410 emit_write_barrier (cfg, ptr, method_ins);
4413 * To avoid looking up the compiled code belonging to the target method
4414 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4415 * store it, and we fill it after the method has been compiled.
4417 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4418 MonoInst *code_slot_ins;
4421 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* lazily create and populate the per-domain method -> code-slot hash */
4423 domain = mono_domain_get ();
4424 mono_domain_lock (domain);
4425 if (!domain_jit_info (domain)->method_code_hash)
4426 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4427 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4429 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4430 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4432 mono_domain_unlock (domain);
4434 if (cfg->compile_aot)
4435 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4437 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4439 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4442 /* Set invoke_impl field */
4443 if (cfg->compile_aot) {
4444 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4446 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4447 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4449 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4451 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall wrapper
 * for a `newobj` on a multi-dimensional array ctor. SP holds the dimension
 * arguments. Marks the cfg as having varargs and disables LLVM, since the
 * icall uses a vararg calling convention.
 */
4457 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4459 MonoJitICallInfo *info;
4461 /* Need to register the icall so it gets an icall wrapper */
4462 info = mono_get_array_new_va_icall (rank);
4464 cfg->flags |= MONO_CFG_HAS_VARARGS;
4466 /* mono_array_new_va () needs a vararg calling convention */
4467 cfg->disable_llvm = TRUE;
4469 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4470 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at
 * the very start of the method (prepended to bb_entry), and a dummy use in
 * bb_exit so liveness analysis keeps the variable alive for the whole
 * method. No-op if there is no got_var or it was already allocated.
 */
4474 mono_emit_load_got_addr (MonoCompile *cfg)
4476 MonoInst *getaddr, *dummy_use;
4478 if (!cfg->got_var || cfg->got_var_allocated)
4481 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4482 getaddr->cil_code = cfg->header->code;
4483 getaddr->dreg = cfg->got_var->dreg;
4485 /* Add it to the start of the first bblock */
4486 if (cfg->bb_entry->code) {
4487 getaddr->next = cfg->bb_entry->code;
4488 cfg->bb_entry->code = getaddr;
4491 MONO_ADD_INS (cfg->bb_entry, getaddr);
4493 cfg->got_var_allocated = TRUE;
4496 * Add a dummy use to keep the got_var alive, since real uses might
4497 * only be generated by the back ends.
4498 * Add it to end_bblock, so the variable's lifetime covers the whole
4500 * It would be better to make the usage of the got var explicit in all
4501 * cases when the backend needs it (i.e. calls, throw etc.), so this
4502 * wouldn't be needed.
4504 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4505 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit: read once from the MONO_INLINELIMIT
 * env var (falling back to INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
4508 static int inline_limit;
4509 static gboolean inline_limit_inited;
4512 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4514 MonoMethodHeaderSummary header;
4516 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4517 MonoMethodSignature *sig = mono_method_signature (method);
4521 if (cfg->generic_sharing_context)
4524 if (cfg->inline_depth > 10)
4527 #ifdef MONO_ARCH_HAVE_LMF_OPS
4528 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4529 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4530 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4535 if (!mono_method_get_header_summary (method, &header))
4538 /*runtime, icall and pinvoke are checked by summary call*/
4539 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4540 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4541 (mono_class_is_marshalbyref (method->klass)) ||
4545 /* also consider num_locals? */
4546 /* Do the size check early to avoid creating vtables */
4547 if (!inline_limit_inited) {
4548 if (g_getenv ("MONO_INLINELIMIT"))
4549 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4551 inline_limit = INLINE_LENGTH_LIMIT;
4552 inline_limit_inited = TRUE;
4554 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4558 * if we can initialize the class of the method right away, we do,
4559 * otherwise we don't allow inlining if the class needs initialization,
4560 * since it would mean inserting a call to mono_runtime_class_init()
4561 * inside the inlined code
4563 if (!(cfg->opt & MONO_OPT_SHARED)) {
4564 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4565 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4566 vtable = mono_class_vtable (cfg->domain, method->klass);
4569 mono_runtime_class_init (vtable);
4570 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4571 if (cfg->run_cctors && method->klass->has_cctor) {
4572 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4573 if (!method->klass->runtime_info)
4574 /* No vtable created yet */
4576 vtable = mono_class_vtable (cfg->domain, method->klass);
4579 /* This makes so that inline cannot trigger */
4580 /* .cctors: too many apps depend on them */
4581 /* running with a specific order... */
4582 if (! vtable->initialized)
4584 mono_runtime_class_init (vtable);
4586 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4587 if (!method->klass->runtime_info)
4588 /* No vtable created yet */
4590 vtable = mono_class_vtable (cfg->domain, method->klass);
4593 if (!vtable->initialized)
4598 * If we're compiling for shared code
4599 * the cctor will need to be run at aot method load time, for example,
4600 * or at the end of the compilation of the inlining method.
4602 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4607 * CAS - do not inline methods with declarative security
4608 * Note: this has to be before any possible return TRUE;
4610 if (mono_security_method_has_declsec (method))
4613 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4614 if (mono_arch_is_soft_float ()) {
4616 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4618 for (i = 0; i < sig->param_count; ++i)
4619 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access on KLASS from METHOD requires an
 *   explicit cctor-run check to be emitted.  JIT mode can consult the live
 *   vtable; beforefieldinit classes and classes whose cctor needn't run
 *   are exempt, as are static methods of the accessing class itself.
 */
4628 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4630 if (!cfg->compile_aot) {
/* JIT: if the cctor already ran, no check is needed */
4632 if (vtable->initialized)
4636 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4639 if (!mono_class_needs_cctor_run (klass, method))
4642 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4643 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit the address computation for a single-dimension array element:
 *   &arr->vector [index * element_size].  Optionally emits a bounds check
 *   (BCHECK).  Handles 64-bit index widening/narrowing, an x86/amd64 LEA
 *   fast path for power-of-two sizes, and gsharedvt element sizes fetched
 *   from the RGCTX.  Returns the address instruction (type STACK_MP).
 */
4650 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4654 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt element size is only known at runtime (see RGCTX fetch below) */
4657 if (mini_is_gsharedvt_klass (cfg, klass)) {
4660 mono_class_init (klass);
4661 size = mono_class_array_element_size (klass);
4664 mult_reg = alloc_preg (cfg);
4665 array_reg = arr->dreg;
4666 index_reg = index->dreg;
4668 #if SIZEOF_REGISTER == 8
4669 /* The array reg is 64 bits but the index reg is only 32 */
4670 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself */
4672 index2_reg = index_reg;
4674 index2_reg = alloc_preg (cfg);
4675 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an i8 index down to i4 */
4678 if (index->type == STACK_I8) {
4679 index2_reg = alloc_preg (cfg);
4680 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4682 index2_reg = index_reg;
4687 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4689 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold multiply+add into a single LEA */
4690 if (size == 1 || size == 2 || size == 4 || size == 8) {
4691 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4693 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4694 ins->klass = mono_class_get_element_class (klass);
4695 ins->type = STACK_MP;
4701 add_reg = alloc_ireg_mp (cfg);
4704 MonoInst *rgctx_ins;
4707 g_assert (cfg->generic_sharing_context);
4708 context_used = mini_class_check_context_used (cfg, klass);
4709 g_assert (context_used);
/* gsharedvt: element size comes from the runtime generic context */
4710 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4711 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4715 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4716 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4717 ins->klass = mono_class_get_element_class (klass);
4718 ins->type = STACK_MP;
4719 MONO_ADD_INS (cfg->cbb, ins);
4724 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit the address computation for an element of a rank-2 array with
 *   bounds info: subtracts each dimension's lower bound, range-checks both
 *   real indexes against the per-dimension lengths (throws
 *   IndexOutOfRangeException), then computes
 *   &arr->vector [((realidx1 * length2) + realidx2) * element_size].
 *   Only built when the arch has native mul/div (depends on OP_LMUL).
 */
4726 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4728 int bounds_reg = alloc_preg (cfg);
4729 int add_reg = alloc_ireg_mp (cfg);
4730 int mult_reg = alloc_preg (cfg);
4731 int mult2_reg = alloc_preg (cfg);
4732 int low1_reg = alloc_preg (cfg);
4733 int low2_reg = alloc_preg (cfg);
4734 int high1_reg = alloc_preg (cfg);
4735 int high2_reg = alloc_preg (cfg);
4736 int realidx1_reg = alloc_preg (cfg);
4737 int realidx2_reg = alloc_preg (cfg);
4738 int sum_reg = alloc_preg (cfg);
4739 int index1, index2, tmpreg;
4743 mono_class_init (klass);
4744 size = mono_class_array_element_size (klass);
4746 index1 = index_ins1->dreg;
4747 index2 = index_ins2->dreg;
4749 #if SIZEOF_REGISTER == 8
4750 /* The array reg is 64 bits but the index reg is only 32 */
4751 if (COMPILE_LLVM (cfg)) {
/* sign-extend both 32-bit indexes to pointer width */
4754 tmpreg = alloc_preg (cfg);
4755 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4757 tmpreg = alloc_preg (cfg);
4758 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4762 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4766 /* range checking */
4767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4768 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* dimension 1: realidx1 = index1 - lower_bound1; check against length1 */
4770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4771 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4772 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4774 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4775 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4776 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* dimension 2: same, reading bounds [1] at offset sizeof (MonoArrayBounds) */
4778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4779 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4780 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4782 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4784 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* linearize: ((realidx1 * length2) + realidx2) * size + vector offset */
4786 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4787 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4788 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4789 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4790 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4792 ins->type = STACK_MP;
4794 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for Array Get/Set/Address
 *   methods.  Rank is derived from CMETHOD's signature (minus the value
 *   argument when IS_SET).  Rank 1 and (with MONO_OPT_INTRINS and native
 *   mul/div) rank 2 use the inline fast paths above; otherwise falls back
 *   to calling the generated array-address marshalling wrapper.
 */
4801 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4805 MonoMethod *addr_method;
4808 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4811 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4813 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4814 /* emit_ldelema_2 depends on OP_LMUL */
4815 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4816 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* generic fallback: call the runtime-generated address wrapper */
4820 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4821 addr_method = mono_marshal_get_array_address (rank, element_size);
4822 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy) */
4827 static MonoBreakPolicy
4828 always_insert_breakpoint (MonoMethod *method)
4830 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; consulted by should_insert_brekpoint () */
4833 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4836 * mono_set_break_policy:
4837 * policy_callback: the new callback function
4839 * Allow embedders to decide wherther to actually obey breakpoint instructions
4840 * (both break IL instructions and Debugger.Break () method calls), for example
4841 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4842 * untrusted or semi-trusted code.
4844 * @policy_callback will be called every time a break point instruction needs to
4845 * be inserted with the method argument being the method that calls Debugger.Break()
4846 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4847 * if it wants the breakpoint to not be effective in the given method.
4848 * #MONO_BREAK_POLICY_ALWAYS is the default.
4851 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-insert policy */
4853 if (policy_callback)
4854 break_policy_func = policy_callback;
4856 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Ask the installed break policy whether a breakpoint for METHOD should
 *   actually be emitted.  Warns on MONO_BREAK_POLICY_ON_DBG (mdb support
 *   removed) and on out-of-range callback results.
 *   NOTE(review): name typo ("brekpoint") is load-bearing — callers use
 *   the same spelling, so it must not be renamed in isolation.
 */
4860 should_insert_brekpoint (MonoMethod *method) {
4861 switch (break_policy_func (method)) {
4862 case MONO_BREAK_POLICY_ALWAYS:
4864 case MONO_BREAK_POLICY_NEVER:
4866 case MONO_BREAK_POLICY_ON_DBG:
4867 g_warning ("mdb no longer supported");
4870 g_warning ("Incorrect value returned from break policy callback");
4875 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   args [0] = array, args [1] = index, args [2] = address of the value.
 *   IS_SET stores *args [2] into the element (with a write barrier for
 *   reference elements); otherwise loads the element into *args [2].
 */
4877 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4879 MonoInst *addr, *store, *load;
4880 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4882 /* the bounds check is already done by the callers */
4883 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: copy value from *args [2] into the element slot */
4885 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4886 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4887 if (mini_type_is_reference (cfg, fsig->params [2]))
4888 emit_write_barrier (cfg, addr, load);
/* get: copy the element into *args [2] */
4890 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4891 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type from the compiler's point of view */
4898 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4900 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit a stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 *   With SAFETY_CHECKS and a non-null reference value, routes through the
 *   virtual stelemref helper (which performs the array covariance check);
 *   otherwise stores directly, with a bounds check and — for reference
 *   element types — a write barrier.  gsharedvt uses OP_STOREV_MEMBASE.
 */
4904 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* checked reference store (skipped for a literal null value) */
4906 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4907 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4908 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4909 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4910 MonoInst *iargs [3];
4913 mono_class_setup_vtable (obj_array);
4914 g_assert (helper->slot);
4916 if (sp [0]->type != STACK_OBJ)
4918 if (sp [2]->type != STACK_OBJ)
4925 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4929 if (mini_is_gsharedvt_klass (cfg, klass)) {
4932 // FIXME-VT: OP_ICONST optimization
4933 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4934 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4935 ins->opcode = OP_STOREV_MEMBASE;
4936 } else if (sp [1]->opcode == OP_ICONST) {
/* constant index: fold the element offset at compile time */
4937 int array_reg = sp [0]->dreg;
4938 int index_reg = sp [1]->dreg;
4939 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4942 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4945 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4946 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4947 if (generic_class_is_reference_type (cfg, klass))
4948 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline Array.UnsafeStore/UnsafeLoad: element access WITHOUT safety
 *   checks.  IS_SET picks the element class from the third parameter and
 *   stores; otherwise it loads (element class from the return type).
 */
4955 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4960 eklass = mono_class_from_mono_type (fsig->params [2]);
4962 eklass = mono_class_from_mono_type (fsig->ret);
4966 return emit_array_store (cfg, eklass, args, FALSE);
4968 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4969 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructor calls; currently only tries
 *   SIMD intrinsics (when MONO_OPT_SIMD is on).  Returns NULL when the
 *   ctor has no intrinsic form, so the caller emits a normal call.
 */
4975 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4977 MonoInst *ins = NULL;
4978 #ifdef MONO_ARCH_SIMD_INTRINSICS
4979 if (cfg->opt & MONO_OPT_SIMD) {
4980 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier)
 *   to the current basic block and return it.
 */
4990 emit_memory_barrier (MonoCompile *cfg, int kind)
4992 MonoInst *ins = NULL;
4993 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4994 MONO_ADD_INS (cfg->cbb, ins);
4995 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsic expansion used when compiling with the LLVM backend:
 *   Math.Sin/Cos/Sqrt/Abs(double) map to single float ops, and (with
 *   MONO_OPT_CMOV) Math.Min/Max on i4/u4/i8/u8 map to min/max opcodes.
 *   Returns NULL if the method has no LLVM intrinsic form.
 */
5001 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5003 MonoInst *ins = NULL;
5006 /* The LLVM backend supports these intrinsics */
5007 if (cmethod->klass == mono_defaults.math_class) {
5008 if (strcmp (cmethod->name, "Sin") == 0) {
5010 } else if (strcmp (cmethod->name, "Cos") == 0) {
5012 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5014 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double op: result in a fresh float reg */
5019 MONO_INST_NEW (cfg, ins, opcode);
5020 ins->type = STACK_R8;
5021 ins->dreg = mono_alloc_freg (cfg);
5022 ins->sreg1 = args [0]->dreg;
5023 MONO_ADD_INS (cfg->cbb, ins);
5027 if (cfg->opt & MONO_OPT_CMOV) {
5028 if (strcmp (cmethod->name, "Min") == 0) {
5029 if (fsig->params [0]->type == MONO_TYPE_I4)
5031 if (fsig->params [0]->type == MONO_TYPE_U4)
5032 opcode = OP_IMIN_UN;
5033 else if (fsig->params [0]->type == MONO_TYPE_I8)
5035 else if (fsig->params [0]->type == MONO_TYPE_U8)
5036 opcode = OP_LMIN_UN;
5037 } else if (strcmp (cmethod->name, "Max") == 0) {
5038 if (fsig->params [0]->type == MONO_TYPE_I4)
5040 if (fsig->params [0]->type == MONO_TYPE_U4)
5041 opcode = OP_IMAX_UN;
5042 else if (fsig->params [0]->type == MONO_TYPE_I8)
5044 else if (fsig->params [0]->type == MONO_TYPE_U8)
5045 opcode = OP_LMAX_UN;
/* binary min/max: stack type follows the first parameter width */
5050 MONO_INST_NEW (cfg, ins, opcode);
5051 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5052 ins->dreg = mono_alloc_ireg (cfg);
5053 ins->sreg1 = args [0]->dreg;
5054 ins->sreg2 = args [1]->dreg;
5055 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that remain valid under generic sharing: currently only
 *   Array.UnsafeStore/UnsafeLoad.  Returns NULL otherwise.
 */
5063 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5065 if (cmethod->klass == mono_defaults.array_class) {
5066 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5067 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5068 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5069 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Central intrinsic dispatcher: recognize well-known BCL methods by
 *   class + name and replace the call with inline IR.  Covers String
 *   accessors, Object.GetType/GetHashCode/.ctor, Array length/rank
 *   fast paths, RuntimeHelpers, Thread/Interlocked atomics and barriers,
 *   Monitor Enter/Exit fast paths, Debugger.Break, Environment checks,
 *   ObjC Selector.GetHandle under AOT, then SIMD / LLVM / arch-specific
 *   intrinsics.  Returns NULL when no intrinsic applies.
 */
5076 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5078 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
5080 static MonoClass *runtime_helpers_class = NULL;
5081 if (! runtime_helpers_class)
5082 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5083 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5085 if (cmethod->klass == mono_defaults.string_class) {
5086 if (strcmp (cmethod->name, "get_Chars") == 0) {
5087 int dreg = alloc_ireg (cfg);
5088 int index_reg = alloc_preg (cfg);
5089 int mult_reg = alloc_preg (cfg);
5090 int add_reg = alloc_preg (cfg);
5092 #if SIZEOF_REGISTER == 8
5093 /* The array reg is 64 bits but the index reg is only 32 */
5094 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5096 index_reg = args [1]->dreg;
5098 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5100 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: one LEA for base + index*2 + chars offset */
5101 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5102 add_reg = ins->dreg;
5103 /* Avoid a warning */
5105 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5109 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5111 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5113 type_from_op (ins, NULL, NULL);
5115 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5116 int dreg = alloc_ireg (cfg);
5117 /* Decompose later to allow more optimizations */
5118 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5119 ins->type = STACK_I4;
5120 ins->flags |= MONO_INST_FAULT;
5121 cfg->cbb->has_array_access = TRUE;
5122 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5125 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5126 int mult_reg = alloc_preg (cfg);
5127 int add_reg = alloc_preg (cfg);
5129 /* The corlib functions check for oob already. */
5130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5131 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5133 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5136 } else if (cmethod->klass == mono_defaults.object_class) {
5138 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj->vtable->type, with a fault check on the object load */
5139 int dreg = alloc_ireg_ref (cfg);
5140 int vt_reg = alloc_preg (cfg);
5141 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5143 type_from_op (ins, NULL, NULL);
5146 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Non-moving GC only: hash from the (stable) object address */
5147 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5148 int dreg = alloc_ireg (cfg);
5149 int t1 = alloc_ireg (cfg);
5151 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5152 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5153 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP */
5157 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5158 MONO_INST_NEW (cfg, ins, OP_NOP);
5159 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5163 } else if (cmethod->klass == mono_defaults.array_class) {
5164 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5165 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5167 #ifndef MONO_BIG_ARRAYS
5169 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5172 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5173 int dreg = alloc_ireg (cfg);
5174 int bounds_reg = alloc_ireg_mp (cfg);
5175 MonoBasicBlock *end_bb, *szarray_bb;
5176 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5178 NEW_BBLOCK (cfg, end_bb);
5179 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL distinguishes szarrays from multi-dim arrays */
5181 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5182 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5185 /* Non-szarray case */
5187 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5188 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5190 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5191 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5192 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5193 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0 */
5196 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5197 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5199 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5200 MONO_START_BB (cfg, end_bb);
5202 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5203 ins->type = STACK_I4;
/* remaining Array intrinsics are all getters */
5209 if (cmethod->name [0] != 'g')
5212 if (strcmp (cmethod->name, "get_Rank") == 0) {
5213 int dreg = alloc_ireg (cfg);
5214 int vtable_reg = alloc_preg (cfg);
5215 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5216 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5217 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5218 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5219 type_from_op (ins, NULL, NULL);
5222 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5223 int dreg = alloc_ireg (cfg);
5225 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5226 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5227 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5232 } else if (cmethod->klass == runtime_helpers_class) {
5234 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5235 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5239 } else if (cmethod->klass == mono_defaults.thread_class) {
5240 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5241 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5242 MONO_ADD_INS (cfg->cbb, ins);
5244 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5245 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5247 } else if (cmethod->klass == mono_defaults.monitor_class) {
5249 /* FIXME this should be integrated to the check below once we support the trampoline version */
5250 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5251 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5252 MonoMethod *fast_method = NULL;
5254 /* Avoid infinite recursion */
5255 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5258 fast_method = mono_monitor_get_fast_path (cmethod);
5262 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5266 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* trampoline-based Enter/Exit: object passed in a fixed register */
5267 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5270 if (COMPILE_LLVM (cfg)) {
5272 * Pass the argument normally, the LLVM backend will handle the
5273 * calling convention problems.
5275 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5277 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5278 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5279 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5280 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5283 return (MonoInst*)call;
5284 } else if (strcmp (cmethod->name, "Exit") == 0) {
5287 if (COMPILE_LLVM (cfg)) {
5288 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5290 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5291 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5292 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5293 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5296 return (MonoInst*)call;
5298 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5300 MonoMethod *fast_method = NULL;
5302 /* Avoid infinite recursion */
5303 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5304 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5305 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5308 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5309 strcmp (cmethod->name, "Exit") == 0)
5310 fast_method = mono_monitor_get_fast_path (cmethod);
5314 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
5317 } else if (cmethod->klass->image == mono_defaults.corlib &&
5318 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5319 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5322 #if SIZEOF_REGISTER == 8
5323 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5324 /* 64 bit reads are already atomic */
5325 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5326 ins->dreg = mono_alloc_preg (cfg);
5327 ins->inst_basereg = args [0]->dreg;
5328 ins->inst_offset = 0;
5329 MONO_ADD_INS (cfg->cbb, ins);
5333 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of a constant +1/-1 */
5334 if (strcmp (cmethod->name, "Increment") == 0) {
5335 MonoInst *ins_iconst;
5338 if (fsig->params [0]->type == MONO_TYPE_I4)
5339 opcode = OP_ATOMIC_ADD_NEW_I4;
5340 #if SIZEOF_REGISTER == 8
5341 else if (fsig->params [0]->type == MONO_TYPE_I8)
5342 opcode = OP_ATOMIC_ADD_NEW_I8;
5345 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5346 ins_iconst->inst_c0 = 1;
5347 ins_iconst->dreg = mono_alloc_ireg (cfg);
5348 MONO_ADD_INS (cfg->cbb, ins_iconst);
5350 MONO_INST_NEW (cfg, ins, opcode);
5351 ins->dreg = mono_alloc_ireg (cfg);
5352 ins->inst_basereg = args [0]->dreg;
5353 ins->inst_offset = 0;
5354 ins->sreg2 = ins_iconst->dreg;
5355 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5356 MONO_ADD_INS (cfg->cbb, ins);
5358 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5359 MonoInst *ins_iconst;
5362 if (fsig->params [0]->type == MONO_TYPE_I4)
5363 opcode = OP_ATOMIC_ADD_NEW_I4;
5364 #if SIZEOF_REGISTER == 8
5365 else if (fsig->params [0]->type == MONO_TYPE_I8)
5366 opcode = OP_ATOMIC_ADD_NEW_I8;
5369 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5370 ins_iconst->inst_c0 = -1;
5371 ins_iconst->dreg = mono_alloc_ireg (cfg);
5372 MONO_ADD_INS (cfg->cbb, ins_iconst);
5374 MONO_INST_NEW (cfg, ins, opcode);
5375 ins->dreg = mono_alloc_ireg (cfg);
5376 ins->inst_basereg = args [0]->dreg;
5377 ins->inst_offset = 0;
5378 ins->sreg2 = ins_iconst->dreg;
5379 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5380 MONO_ADD_INS (cfg->cbb, ins);
5382 } else if (strcmp (cmethod->name, "Add") == 0) {
5385 if (fsig->params [0]->type == MONO_TYPE_I4)
5386 opcode = OP_ATOMIC_ADD_NEW_I4;
5387 #if SIZEOF_REGISTER == 8
5388 else if (fsig->params [0]->type == MONO_TYPE_I8)
5389 opcode = OP_ATOMIC_ADD_NEW_I8;
5393 MONO_INST_NEW (cfg, ins, opcode);
5394 ins->dreg = mono_alloc_ireg (cfg);
5395 ins->inst_basereg = args [0]->dreg;
5396 ins->inst_offset = 0;
5397 ins->sreg2 = args [1]->dreg;
5398 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5399 MONO_ADD_INS (cfg->cbb, ins);
5402 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5404 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5405 if (strcmp (cmethod->name, "Exchange") == 0) {
5407 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5409 if (fsig->params [0]->type == MONO_TYPE_I4)
5410 opcode = OP_ATOMIC_EXCHANGE_I4;
5411 #if SIZEOF_REGISTER == 8
5412 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5413 (fsig->params [0]->type == MONO_TYPE_I))
5414 opcode = OP_ATOMIC_EXCHANGE_I8;
5416 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5417 opcode = OP_ATOMIC_EXCHANGE_I4;
5422 MONO_INST_NEW (cfg, ins, opcode);
5423 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5424 ins->inst_basereg = args [0]->dreg;
5425 ins->inst_offset = 0;
5426 ins->sreg2 = args [1]->dreg;
5427 MONO_ADD_INS (cfg->cbb, ins);
5429 switch (fsig->params [0]->type) {
5431 ins->type = STACK_I4;
5435 ins->type = STACK_I8;
5437 case MONO_TYPE_OBJECT:
5438 ins->type = STACK_OBJ;
5441 g_assert_not_reached ();
/* storing a reference into the location: GC needs a write barrier */
5444 if (cfg->gen_write_barriers && is_ref)
5445 emit_write_barrier (cfg, args [0], args [1]);
5447 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5449 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5450 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5452 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5453 if (fsig->params [1]->type == MONO_TYPE_I4)
5455 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5456 size = sizeof (gpointer);
5457 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5460 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5461 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5462 ins->sreg1 = args [0]->dreg;
5463 ins->sreg2 = args [1]->dreg;
5464 ins->sreg3 = args [2]->dreg;
5465 ins->type = STACK_I4;
5466 MONO_ADD_INS (cfg->cbb, ins);
5467 } else if (size == 8) {
5468 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5469 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5470 ins->sreg1 = args [0]->dreg;
5471 ins->sreg2 = args [1]->dreg;
5472 ins->sreg3 = args [2]->dreg;
5473 ins->type = STACK_I8;
5474 MONO_ADD_INS (cfg->cbb, ins);
5476 /* g_assert_not_reached (); */
5478 if (cfg->gen_write_barriers && is_ref)
5479 emit_write_barrier (cfg, args [0], args [1]);
5481 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5483 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5484 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- misc corlib: Debugger.Break, Environment ---- */
5488 } else if (cmethod->klass->image == mono_defaults.corlib) {
5489 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5490 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5491 if (should_insert_brekpoint (cfg->method)) {
5492 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5494 MONO_INST_NEW (cfg, ins, OP_NOP);
5495 MONO_ADD_INS (cfg->cbb, ins);
/* platform check folded to a compile-time constant */
5499 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5500 && strcmp (cmethod->klass->name, "Environment") == 0) {
5502 EMIT_NEW_ICONST (cfg, ins, 1);
5504 EMIT_NEW_ICONST (cfg, ins, 0);
5508 } else if (cmethod->klass == mono_defaults.math_class) {
5510 * There is general branches code for Min/Max, but it does not work for
5512 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch Selector.GetHandle under AOT ---- */
5514 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5515 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5517 MonoJumpInfoToken *ji;
5520 cfg->disable_llvm = TRUE;
/* recover the ldstr token from the AOT patch-info constant */
5522 if (args [0]->opcode == OP_GOT_ENTRY) {
5523 pi = args [0]->inst_p1;
5524 g_assert (pi->opcode == OP_PATCH_INFO);
5525 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5528 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5529 ji = args [0]->inst_p0;
5532 NULLIFY_INS (args [0]);
5535 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5536 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5537 ins->dreg = mono_alloc_ireg (cfg);
5539 ins->inst_p0 = mono_string_to_utf8 (s);
5540 MONO_ADD_INS (cfg->cbb, ins);
/* ---- fall through to backend intrinsics ---- */
5545 #ifdef MONO_ARCH_SIMD_INTRINSICS
5546 if (cfg->opt & MONO_OPT_SIMD) {
5547 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5553 if (COMPILE_LLVM (cfg)) {
5554 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5559 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5563 * This entry point could be used later for arbitrary method
/* Redirect selected calls to runtime-internal implementations. */
5566 inline static MonoInst*
5567 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5568 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5570 if (method->klass == mono_defaults.string_class) {
5571 /* managed string allocation support */
/* Only when allocation profiling is off and code is not shared */
5572 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5573 MonoInst *iargs [2];
5574 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5575 MonoMethod *managed_alloc = NULL;
5577 g_assert (vtable); /*Should not fail since it System.String*/
5578 #ifndef MONO_CROSS_COMPILE
5579 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* call the GC's managed allocator with (vtable, length) */
5583 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5584 iargs [1] = args [0];
5585 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Store the call arguments in SP into newly created local variables so
 *   an inlined body can address them through cfg->args [i].  For instance
 *   methods the type of the 'this' slot (i == 0) is taken from the
 *   evaluation stack, since the signature does not include it.
 */
5592 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5594 MonoInst *store, *temp;
5597 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5598 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5601 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5602 * would be different than the MonoInst's used to represent arguments, and
5603 * the ldelema implementation can't deal with that.
5604 * Solution: When ldelema is used on an inline argument, create a var for
5605 * it, emit ldelema on that var, and emit the saving code below in
5606 * inline_method () if needed.
5608 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5609 cfg->args [i] = temp;
5610 /* This uses cfg->args [i] which is set by the preceding line */
5611 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5612 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when set to 1, inlining can be restricted via the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables
 * (see the check_inline_*_method_name_limit helpers below). */
5617 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5618 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5620 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging helper: permit inlining of CALLED_METHOD only when its full
 *   name starts with the prefix given by the
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The env
 *   var is read once and cached in a function-local static.
 */
5622 check_inline_called_method_name_limit (MonoMethod *called_method)
5625 static const char *limit = NULL;
5627 if (limit == NULL) {
5628 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5630 if (limit_string != NULL)
5631 limit = limit_string;
/* an empty limit string disables the filter (elided path) */
5636 if (limit [0] != '\0') {
5637 char *called_method_name = mono_method_full_name (called_method, TRUE);
5639 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5640 g_free (called_method_name);
5642 //return (strncmp_result <= 0);
5643 return (strncmp_result == 0);
5650 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Counterpart of the helper above for the CALLER: inlining proceeds
 *   only when the caller's full name starts with the prefix in the
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
5652 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5655 static const char *limit = NULL;
5657 if (limit == NULL) {
5658 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5659 if (limit_string != NULL) {
5660 limit = limit_string;
5666 if (limit [0] != '\0') {
5667 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5669 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5670 g_free (caller_method_name);
5672 //return (strncmp_result <= 0);
5673 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR initializing virtual register DREG to the default value for
 *   RTYPE: NULL for references/pointers, 0 for integer types, 0.0 for
 *   floats (loaded from a shared static constant), and VZERO for value
 *   types (incl. generic instances and type vars known to be valuetypes).
 */
5681 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* backing store for the 0.0 constant; OP_R8CONST references it by address */
5683 static double r8_0 = 0.0;
5687 rtype = mini_replace_type (rtype);
5691 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5692 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5693 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5694 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5695 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5696 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5697 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5698 ins->type = STACK_R8;
5699 ins->inst_p0 = (void*)&r8_0;
5701 MONO_ADD_INS (cfg->cbb, ins);
5702 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5703 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5704 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5705 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5706 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else falls back to a pointer-sized NULL */
5708 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE to its default value.
 *   Under soft-float a scratch register is zeroed first and then stored
 *   into the local; otherwise the local's own dreg is initialized directly.
 */
5713 emit_init_local (MonoCompile *cfg, int local, MonoType *type)
5715 MonoInst *var = cfg->locals [local];
5716 if (COMPILE_SOFT_FLOAT (cfg)) {
5718 int reg = alloc_dreg (cfg, var->type);
5719 emit_init_rvar (cfg, reg, type);
5720 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5722 emit_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD into the method being compiled at IP.  Saves the
 *   pieces of cfg state that mono_method_to_ir () clobbers, compiles the
 *   callee's IL into fresh basic blocks between SBBLOCK and EBBLOCK, then
 *   either stitches those blocks into the caller (cheap enough, or
 *   INLINE_ALWAYS set) or discards them; the saved state is restored in
 *   both cases.  RVAR, for non-void callees, is a new local receiving the
 *   return value.  NOTE(review): listing elided — the return statements and
 *   some branch bodies are not visible here.
 */
5727 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5728 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5730 MonoInst *ins, *rvar = NULL;
5731 MonoMethodHeader *cheader;
5732 MonoBasicBlock *ebblock, *sbblock;
5734 MonoMethod *prev_inlined_method;
5735 MonoInst **prev_locals, **prev_args;
5736 MonoType **prev_arg_types;
5737 guint prev_real_offset;
5738 GHashTable *prev_cbb_hash;
5739 MonoBasicBlock **prev_cil_offset_to_bb;
5740 MonoBasicBlock *prev_cbb;
5741 unsigned char* prev_cil_start;
5742 guint32 prev_cil_offset_to_bb_len;
5743 MonoMethod *prev_current_method;
5744 MonoGenericContext *prev_generic_context;
5745 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5747 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var based filters for debugging inliner problems */
5749 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5750 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5753 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5754 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5758 if (cfg->verbose_level > 2)
5759 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5761 if (!cmethod->inline_info) {
5762 cfg->stat_inlineable_methods++;
5763 cmethod->inline_info = 1;
5766 /* allocate local variables */
5767 cheader = mono_method_get_header (cmethod);
5769 if (cheader == NULL || mono_loader_get_last_error ()) {
5770 MonoLoaderError *error = mono_loader_get_last_error ();
5773 mono_metadata_free_mh (cheader);
5774 if (inline_always && error)
5775 mono_cfg_set_exception (cfg, error->exception_type);
5777 mono_loader_clear_error ();
5781 /* Must verify before creating locals as it can cause the JIT to assert. */
5782 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5783 mono_metadata_free_mh (cheader);
5787 /* allocate space to store the return value */
5788 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5789 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5792 prev_locals = cfg->locals;
5793 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5794 for (i = 0; i < cheader->num_locals; ++i)
5795 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5797 /* allocate start and end blocks */
5798 /* This is needed so if the inline is aborted, we can clean up */
5799 NEW_BBLOCK (cfg, sbblock);
5800 sbblock->real_offset = real_offset;
5802 NEW_BBLOCK (cfg, ebblock);
5803 ebblock->block_num = cfg->num_bblocks++;
5804 ebblock->real_offset = real_offset;
/* snapshot every cfg field mono_method_to_ir () overwrites */
5806 prev_args = cfg->args;
5807 prev_arg_types = cfg->arg_types;
5808 prev_inlined_method = cfg->inlined_method;
5809 cfg->inlined_method = cmethod;
5810 cfg->ret_var_set = FALSE;
5811 cfg->inline_depth ++;
5812 prev_real_offset = cfg->real_offset;
5813 prev_cbb_hash = cfg->cbb_hash;
5814 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5815 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5816 prev_cil_start = cfg->cil_start;
5817 prev_cbb = cfg->cbb;
5818 prev_current_method = cfg->current_method;
5819 prev_generic_context = cfg->generic_context;
5820 prev_ret_var_set = cfg->ret_var_set;
/* a callvirt on an instance method still needs the null check */
5822 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5825 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5827 ret_var_set = cfg->ret_var_set;
/* restore the caller's compilation state */
5829 cfg->inlined_method = prev_inlined_method;
5830 cfg->real_offset = prev_real_offset;
5831 cfg->cbb_hash = prev_cbb_hash;
5832 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5833 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5834 cfg->cil_start = prev_cil_start;
5835 cfg->locals = prev_locals;
5836 cfg->args = prev_args;
5837 cfg->arg_types = prev_arg_types;
5838 cfg->current_method = prev_current_method;
5839 cfg->generic_context = prev_generic_context;
5840 cfg->ret_var_set = prev_ret_var_set;
5841 cfg->inline_depth --;
/* success: the callee was cheap enough, or inlining was forced */
5843 if ((costs >= 0 && costs < 60) || inline_always) {
5844 if (cfg->verbose_level > 2)
5845 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5847 cfg->stat_inlined_methods++;
5849 /* always add some code to avoid block split failures */
5850 MONO_INST_NEW (cfg, ins, OP_NOP);
5851 MONO_ADD_INS (prev_cbb, ins);
5853 prev_cbb->next_bb = sbblock;
5854 link_bblock (cfg, prev_cbb, sbblock);
5857 * Get rid of the begin and end bblocks if possible to aid local
5860 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5862 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5863 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5865 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5866 MonoBasicBlock *prev = ebblock->in_bb [0];
5867 mono_merge_basic_blocks (cfg, prev, ebblock);
5869 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5870 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5871 cfg->cbb = prev_cbb;
5875 * It's possible that the rvar is set in some prev bblock, but not in others.
5881 for (i = 0; i < ebblock->in_count; ++i) {
5882 bb = ebblock->in_bb [i];
5884 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5887 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5897 * If the inlined method contains only a throw, then the ret var is not
5898 * set, so set it to a dummy value.
5901 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5903 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5906 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* failure: drop the new bblocks and clear any pending exception state */
5909 if (cfg->verbose_level > 2)
5910 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5911 cfg->exception_type = MONO_EXCEPTION_NONE;
5912 mono_loader_clear_error ();
5914 /* This gets rid of the newly added bblocks */
5915 cfg->cbb = prev_cbb;
5917 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5922 * Some of these comments may well be out-of-date.
5923 * Design decisions: we do a single pass over the IL code (and we do bblock
5924 * splitting/merging in the few cases when it's required: a back jump to an IL
5925 * address that was not already seen as bblock starting point).
5926 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5927 * Complex operations are decomposed in simpler ones right away. We need to let the
5928 * arch-specific code peek and poke inside this process somehow (except when the
5929 * optimizations can take advantage of the full semantic info of coarse opcodes).
5930 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5931 * MonoInst->opcode initially is the IL opcode or some simplification of that
5932 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5933 * opcode with value bigger than OP_LAST.
5934 * At this point the IR can be handed over to an interpreter, a dumb code generator
5935 * or to the optimizing code generator that will translate it to SSA form.
5937 * Profiling directed optimizations.
5938 * We may compile by default with few or no optimizations and instrument the code
5939 * or the user may indicate what methods to optimize the most either in a config file
5940 * or through repeated runs where the compiler applies offline the optimizations to
5941 * each method and then decides if it was worth it.
/* Per-opcode verification helpers used by mono_method_to_ir: each bails
 * out through UNVERIFIED / LOAD_ERROR when the IL being translated is
 * malformed or a referenced type failed to load. */
5944 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5945 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5946 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5947 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5948 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5949 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5950 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5951 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5953 /* offset from br.s -> br like opcodes */
5954 #define BIG_BRANCH_OFFSET 13
5957 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5959 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5961 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Pre-scan the IL between START and END, creating a MonoBasicBlock at
 *   every branch target and at the instruction following each branch so
 *   the main translation pass finds block boundaries already in place.
 *   Blocks ending in CEE_THROW are flagged out_of_line as a layout hint.
 *   NOTE(review): listing elided — advance-by-operand-size lines between
 *   the switch cases are not visible here.
 */
5965 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5967 unsigned char *ip = start;
5968 unsigned char *target;
5971 MonoBasicBlock *bblock;
5972 const MonoOpcode *opcode;
5975 cli_addr = ip - start;
5976 i = mono_opcode_value ((const guint8 **)&ip, end);
5979 opcode = &mono_opcodes [i];
5980 switch (opcode->argument) {
5981 case MonoInlineNone:
5984 case MonoInlineString:
5985 case MonoInlineType:
5986 case MonoInlineField:
5987 case MonoInlineMethod:
5990 case MonoShortInlineR:
5997 case MonoShortInlineVar:
5998 case MonoShortInlineI:
6001 case MonoShortInlineBrTarget:
/* 1-byte signed displacement relative to the next instruction */
6002 target = start + cli_addr + 2 + (signed char)ip [1];
6003 GET_BBLOCK (cfg, bblock, target);
6006 GET_BBLOCK (cfg, bblock, ip);
6008 case MonoInlineBrTarget:
/* 4-byte signed displacement relative to the next instruction */
6009 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6010 GET_BBLOCK (cfg, bblock, target);
6013 GET_BBLOCK (cfg, bblock, ip);
6015 case MonoInlineSwitch: {
6016 guint32 n = read32 (ip + 1);
6019 cli_addr += 5 + 4 * n;
6020 target = start + cli_addr;
6021 GET_BBLOCK (cfg, bblock, target);
6023 for (j = 0; j < n; ++j) {
6024 target = start + cli_addr + (gint32)read32 (ip);
6025 GET_BBLOCK (cfg, bblock, target);
6035 g_assert_not_reached ();
6038 if (i == CEE_THROW) {
6039 unsigned char *bb_start = ip - 1;
6041 /* Find the start of the bblock containing the throw */
6043 while ((bb_start >= start) && !bblock) {
6044 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6048 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the target is read
 *   from the wrapper data (and inflated when a generic CONTEXT is given);
 *   otherwise a normal metadata lookup is performed.  Open generic methods
 *   are allowed through — contrast mini_get_method below.
 */
6058 static inline MonoMethod *
6059 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6063 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6064 method = mono_method_get_wrapper_data (m, token);
6066 method = mono_class_inflate_generic_method (method, context);
6068 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when generic sharing is not in
 *   use, methods whose class is an open constructed type are rejected
 *   (handling of that branch is elided in this listing).
 */
6074 static inline MonoMethod *
6075 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6077 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6079 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass, reading wrapper data (and inflating with
 *   CONTEXT) for wrapper methods, or doing a metadata lookup otherwise.
 *   The class is initialized before being returned.
 */
6085 static inline MonoClass*
6086 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6090 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6091 klass = mono_method_get_wrapper_data (method, token);
6093 klass = mono_class_inflate_generic_class (klass, context);
6095 klass = mono_class_get_full (method->klass->image, token, context);
6098 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature.  Wrapper methods carry the
 *   signature in their wrapper data (inflated with CONTEXT when needed);
 *   otherwise the signature is parsed from metadata.
 */
6102 static inline MonoMethodSignature*
6103 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6105 MonoMethodSignature *fsig;
6107 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6110 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6112 fsig = mono_inflate_generic_signature (fsig, context, &error);
/* inflation is not expected to fail here */
6114 g_assert (mono_error_ok (&error));
6117 fsig = mono_metadata_parse_signature (method->klass->image, token);
6123 * Returns TRUE if the JIT should abort inlining because "callee"
6124 * is influenced by security attributes.
/* check_linkdemand: evaluate CAS link demands on CALLEE.  ECMA demands
 * are turned into an emitted runtime SecurityException; other failures
 * are recorded as a link-demand exception on the cfg. */
6127 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* declarative security on the callee while inlining => abort the inline */
6131 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6135 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6136 if (result == MONO_JIT_SECURITY_OK)
6139 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6140 /* Generate code to throw a SecurityException before the actual call/link */
6141 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6144 NEW_ICONST (cfg, args [0], 4);
6145 NEW_METHODCONST (cfg, args [1], caller);
6146 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6147 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6148 /* don't hide previous results */
6149 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6150 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving and caching in a function-local static) the
 *   managed SecurityManager.ThrowException(1-arg) helper method.
 */
6158 throw_exception (void)
6160 static MonoMethod *method = NULL;
6163 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6164 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to the managed ThrowException helper so the preallocated
 *   exception object EX is thrown when the generated code runs.
 */
6171 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6173 MonoMethod *thrower = throw_exception ();
6176 EMIT_NEW_PCONST (cfg, args [0], ex);
6177 mono_emit_method_call (cfg, thrower, args, NULL);
6181 * Return the original method if a wrapper is specified. We can only access
6182 * the custom attributes from the original method.
6185 get_original_method (MonoMethod *method)
/* not a wrapper: nothing to unwrap (return is elided in this listing) */
6187 if (method->wrapper_type == MONO_WRAPPER_NONE)
6190 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6191 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6194 /* in other cases we need to find the original method */
6195 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: emit a runtime throw when CALLER may not
 *   access FIELD.  Wrappers are unwrapped first since they carry no
 *   security attributes.
 */
6199 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6200 MonoBasicBlock *bblock, unsigned char *ip)
6202 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6203 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6205 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: emit a runtime throw when CALLER may not call
 *   CALLEE.  Wrappers are unwrapped first since they carry no attributes.
 */
6209 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6210 MonoBasicBlock *bblock, unsigned char *ip)
6212 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6213 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6215 emit_throw_exception (cfg, ex);
6219 * Check that the IL instructions at ip are the array initialization
6220 * sequence and return the pointer to the data and the size.
/* initialize_array_data: recognizes the dup/ldtoken/call
 * RuntimeHelpers::InitializeArray pattern and, on a match, yields the
 * static field's raw data pointer (or, for AOT, the field RVA wrapped in
 * a pointer) plus the element size (*out_size) and the field token
 * (*out_field_token).  Non-matching sequences fall through (elided). */
6223 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6226 * newarr[System.Int32]
6228 * ldtoken field valuetype ...
6229 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6231 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6232 guint32 token = read32 (ip + 7);
6233 guint32 field_token = read32 (ip + 2);
6234 guint32 field_index = field_token & 0xffffff;
6236 const char *data_ptr;
6238 MonoMethod *cmethod;
6239 MonoClass *dummy_class;
6240 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6246 *out_field_token = field_token;
6248 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must be exactly corlib's RuntimeHelpers.InitializeArray */
6251 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6253 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6254 case MONO_TYPE_BOOLEAN:
6258 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6259 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6260 case MONO_TYPE_CHAR:
/* the computed data size must fit inside the field */
6277 if (size > mono_type_size (field->type, &dummy_align))
6280 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6281 if (!method->klass->image->dynamic) {
6282 field_index = read32 (ip + 2) & 0xffffff;
6283 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6284 data_ptr = mono_image_rva_map (method->klass->image, rva);
6285 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6286 /* for aot code we do the lookup on load */
6287 if (aot && data_ptr)
6288 return GUINT_TO_POINTER (rva);
6290 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6292 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message containing the
 *   method's full name and a disassembly of the offending instruction at
 *   IP (or a note when the method body is empty).
 */
6300 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6302 char *method_fname = mono_method_full_name (method, TRUE);
6304 MonoMethodHeader *header = mono_method_get_header (method);
6306 if (header->code_size == 0)
6307 method_code = g_strdup ("method body is empty.");
6309 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6310 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6311 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6312 g_free (method_fname);
6313 g_free (method_code);
/* defer freeing the header until the cfg itself is destroyed */
6314 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a preallocated exception object on CFG; the pointer is
 *   registered as a GC root since it is stored in unmanaged memory.
 */
6318 set_exception_object (MonoCompile *cfg, MonoException *exception)
6320 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6321 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6322 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Store the value on top of the stack (*sp) into local N.  When that
 *   would be a plain reg-reg move of a just-emitted constant, retarget the
 *   constant's dreg to the local instead of emitting a separate store.
 */
6326 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6329 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6330 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6331 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6332 /* Optimize reg-reg moves away */
6334 * Can't optimize other opcodes, since sp[0] might point to
6335 * the last ins of a decomposed opcode.
6337 sp [0]->dreg = (cfg)->locals [n]->dreg;
6339 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6344 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases: an ldloca immediately followed by initobj on the same local is
 * turned into a direct local initialization, letting the caller skip
 * both instructions (the returned IL position is elided here). */
6347 static inline unsigned char *
6348 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6358 local = read16 (ip + 2);
6362 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6363 /* From the INITOBJ case */
6364 token = read32 (ip + 2);
6365 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6366 CHECK_TYPELOAD (klass);
6367 type = mini_replace_type (&klass->byval_arg);
6368 emit_init_local (cfg, local, type);
/*
 * is_exception_class:
 *   Walk up the parent chain to decide whether CLASS derives from
 *   System.Exception.  (The loop structure is elided in this listing.)
 */
6376 is_exception_class (MonoClass *class)
6379 if (class == mono_defaults.exception_class)
6381 class = class->parent;
6387 * is_jit_optimizer_disabled:
6389 * Determine whether M's assembly has a DebuggableAttribute with the
6390 * IsJITOptimizerDisabled flag set.
6393 is_jit_optimizer_disabled (MonoMethod *m)
6395 MonoAssembly *ass = m->klass->image->assembly;
6396 MonoCustomAttrInfo* attrs;
6397 static MonoClass *klass;
6399 gboolean val = FALSE;
/* the result is cached on the assembly; the barrier orders the value
 * write before the inited flag for lock-free readers */
6402 if (ass->jit_optimizer_disabled_inited)
6403 return ass->jit_optimizer_disabled;
6406 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6409 ass->jit_optimizer_disabled = FALSE;
6410 mono_memory_barrier ();
6411 ass->jit_optimizer_disabled_inited = TRUE;
6415 attrs = mono_custom_attrs_from_assembly (ass);
6417 for (i = 0; i < attrs->num_attrs; ++i) {
6418 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6421 MonoMethodSignature *sig;
6423 if (!attr->ctor || attr->ctor->klass != klass)
6425 /* Decode the attribute. See reflection.c */
6426 len = attr->data_size;
6427 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
6428 g_assert (read16 (p) == 0x0001);
6431 // FIXME: Support named parameters
6432 sig = mono_method_signature (attr->ctor);
6433 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6435 /* Two boolean arguments */
6439 mono_custom_attrs_free (attrs);
6442 ass->jit_optimizer_disabled = val;
6443 mono_memory_barrier ();
6444 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether the call CMETHOD/FSIG made from METHOD via CALL_OPCODE
 *   can be compiled as a real tail call: the architecture (or matching
 *   signatures) must allow it, and no argument may be able to point into
 *   the current method's stack frame.
 */
6450 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6452 gboolean supported_tail_call;
6455 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6456 supported_tail_call = mono_arch_tail_call_supported (mono_method_signature (method), mono_method_signature (cmethod));
6458 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6461 for (i = 0; i < fsig->param_count; ++i) {
6462 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6463 /* These can point to the current method's stack */
6464 supported_tail_call = FALSE;
6466 if (fsig->hasthis && cmethod->klass->valuetype)
6467 /* this might point to the current method's stack */
6468 supported_tail_call = FALSE;
6469 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6470 supported_tail_call = FALSE;
6471 if (cfg->method->save_lmf)
6472 supported_tail_call = FALSE;
6473 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6474 supported_tail_call = FALSE;
6475 if (call_opcode != CEE_CALL)
6476 supported_tail_call = FALSE;
6478 /* Debugging support */
6480 if (supported_tail_call) {
6481 if (!mono_debug_count ())
6482 supported_tail_call = FALSE;
6486 return supported_tail_call;
6489 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6490 * it to the thread local value based on the tls_offset field. Every other kind of access to
6491 * the field causes an assert.
/* is_magic_tls_access: TRUE only for corlib's ThreadLocal`1.tlsdata
 * field (early-return lines are elided in this listing). */
6494 is_magic_tls_access (MonoClassField *field)
6496 if (strcmp (field->name, "tlsdata"))
6498 if (strcmp (field->parent->name, "ThreadLocal`1"))
6500 return field->parent->image == mono_defaults.corlib;
6503 /* emits the code needed to access a managed tls var (like ThreadStatic)
6504 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6505 * pointer for the current thread.
6506 * Returns the MonoInst* representing the address of the tls var.
6509 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6512 int static_data_reg, array_reg, dreg;
6513 int offset2_reg, idx_reg;
6514 // inlined access to the tls data
6515 // idx = (offset >> 24) - 1;
6516 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6517 static_data_reg = alloc_ireg (cfg);
6518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6519 idx_reg = alloc_ireg (cfg);
6520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale idx by pointer size to index the static_data pointer array */
6522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6523 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6524 array_reg = alloc_ireg (cfg);
6525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6526 offset2_reg = alloc_ireg (cfg);
/* low 24 bits of the offset are the byte offset inside the chunk */
6527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6528 dreg = alloc_ireg (cfg);
6529 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6534 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6535 * this address is cached per-method in cached_tls_addr.
6538 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6540 MonoInst *load, *addr, *temp, *store, *thread_ins;
6541 MonoClassField *offset_field;
/* reuse the per-method cached address when available */
6543 if (*cached_tls_addr) {
6544 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6547 thread_ins = mono_get_thread_intrinsic (cfg);
6548 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load the tls_offset value stored next to tlsdata in ThreadLocal<T> */
6550 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6552 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no thread intrinsic on this arch: call the runtime helper instead */
6554 MonoMethod *thread_method;
6555 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6556 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6558 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6559 addr->klass = mono_class_from_mono_type (tls_field->type);
6560 addr->type = STACK_MP;
/* cache the computed address in a local for subsequent accesses */
6561 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6562 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6564 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6569 * mono_method_to_ir:
6571 * Translate the .net IL into linear IR.
6574 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6575 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6576 guint inline_offset, gboolean is_virtual_call)
6579 MonoInst *ins, **sp, **stack_start;
6580 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6581 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6582 MonoMethod *cmethod, *method_definition;
6583 MonoInst **arg_array;
6584 MonoMethodHeader *header;
6586 guint32 token, ins_flag;
6588 MonoClass *constrained_call = NULL;
6589 unsigned char *ip, *end, *target, *err_pos;
6590 MonoMethodSignature *sig;
6591 MonoGenericContext *generic_context = NULL;
6592 MonoGenericContainer *generic_container = NULL;
6593 MonoType **param_types;
6594 int i, n, start_new_bblock, dreg;
6595 int num_calls = 0, inline_costs = 0;
6596 int breakpoint_id = 0;
6598 MonoBoolean security, pinvoke;
6599 MonoSecurityManager* secman = NULL;
6600 MonoDeclSecurityActions actions;
6601 GSList *class_inits = NULL;
6602 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6604 gboolean init_locals, seq_points, skip_dead_blocks;
6605 gboolean disable_inline, sym_seq_points = FALSE;
6606 MonoInst *cached_tls_addr = NULL;
6607 MonoDebugMethodInfo *minfo;
6608 MonoBitSet *seq_point_locs = NULL;
6609 MonoBitSet *seq_point_set_locs = NULL;
6611 disable_inline = is_jit_optimizer_disabled (method);
6613 /* serialization and xdomain stuff may need access to private fields and methods */
6614 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6615 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6616 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6617 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6618 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6619 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6621 dont_verify |= mono_security_smcs_hack_enabled ();
6623 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6624 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6625 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6626 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6627 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6629 image = method->klass->image;
6630 header = mono_method_get_header (method);
6632 MonoLoaderError *error;
6634 if ((error = mono_loader_get_last_error ())) {
6635 mono_cfg_set_exception (cfg, error->exception_type);
6637 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6638 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6640 goto exception_exit;
6642 generic_container = mono_method_get_generic_container (method);
6643 sig = mono_method_signature (method);
6644 num_args = sig->hasthis + sig->param_count;
6645 ip = (unsigned char*)header->code;
6646 cfg->cil_start = ip;
6647 end = ip + header->code_size;
6648 cfg->stat_cil_code_size += header->code_size;
6649 init_locals = header->init_locals;
6651 seq_points = cfg->gen_seq_points && cfg->method == method;
6652 #ifdef PLATFORM_ANDROID
6653 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6656 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6657 /* We could hit a seq point before attaching to the JIT (#8338) */
6661 if (cfg->gen_seq_points && cfg->method == method) {
6662 minfo = mono_debug_lookup_method (method);
6664 int i, n_il_offsets;
6668 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6669 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6670 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6671 sym_seq_points = TRUE;
6672 for (i = 0; i < n_il_offsets; ++i) {
6673 if (il_offsets [i] < header->code_size)
6674 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6676 g_free (il_offsets);
6677 g_free (line_numbers);
6682 * Methods without init_locals set could cause asserts in various passes
6687 method_definition = method;
6688 while (method_definition->is_inflated) {
6689 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6690 method_definition = imethod->declaring;
6693 /* SkipVerification is not allowed if core-clr is enabled */
6694 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6696 dont_verify_stloc = TRUE;
6699 if (sig->is_inflated)
6700 generic_context = mono_method_get_context (method);
6701 else if (generic_container)
6702 generic_context = &generic_container->context;
6703 cfg->generic_context = generic_context;
6705 if (!cfg->generic_sharing_context)
6706 g_assert (!sig->has_type_parameters);
6708 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6709 g_assert (method->is_inflated);
6710 g_assert (mono_method_get_context (method)->method_inst);
6712 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6713 g_assert (sig->generic_param_count);
6715 if (cfg->method == method) {
6716 cfg->real_offset = 0;
6718 cfg->real_offset = inline_offset;
6721 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6722 cfg->cil_offset_to_bb_len = header->code_size;
6724 cfg->current_method = method;
6726 if (cfg->verbose_level > 2)
6727 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6729 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6731 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6732 for (n = 0; n < sig->param_count; ++n)
6733 param_types [n + sig->hasthis] = sig->params [n];
6734 cfg->arg_types = param_types;
6736 dont_inline = g_list_prepend (dont_inline, method);
6737 if (cfg->method == method) {
6739 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6740 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6743 NEW_BBLOCK (cfg, start_bblock);
6744 cfg->bb_entry = start_bblock;
6745 start_bblock->cil_code = NULL;
6746 start_bblock->cil_length = 0;
6747 #if defined(__native_client_codegen__)
6748 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6749 ins->dreg = alloc_dreg (cfg, STACK_I4);
6750 MONO_ADD_INS (start_bblock, ins);
6754 NEW_BBLOCK (cfg, end_bblock);
6755 cfg->bb_exit = end_bblock;
6756 end_bblock->cil_code = NULL;
6757 end_bblock->cil_length = 0;
6758 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6759 g_assert (cfg->num_bblocks == 2);
6761 arg_array = cfg->args;
6763 if (header->num_clauses) {
6764 cfg->spvars = g_hash_table_new (NULL, NULL);
6765 cfg->exvars = g_hash_table_new (NULL, NULL);
6767 /* handle exception clauses */
6768 for (i = 0; i < header->num_clauses; ++i) {
6769 MonoBasicBlock *try_bb;
6770 MonoExceptionClause *clause = &header->clauses [i];
6771 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6772 try_bb->real_offset = clause->try_offset;
6773 try_bb->try_start = TRUE;
6774 try_bb->region = ((i + 1) << 8) | clause->flags;
6775 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6776 tblock->real_offset = clause->handler_offset;
6777 tblock->flags |= BB_EXCEPTION_HANDLER;
6780 * Linking the try block with the EH block hinders inlining as we won't be able to
6781 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6783 if (COMPILE_LLVM (cfg))
6784 link_bblock (cfg, try_bb, tblock);
6786 if (*(ip + clause->handler_offset) == CEE_POP)
6787 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6789 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6790 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6791 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6792 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6793 MONO_ADD_INS (tblock, ins);
6795 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6796 /* finally clauses already have a seq point */
6797 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6798 MONO_ADD_INS (tblock, ins);
6801 /* todo: is a fault block unsafe to optimize? */
6802 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6803 tblock->flags |= BB_EXCEPTION_UNSAFE;
6807 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6809 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6811 /* catch and filter blocks get the exception object on the stack */
6812 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6813 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6814 MonoInst *dummy_use;
6816 /* mostly like handle_stack_args (), but just sets the input args */
6817 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6818 tblock->in_scount = 1;
6819 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6820 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6823 * Add a dummy use for the exvar so its liveness info will be
6827 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6829 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6830 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6831 tblock->flags |= BB_EXCEPTION_HANDLER;
6832 tblock->real_offset = clause->data.filter_offset;
6833 tblock->in_scount = 1;
6834 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6835 /* The filter block shares the exvar with the handler block */
6836 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6837 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6838 MONO_ADD_INS (tblock, ins);
6842 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6843 clause->data.catch_class &&
6844 cfg->generic_sharing_context &&
6845 mono_class_check_context_used (clause->data.catch_class)) {
6847 * In shared generic code with catch
6848 * clauses containing type variables
6849 * the exception handling code has to
6850 * be able to get to the rgctx.
6851 * Therefore we have to make sure that
6852 * the vtable/mrgctx argument (for
6853 * static or generic methods) or the
6854 * "this" argument (for non-static
6855 * methods) are live.
6857 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6858 mini_method_get_context (method)->method_inst ||
6859 method->klass->valuetype) {
6860 mono_get_vtable_var (cfg);
6862 MonoInst *dummy_use;
6864 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6869 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6870 cfg->cbb = start_bblock;
6871 cfg->args = arg_array;
6872 mono_save_args (cfg, sig, inline_args);
6875 /* FIRST CODE BLOCK */
6876 NEW_BBLOCK (cfg, bblock);
6877 bblock->cil_code = ip;
6881 ADD_BBLOCK (cfg, bblock);
6883 if (cfg->method == method) {
6884 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6885 if (breakpoint_id) {
6886 MONO_INST_NEW (cfg, ins, OP_BREAK);
6887 MONO_ADD_INS (bblock, ins);
6891 if (mono_security_cas_enabled ())
6892 secman = mono_security_manager_get_methods ();
6894 security = (secman && mono_security_method_has_declsec (method));
6895 /* at this point having security doesn't mean we have any code to generate */
6896 if (security && (cfg->method == method)) {
6897 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6898 * And we do not want to enter the next section (with allocation) if we
6899 * have nothing to generate */
6900 security = mono_declsec_get_demands (method, &actions);
6903 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6904 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6906 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6907 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6908 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6910 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6911 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6915 mono_custom_attrs_free (custom);
6918 custom = mono_custom_attrs_from_class (wrapped->klass);
6919 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6923 mono_custom_attrs_free (custom);
6926 /* not a P/Invoke after all */
6931 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6932 /* we use a separate basic block for the initialization code */
6933 NEW_BBLOCK (cfg, init_localsbb);
6934 cfg->bb_init = init_localsbb;
6935 init_localsbb->real_offset = cfg->real_offset;
6936 start_bblock->next_bb = init_localsbb;
6937 init_localsbb->next_bb = bblock;
6938 link_bblock (cfg, start_bblock, init_localsbb);
6939 link_bblock (cfg, init_localsbb, bblock);
6941 cfg->cbb = init_localsbb;
6943 start_bblock->next_bb = bblock;
6944 link_bblock (cfg, start_bblock, bblock);
6947 if (cfg->gsharedvt && cfg->method == method) {
6948 MonoGSharedVtMethodInfo *info;
6949 MonoInst *var, *locals_var;
6952 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6953 info->method = cfg->method;
6955 info->entries = g_ptr_array_new ();
6956 cfg->gsharedvt_info = info;
6958 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6959 /* prevent it from being register allocated */
6960 //var->flags |= MONO_INST_VOLATILE;
6961 cfg->gsharedvt_info_var = var;
6963 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6964 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6966 /* Allocate locals */
6967 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6968 /* prevent it from being register allocated */
6969 //locals_var->flags |= MONO_INST_VOLATILE;
6970 cfg->gsharedvt_locals_var = locals_var;
6972 dreg = alloc_ireg (cfg);
6973 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6975 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6976 ins->dreg = locals_var->dreg;
6978 MONO_ADD_INS (cfg->cbb, ins);
6979 cfg->gsharedvt_locals_var_ins = ins;
6981 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6984 ins->flags |= MONO_INST_INIT;
6988 /* at this point we know, if security is TRUE, that some code needs to be generated */
6989 if (security && (cfg->method == method)) {
6992 cfg->stat_cas_demand_generation++;
6994 if (actions.demand.blob) {
6995 /* Add code for SecurityAction.Demand */
6996 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6997 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6998 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6999 mono_emit_method_call (cfg, secman->demand, args, NULL);
7001 if (actions.noncasdemand.blob) {
7002 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7003 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7004 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7005 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7006 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7007 mono_emit_method_call (cfg, secman->demand, args, NULL);
7009 if (actions.demandchoice.blob) {
7010 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7011 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7012 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7013 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7014 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7018 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7020 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7023 if (mono_security_core_clr_enabled ()) {
7024 /* check if this is native code, e.g. an icall or a p/invoke */
7025 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7026 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7028 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7029 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7031 /* if this ia a native call then it can only be JITted from platform code */
7032 if ((icall || pinvk) && method->klass && method->klass->image) {
7033 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7034 MonoException *ex = icall ? mono_get_exception_security () :
7035 mono_get_exception_method_access ();
7036 emit_throw_exception (cfg, ex);
7043 CHECK_CFG_EXCEPTION;
7045 if (header->code_size == 0)
7048 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7053 if (cfg->method == method)
7054 mono_debug_init_method (cfg, bblock, breakpoint_id);
7056 for (n = 0; n < header->num_locals; ++n) {
7057 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7062 /* We force the vtable variable here for all shared methods
7063 for the possibility that they might show up in a stack
7064 trace where their exact instantiation is needed. */
7065 if (cfg->generic_sharing_context && method == cfg->method) {
7066 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7067 mini_method_get_context (method)->method_inst ||
7068 method->klass->valuetype) {
7069 mono_get_vtable_var (cfg);
7071 /* FIXME: Is there a better way to do this?
7072 We need the variable live for the duration
7073 of the whole method. */
7074 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7078 /* add a check for this != NULL to inlined methods */
7079 if (is_virtual_call) {
7082 NEW_ARGLOAD (cfg, arg_ins, 0);
7083 MONO_ADD_INS (cfg->cbb, arg_ins);
7084 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7087 skip_dead_blocks = !dont_verify;
7088 if (skip_dead_blocks) {
7089 original_bb = bb = mono_basic_block_split (method, &error);
7090 if (!mono_error_ok (&error)) {
7091 mono_error_cleanup (&error);
7097 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7098 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7101 start_new_bblock = 0;
7104 if (cfg->method == method)
7105 cfg->real_offset = ip - header->code;
7107 cfg->real_offset = inline_offset;
7112 if (start_new_bblock) {
7113 bblock->cil_length = ip - bblock->cil_code;
7114 if (start_new_bblock == 2) {
7115 g_assert (ip == tblock->cil_code);
7117 GET_BBLOCK (cfg, tblock, ip);
7119 bblock->next_bb = tblock;
7122 start_new_bblock = 0;
7123 for (i = 0; i < bblock->in_scount; ++i) {
7124 if (cfg->verbose_level > 3)
7125 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7126 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7130 g_slist_free (class_inits);
7133 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7134 link_bblock (cfg, bblock, tblock);
7135 if (sp != stack_start) {
7136 handle_stack_args (cfg, stack_start, sp - stack_start);
7138 CHECK_UNVERIFIABLE (cfg);
7140 bblock->next_bb = tblock;
7143 for (i = 0; i < bblock->in_scount; ++i) {
7144 if (cfg->verbose_level > 3)
7145 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7146 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7149 g_slist_free (class_inits);
7154 if (skip_dead_blocks) {
7155 int ip_offset = ip - header->code;
7157 if (ip_offset == bb->end)
7161 int op_size = mono_opcode_size (ip, end);
7162 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7164 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7166 if (ip_offset + op_size == bb->end) {
7167 MONO_INST_NEW (cfg, ins, OP_NOP);
7168 MONO_ADD_INS (bblock, ins);
7169 start_new_bblock = 1;
7177 * Sequence points are points where the debugger can place a breakpoint.
7178 * Currently, we generate these automatically at points where the IL
7181 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7183 * Make methods interruptable at the beginning, and at the targets of
7184 * backward branches.
7185 * Also, do this at the start of every bblock in methods with clauses too,
7186 * to be able to handle instructions with inprecise control flow like
7188 * Backward branches are handled at the end of method-to-ir ().
7190 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7192 /* Avoid sequence points on empty IL like .volatile */
7193 // FIXME: Enable this
7194 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7195 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7196 if (sp != stack_start)
7197 ins->flags |= MONO_INST_NONEMPTY_STACK;
7198 MONO_ADD_INS (cfg->cbb, ins);
7201 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7204 bblock->real_offset = cfg->real_offset;
7206 if ((cfg->method == method) && cfg->coverage_info) {
7207 guint32 cil_offset = ip - header->code;
7208 cfg->coverage_info->data [cil_offset].cil_code = ip;
7210 /* TODO: Use an increment here */
7211 #if defined(TARGET_X86)
7212 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7213 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7215 MONO_ADD_INS (cfg->cbb, ins);
7217 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7218 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7222 if (cfg->verbose_level > 3)
7223 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7227 if (seq_points && !sym_seq_points && sp != stack_start) {
7229 * The C# compiler uses these nops to notify the JIT that it should
7230 * insert seq points.
7232 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7233 MONO_ADD_INS (cfg->cbb, ins);
7235 if (cfg->keep_cil_nops)
7236 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7238 MONO_INST_NEW (cfg, ins, OP_NOP);
7240 MONO_ADD_INS (bblock, ins);
7243 if (should_insert_brekpoint (cfg->method)) {
7244 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7246 MONO_INST_NEW (cfg, ins, OP_NOP);
7249 MONO_ADD_INS (bblock, ins);
7255 CHECK_STACK_OVF (1);
7256 n = (*ip)-CEE_LDARG_0;
7258 EMIT_NEW_ARGLOAD (cfg, ins, n);
7266 CHECK_STACK_OVF (1);
7267 n = (*ip)-CEE_LDLOC_0;
7269 EMIT_NEW_LOCLOAD (cfg, ins, n);
7278 n = (*ip)-CEE_STLOC_0;
7281 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7283 emit_stloc_ir (cfg, sp, header, n);
7290 CHECK_STACK_OVF (1);
7293 EMIT_NEW_ARGLOAD (cfg, ins, n);
7299 CHECK_STACK_OVF (1);
7302 NEW_ARGLOADA (cfg, ins, n);
7303 MONO_ADD_INS (cfg->cbb, ins);
7313 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7315 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7320 CHECK_STACK_OVF (1);
7323 EMIT_NEW_LOCLOAD (cfg, ins, n);
7327 case CEE_LDLOCA_S: {
7328 unsigned char *tmp_ip;
7330 CHECK_STACK_OVF (1);
7331 CHECK_LOCAL (ip [1]);
7333 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7339 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7348 CHECK_LOCAL (ip [1]);
7349 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7351 emit_stloc_ir (cfg, sp, header, ip [1]);
7356 CHECK_STACK_OVF (1);
7357 EMIT_NEW_PCONST (cfg, ins, NULL);
7358 ins->type = STACK_OBJ;
7363 CHECK_STACK_OVF (1);
7364 EMIT_NEW_ICONST (cfg, ins, -1);
7377 CHECK_STACK_OVF (1);
7378 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7384 CHECK_STACK_OVF (1);
7386 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7392 CHECK_STACK_OVF (1);
7393 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7399 CHECK_STACK_OVF (1);
7400 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7401 ins->type = STACK_I8;
7402 ins->dreg = alloc_dreg (cfg, STACK_I8);
7404 ins->inst_l = (gint64)read64 (ip);
7405 MONO_ADD_INS (bblock, ins);
7411 gboolean use_aotconst = FALSE;
7413 #ifdef TARGET_POWERPC
7414 /* FIXME: Clean this up */
7415 if (cfg->compile_aot)
7416 use_aotconst = TRUE;
7419 /* FIXME: we should really allocate this only late in the compilation process */
7420 f = mono_domain_alloc (cfg->domain, sizeof (float));
7422 CHECK_STACK_OVF (1);
7428 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7430 dreg = alloc_freg (cfg);
7431 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7432 ins->type = STACK_R8;
7434 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7435 ins->type = STACK_R8;
7436 ins->dreg = alloc_dreg (cfg, STACK_R8);
7438 MONO_ADD_INS (bblock, ins);
7448 gboolean use_aotconst = FALSE;
7450 #ifdef TARGET_POWERPC
7451 /* FIXME: Clean this up */
7452 if (cfg->compile_aot)
7453 use_aotconst = TRUE;
7456 /* FIXME: we should really allocate this only late in the compilation process */
7457 d = mono_domain_alloc (cfg->domain, sizeof (double));
7459 CHECK_STACK_OVF (1);
7465 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7467 dreg = alloc_freg (cfg);
7468 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7469 ins->type = STACK_R8;
7471 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7472 ins->type = STACK_R8;
7473 ins->dreg = alloc_dreg (cfg, STACK_R8);
7475 MONO_ADD_INS (bblock, ins);
7484 MonoInst *temp, *store;
7486 CHECK_STACK_OVF (1);
7490 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7491 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7493 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7496 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7509 if (sp [0]->type == STACK_R8)
7510 /* we need to pop the value from the x86 FP stack */
7511 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7517 INLINE_FAILURE ("jmp");
7518 GSHAREDVT_FAILURE (*ip);
7521 if (stack_start != sp)
7523 token = read32 (ip + 1);
7524 /* FIXME: check the signature matches */
7525 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7527 if (!cmethod || mono_loader_get_last_error ())
7530 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7531 GENERIC_SHARING_FAILURE (CEE_JMP);
7533 if (mono_security_cas_enabled ())
7534 CHECK_CFG_EXCEPTION;
7536 if (ARCH_HAVE_OP_TAIL_CALL) {
7537 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7540 /* Handle tail calls similarly to calls */
7541 n = fsig->param_count + fsig->hasthis;
7543 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7544 call->method = cmethod;
7545 call->tail_call = TRUE;
7546 call->signature = mono_method_signature (cmethod);
7547 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7548 call->inst.inst_p0 = cmethod;
7549 for (i = 0; i < n; ++i)
7550 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7552 mono_arch_emit_call (cfg, call);
7553 MONO_ADD_INS (bblock, (MonoInst*)call);
7555 for (i = 0; i < num_args; ++i)
7556 /* Prevent arguments from being optimized away */
7557 arg_array [i]->flags |= MONO_INST_VOLATILE;
7559 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7560 ins = (MonoInst*)call;
7561 ins->inst_p0 = cmethod;
7562 MONO_ADD_INS (bblock, ins);
7566 start_new_bblock = 1;
7571 case CEE_CALLVIRT: {
7572 MonoInst *addr = NULL;
7573 MonoMethodSignature *fsig = NULL;
7575 int virtual = *ip == CEE_CALLVIRT;
7576 int calli = *ip == CEE_CALLI;
7577 gboolean pass_imt_from_rgctx = FALSE;
7578 MonoInst *imt_arg = NULL;
7579 MonoInst *keep_this_alive = NULL;
7580 gboolean pass_vtable = FALSE;
7581 gboolean pass_mrgctx = FALSE;
7582 MonoInst *vtable_arg = NULL;
7583 gboolean check_this = FALSE;
7584 gboolean supported_tail_call = FALSE;
7585 gboolean tail_call = FALSE;
7586 gboolean need_seq_point = FALSE;
7587 guint32 call_opcode = *ip;
7588 gboolean emit_widen = TRUE;
7589 gboolean push_res = TRUE;
7590 gboolean skip_ret = FALSE;
7591 gboolean delegate_invoke = FALSE;
7594 token = read32 (ip + 1);
7599 //GSHAREDVT_FAILURE (*ip);
7604 fsig = mini_get_signature (method, token, generic_context);
7605 n = fsig->param_count + fsig->hasthis;
7607 if (method->dynamic && fsig->pinvoke) {
7611 * This is a call through a function pointer using a pinvoke
7612 * signature. Have to create a wrapper and call that instead.
7613 * FIXME: This is very slow, need to create a wrapper at JIT time
7614 * instead based on the signature.
7616 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7617 EMIT_NEW_PCONST (cfg, args [1], fsig);
7619 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7622 MonoMethod *cil_method;
7624 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7625 cil_method = cmethod;
7627 if (constrained_call) {
7628 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7629 if (cfg->verbose_level > 2)
7630 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7631 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7632 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7633 cfg->generic_sharing_context)) {
7634 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7637 if (cfg->verbose_level > 2)
7638 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7640 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7642 * This is needed since get_method_constrained can't find
7643 * the method in klass representing a type var.
7644 * The type var is guaranteed to be a reference type in this
7647 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7648 g_assert (!cmethod->klass->valuetype);
7650 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7655 if (!cmethod || mono_loader_get_last_error ())
7657 if (!dont_verify && !cfg->skip_visibility) {
7658 MonoMethod *target_method = cil_method;
7659 if (method->is_inflated) {
7660 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7662 if (!mono_method_can_access_method (method_definition, target_method) &&
7663 !mono_method_can_access_method (method, cil_method))
7664 METHOD_ACCESS_FAILURE;
7667 if (mono_security_core_clr_enabled ())
7668 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7670 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7671 /* MS.NET seems to silently convert this to a callvirt */
7676 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7677 * converts to a callvirt.
7679 * tests/bug-515884.il is an example of this behavior
7681 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7682 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7683 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7687 if (!cmethod->klass->inited)
7688 if (!mono_class_init (cmethod->klass))
7689 TYPE_LOAD_ERROR (cmethod->klass);
7691 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7692 mini_class_is_system_array (cmethod->klass)) {
7693 array_rank = cmethod->klass->rank;
7694 fsig = mono_method_signature (cmethod);
7696 fsig = mono_method_signature (cmethod);
7701 if (fsig->pinvoke) {
7702 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7703 check_for_pending_exc, cfg->compile_aot);
7704 fsig = mono_method_signature (wrapper);
7705 } else if (constrained_call) {
7706 fsig = mono_method_signature (cmethod);
7708 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7712 mono_save_token_info (cfg, image, token, cil_method);
7714 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7716 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7717 * foo (bar (), baz ())
7718 * works correctly. MS does this also:
7719 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7720 * The problem with this approach is that the debugger will stop after all calls returning a value,
7721 * even for simple cases, like:
7724 /* Special case a few common successor opcodes */
7725 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7726 need_seq_point = TRUE;
7729 n = fsig->param_count + fsig->hasthis;
7731 /* Don't support calls made using type arguments for now */
7733 if (cfg->gsharedvt) {
7734 if (mini_is_gsharedvt_signature (cfg, fsig))
7735 GSHAREDVT_FAILURE (*ip);
7739 if (mono_security_cas_enabled ()) {
7740 if (check_linkdemand (cfg, method, cmethod))
7741 INLINE_FAILURE ("linkdemand");
7742 CHECK_CFG_EXCEPTION;
7745 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7746 g_assert_not_reached ();
7749 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7752 if (!cfg->generic_sharing_context && cmethod)
7753 g_assert (!mono_method_check_context_used (cmethod));
7757 //g_assert (!virtual || fsig->hasthis);
7761 if (constrained_call) {
7762 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7764 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7766 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7767 /* The 'Own method' case below */
7768 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7769 /* 'The type parameter is instantiated as a reference type' case below. */
7770 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7771 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7772 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7773 MonoInst *args [16];
7776 * This case handles calls to
7777 * - object:ToString()/Equals()/GetHashCode(),
7778 * - System.IComparable<T>:CompareTo()
7779 * - System.IEquatable<T>:Equals ()
7780 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7784 if (mono_method_check_context_used (cmethod))
7785 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7787 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7788 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7790 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7791 if (fsig->hasthis && fsig->param_count) {
7792 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7793 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7794 ins->dreg = alloc_preg (cfg);
7795 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7796 MONO_ADD_INS (cfg->cbb, ins);
7799 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7802 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7804 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7805 addr_reg = ins->dreg;
7806 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7808 EMIT_NEW_ICONST (cfg, args [3], 0);
7809 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7812 EMIT_NEW_ICONST (cfg, args [3], 0);
7813 EMIT_NEW_ICONST (cfg, args [4], 0);
7815 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7818 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7819 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7820 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7824 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7825 MONO_ADD_INS (cfg->cbb, add);
7827 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7828 MONO_ADD_INS (cfg->cbb, ins);
7829 /* ins represents the call result */
7834 GSHAREDVT_FAILURE (*ip);
7838 * We have the `constrained.' prefix opcode.
7840 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7842 * The type parameter is instantiated as a valuetype,
7843 * but that type doesn't override the method we're
7844 * calling, so we need to box `this'.
7846 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7847 ins->klass = constrained_call;
7848 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7849 CHECK_CFG_EXCEPTION;
7850 } else if (!constrained_call->valuetype) {
7851 int dreg = alloc_ireg_ref (cfg);
7854 * The type parameter is instantiated as a reference
7855 * type. We have a managed pointer on the stack, so
7856 * we need to dereference it here.
7858 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7859 ins->type = STACK_OBJ;
7862 if (cmethod->klass->valuetype) {
7865 /* Interface method */
7868 mono_class_setup_vtable (constrained_call);
7869 CHECK_TYPELOAD (constrained_call);
7870 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7872 TYPE_LOAD_ERROR (constrained_call);
7873 slot = mono_method_get_vtable_slot (cmethod);
7875 TYPE_LOAD_ERROR (cmethod->klass);
7876 cmethod = constrained_call->vtable [ioffset + slot];
7878 if (cmethod->klass == mono_defaults.enum_class) {
7879 /* Enum implements some interfaces, so treat this as the first case */
7880 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7881 ins->klass = constrained_call;
7882 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7883 CHECK_CFG_EXCEPTION;
7888 constrained_call = NULL;
7891 if (!calli && check_call_signature (cfg, fsig, sp))
7894 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7895 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7896 delegate_invoke = TRUE;
7899 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7901 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7902 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7910 * If the callee is a shared method, then its static cctor
7911 * might not get called after the call was patched.
7913 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7914 emit_generic_class_init (cfg, cmethod->klass);
7915 CHECK_TYPELOAD (cmethod->klass);
7919 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7921 if (cfg->generic_sharing_context && cmethod) {
7922 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7924 context_used = mini_method_check_context_used (cfg, cmethod);
7926 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7927 /* Generic method interface
7928 calls are resolved via a
7929 helper function and don't
7931 if (!cmethod_context || !cmethod_context->method_inst)
7932 pass_imt_from_rgctx = TRUE;
7936 * If a shared method calls another
7937 * shared method then the caller must
7938 * have a generic sharing context
7939 * because the magic trampoline
7940 * requires it. FIXME: We shouldn't
7941 * have to force the vtable/mrgctx
7942 * variable here. Instead there
7943 * should be a flag in the cfg to
7944 * request a generic sharing context.
7947 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7948 mono_get_vtable_var (cfg);
7953 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7955 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7957 CHECK_TYPELOAD (cmethod->klass);
7958 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7963 g_assert (!vtable_arg);
7965 if (!cfg->compile_aot) {
7967 * emit_get_rgctx_method () calls mono_class_vtable () so check
7968 * for type load errors before.
7970 mono_class_setup_vtable (cmethod->klass);
7971 CHECK_TYPELOAD (cmethod->klass);
7974 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7976 /* !marshalbyref is needed to properly handle generic methods + remoting */
7977 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7978 MONO_METHOD_IS_FINAL (cmethod)) &&
7979 !mono_class_is_marshalbyref (cmethod->klass)) {
7986 if (pass_imt_from_rgctx) {
7987 g_assert (!pass_vtable);
7990 imt_arg = emit_get_rgctx_method (cfg, context_used,
7991 cmethod, MONO_RGCTX_INFO_METHOD);
7995 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7997 /* Calling virtual generic methods */
7998 if (cmethod && virtual &&
7999 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8000 !(MONO_METHOD_IS_FINAL (cmethod) &&
8001 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8002 fsig->generic_param_count &&
8003 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8004 MonoInst *this_temp, *this_arg_temp, *store;
8005 MonoInst *iargs [4];
8006 gboolean use_imt = FALSE;
8008 g_assert (fsig->is_inflated);
8010 /* Prevent inlining of methods that contain indirect calls */
8011 INLINE_FAILURE ("virtual generic call");
8013 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8014 GSHAREDVT_FAILURE (*ip);
8016 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8017 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8022 g_assert (!imt_arg);
8024 g_assert (cmethod->is_inflated);
8025 imt_arg = emit_get_rgctx_method (cfg, context_used,
8026 cmethod, MONO_RGCTX_INFO_METHOD);
8027 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8029 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8030 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8031 MONO_ADD_INS (bblock, store);
8033 /* FIXME: This should be a managed pointer */
8034 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8036 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8037 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8038 cmethod, MONO_RGCTX_INFO_METHOD);
8039 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8040 addr = mono_emit_jit_icall (cfg,
8041 mono_helper_compile_generic_method, iargs);
8043 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8045 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8052 * Implement a workaround for the inherent races involved in locking:
8058 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8059 * try block, the Exit () won't be executed, see:
8060 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8061 * To work around this, we extend such try blocks to include the last x bytes
8062 * of the Monitor.Enter () call.
8064 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8065 MonoBasicBlock *tbb;
8067 GET_BBLOCK (cfg, tbb, ip + 5);
8069 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8070 * from Monitor.Enter like ArgumentNullException.
8072 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8073 /* Mark this bblock as needing to be extended */
8074 tbb->extend_try_block = TRUE;
8078 /* Conversion to a JIT intrinsic */
8079 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8081 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8082 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8089 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8090 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8091 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8092 !g_list_find (dont_inline, cmethod)) {
8094 gboolean always = FALSE;
8096 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8097 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8098 /* Prevent inlining of methods that call wrappers */
8099 INLINE_FAILURE ("wrapper call");
8100 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8104 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8106 cfg->real_offset += 5;
8109 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8110 /* *sp is already set by inline_method */
8115 inline_costs += costs;
8121 /* Tail recursion elimination */
8122 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8123 gboolean has_vtargs = FALSE;
8126 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8127 INLINE_FAILURE ("tail call");
8129 /* keep it simple */
8130 for (i = fsig->param_count - 1; i >= 0; i--) {
8131 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8136 for (i = 0; i < n; ++i)
8137 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8138 MONO_INST_NEW (cfg, ins, OP_BR);
8139 MONO_ADD_INS (bblock, ins);
8140 tblock = start_bblock->out_bb [0];
8141 link_bblock (cfg, bblock, tblock);
8142 ins->inst_target_bb = tblock;
8143 start_new_bblock = 1;
8145 /* skip the CEE_RET, too */
8146 if (ip_in_bb (cfg, bblock, ip + 5))
8153 inline_costs += 10 * num_calls++;
8156 * Making generic calls out of gsharedvt methods.
8158 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
8159 MonoRgctxInfoType info_type;
8162 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8163 //GSHAREDVT_FAILURE (*ip);
8164 // disable for possible remoting calls
8165 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8166 GSHAREDVT_FAILURE (*ip);
8167 if (fsig->generic_param_count) {
8168 /* virtual generic call */
8169 g_assert (mono_use_imt);
8170 g_assert (!imt_arg);
8171 /* Same as the virtual generic case above */
8172 imt_arg = emit_get_rgctx_method (cfg, context_used,
8173 cmethod, MONO_RGCTX_INFO_METHOD);
8174 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8179 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8180 /* test_0_multi_dim_arrays () in gshared.cs */
8181 GSHAREDVT_FAILURE (*ip);
8183 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8184 keep_this_alive = sp [0];
8186 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8187 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8189 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8190 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8192 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8194 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8196 * We pass the address to the gsharedvt trampoline in the rgctx reg
8198 MonoInst *callee = addr;
8200 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8202 GSHAREDVT_FAILURE (*ip);
8204 addr = emit_get_rgctx_sig (cfg, context_used,
8205 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8206 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8210 /* Generic sharing */
8211 /* FIXME: only do this for generic methods if
8212 they are not shared! */
8213 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8214 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8215 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8216 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8217 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8218 INLINE_FAILURE ("gshared");
8220 g_assert (cfg->generic_sharing_context && cmethod);
8224 * We are compiling a call to a
8225 * generic method from shared code,
8226 * which means that we have to look up
8227 * the method in the rgctx and do an
8231 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8233 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8234 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8238 /* Indirect calls */
8240 if (call_opcode == CEE_CALL)
8241 g_assert (context_used);
8242 else if (call_opcode == CEE_CALLI)
8243 g_assert (!vtable_arg);
8245 /* FIXME: what the hell is this??? */
8246 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8247 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8249 /* Prevent inlining of methods with indirect calls */
8250 INLINE_FAILURE ("indirect call");
8252 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8257 * Instead of emitting an indirect call, emit a direct call
8258 * with the contents of the aotconst as the patch info.
8260 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8261 info_type = addr->inst_c1;
8262 info_data = addr->inst_p0;
8264 info_type = addr->inst_right->inst_c1;
8265 info_data = addr->inst_right->inst_left;
8268 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8269 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8274 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8282 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8283 MonoInst *val = sp [fsig->param_count];
8285 if (val->type == STACK_OBJ) {
8286 MonoInst *iargs [2];
8291 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8294 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8295 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8296 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8297 emit_write_barrier (cfg, addr, val);
8298 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8299 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8301 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8302 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8303 if (!cmethod->klass->element_class->valuetype && !readonly)
8304 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8305 CHECK_TYPELOAD (cmethod->klass);
8308 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8311 g_assert_not_reached ();
8318 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8322 /* Tail prefix / tail call optimization */
8324 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8325 /* FIXME: runtime generic context pointer for jumps? */
8326 /* FIXME: handle this for generic sharing eventually */
8327 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8328 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8329 supported_tail_call = TRUE;
8331 if (supported_tail_call) {
8334 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8335 INLINE_FAILURE ("tail call");
8337 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8339 if (ARCH_HAVE_OP_TAIL_CALL) {
8340 /* Handle tail calls similarly to normal calls */
8343 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8344 call->tail_call = TRUE;
8345 call->method = cmethod;
8346 call->signature = mono_method_signature (cmethod);
8349 * We implement tail calls by storing the actual arguments into the
8350 * argument variables, then emitting a CEE_JMP.
8352 for (i = 0; i < n; ++i) {
8353 /* Prevent argument from being register allocated */
8354 arg_array [i]->flags |= MONO_INST_VOLATILE;
8355 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8357 ins = (MonoInst*)call;
8358 ins->inst_p0 = cmethod;
8359 ins->inst_p1 = arg_array [0];
8360 MONO_ADD_INS (bblock, ins);
8361 link_bblock (cfg, bblock, end_bblock);
8362 start_new_bblock = 1;
8364 // FIXME: Eliminate unreachable epilogs
8367 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8368 * only reachable from this call.
8370 GET_BBLOCK (cfg, tblock, ip + 5);
8371 if (tblock == bblock || tblock->in_count == 0)
8380 * Synchronized wrappers.
8381 * It's hard to determine where to replace a method with its synchronized
8382 * wrapper without causing an infinite recursion. The current solution is
8383 * to add the synchronized wrapper in the trampolines, and to
8384 * change the called method to a dummy wrapper, and resolve that wrapper
8385 * to the real method in mono_jit_compile_method ().
8387 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8388 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8389 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8390 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8394 INLINE_FAILURE ("call");
8395 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8396 imt_arg, vtable_arg);
8399 link_bblock (cfg, bblock, end_bblock);
8400 start_new_bblock = 1;
8402 // FIXME: Eliminate unreachable epilogs
8405 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8406 * only reachable from this call.
8408 GET_BBLOCK (cfg, tblock, ip + 5);
8409 if (tblock == bblock || tblock->in_count == 0)
8416 /* End of call, INS should contain the result of the call, if any */
8418 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8421 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8426 if (keep_this_alive) {
8427 MonoInst *dummy_use;
8429 /* See mono_emit_method_call_full () */
8430 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8433 CHECK_CFG_EXCEPTION;
8437 g_assert (*ip == CEE_RET);
8441 constrained_call = NULL;
8443 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8447 if (cfg->method != method) {
8448 /* return from inlined method */
8450 * If in_count == 0, that means the ret is unreachable due to
8451 * being preceded by a throw. In that case, inline_method () will
8452 * handle setting the return value
8453 * (test case: test_0_inline_throw ()).
8455 if (return_var && cfg->cbb->in_count) {
8456 MonoType *ret_type = mono_method_signature (method)->ret;
8462 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8465 //g_assert (returnvar != -1);
8466 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8467 cfg->ret_var_set = TRUE;
8470 if (cfg->lmf_var && cfg->cbb->in_count)
8474 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8476 if (seq_points && !sym_seq_points) {
8478 * Place a seq point here too even though the IL stack is not
8479 * empty, so a step over on
8482 * will work correctly.
8484 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8485 MONO_ADD_INS (cfg->cbb, ins);
8488 g_assert (!return_var);
8492 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8495 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8498 if (!cfg->vret_addr) {
8501 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8503 EMIT_NEW_RETLOADA (cfg, ret_addr);
8505 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8506 ins->klass = mono_class_from_mono_type (ret_type);
8509 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8510 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8511 MonoInst *iargs [1];
8515 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8516 mono_arch_emit_setret (cfg, method, conv);
8518 mono_arch_emit_setret (cfg, method, *sp);
8521 mono_arch_emit_setret (cfg, method, *sp);
8526 if (sp != stack_start)
8528 MONO_INST_NEW (cfg, ins, OP_BR);
8530 ins->inst_target_bb = end_bblock;
8531 MONO_ADD_INS (bblock, ins);
8532 link_bblock (cfg, bblock, end_bblock);
8533 start_new_bblock = 1;
8537 MONO_INST_NEW (cfg, ins, OP_BR);
8539 target = ip + 1 + (signed char)(*ip);
8541 GET_BBLOCK (cfg, tblock, target);
8542 link_bblock (cfg, bblock, tblock);
8543 ins->inst_target_bb = tblock;
8544 if (sp != stack_start) {
8545 handle_stack_args (cfg, stack_start, sp - stack_start);
8547 CHECK_UNVERIFIABLE (cfg);
8549 MONO_ADD_INS (bblock, ins);
8550 start_new_bblock = 1;
8551 inline_costs += BRANCH_COST;
8565 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8567 target = ip + 1 + *(signed char*)ip;
8573 inline_costs += BRANCH_COST;
8577 MONO_INST_NEW (cfg, ins, OP_BR);
8580 target = ip + 4 + (gint32)read32(ip);
8582 GET_BBLOCK (cfg, tblock, target);
8583 link_bblock (cfg, bblock, tblock);
8584 ins->inst_target_bb = tblock;
8585 if (sp != stack_start) {
8586 handle_stack_args (cfg, stack_start, sp - stack_start);
8588 CHECK_UNVERIFIABLE (cfg);
8591 MONO_ADD_INS (bblock, ins);
8593 start_new_bblock = 1;
8594 inline_costs += BRANCH_COST;
8601 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8602 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8603 guint32 opsize = is_short ? 1 : 4;
8605 CHECK_OPSIZE (opsize);
8607 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8610 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8615 GET_BBLOCK (cfg, tblock, target);
8616 link_bblock (cfg, bblock, tblock);
8617 GET_BBLOCK (cfg, tblock, ip);
8618 link_bblock (cfg, bblock, tblock);
8620 if (sp != stack_start) {
8621 handle_stack_args (cfg, stack_start, sp - stack_start);
8622 CHECK_UNVERIFIABLE (cfg);
8625 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8626 cmp->sreg1 = sp [0]->dreg;
8627 type_from_op (cmp, sp [0], NULL);
8630 #if SIZEOF_REGISTER == 4
8631 if (cmp->opcode == OP_LCOMPARE_IMM) {
8632 /* Convert it to OP_LCOMPARE */
8633 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8634 ins->type = STACK_I8;
8635 ins->dreg = alloc_dreg (cfg, STACK_I8);
8637 MONO_ADD_INS (bblock, ins);
8638 cmp->opcode = OP_LCOMPARE;
8639 cmp->sreg2 = ins->dreg;
8642 MONO_ADD_INS (bblock, cmp);
8644 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8645 type_from_op (ins, sp [0], NULL);
8646 MONO_ADD_INS (bblock, ins);
8647 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8648 GET_BBLOCK (cfg, tblock, target);
8649 ins->inst_true_bb = tblock;
8650 GET_BBLOCK (cfg, tblock, ip);
8651 ins->inst_false_bb = tblock;
8652 start_new_bblock = 2;
8655 inline_costs += BRANCH_COST;
8670 MONO_INST_NEW (cfg, ins, *ip);
8672 target = ip + 4 + (gint32)read32(ip);
8678 inline_costs += BRANCH_COST;
8682 MonoBasicBlock **targets;
8683 MonoBasicBlock *default_bblock;
8684 MonoJumpInfoBBTable *table;
8685 int offset_reg = alloc_preg (cfg);
8686 int target_reg = alloc_preg (cfg);
8687 int table_reg = alloc_preg (cfg);
8688 int sum_reg = alloc_preg (cfg);
8689 gboolean use_op_switch;
8693 n = read32 (ip + 1);
8696 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8700 CHECK_OPSIZE (n * sizeof (guint32));
8701 target = ip + n * sizeof (guint32);
8703 GET_BBLOCK (cfg, default_bblock, target);
8704 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8706 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8707 for (i = 0; i < n; ++i) {
8708 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8709 targets [i] = tblock;
8710 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8714 if (sp != stack_start) {
8716 * Link the current bb with the targets as well, so handle_stack_args
8717 * will set their in_stack correctly.
8719 link_bblock (cfg, bblock, default_bblock);
8720 for (i = 0; i < n; ++i)
8721 link_bblock (cfg, bblock, targets [i]);
8723 handle_stack_args (cfg, stack_start, sp - stack_start);
8725 CHECK_UNVERIFIABLE (cfg);
8728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8732 for (i = 0; i < n; ++i)
8733 link_bblock (cfg, bblock, targets [i]);
8735 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8736 table->table = targets;
8737 table->table_size = n;
8739 use_op_switch = FALSE;
8741 /* ARM implements SWITCH statements differently */
8742 /* FIXME: Make it use the generic implementation */
8743 if (!cfg->compile_aot)
8744 use_op_switch = TRUE;
8747 if (COMPILE_LLVM (cfg))
8748 use_op_switch = TRUE;
8750 cfg->cbb->has_jump_table = 1;
8752 if (use_op_switch) {
8753 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8754 ins->sreg1 = src1->dreg;
8755 ins->inst_p0 = table;
8756 ins->inst_many_bb = targets;
8757 ins->klass = GUINT_TO_POINTER (n);
8758 MONO_ADD_INS (cfg->cbb, ins);
8760 if (sizeof (gpointer) == 8)
8761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8765 #if SIZEOF_REGISTER == 8
8766 /* The upper word might not be zero, and we add it to a 64 bit address later */
8767 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8770 if (cfg->compile_aot) {
8771 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8773 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8774 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8775 ins->inst_p0 = table;
8776 ins->dreg = table_reg;
8777 MONO_ADD_INS (cfg->cbb, ins);
8780 /* FIXME: Use load_memindex */
8781 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8783 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8785 start_new_bblock = 1;
8786 inline_costs += (BRANCH_COST * 2);
8806 dreg = alloc_freg (cfg);
8809 dreg = alloc_lreg (cfg);
8812 dreg = alloc_ireg_ref (cfg);
8815 dreg = alloc_preg (cfg);
8818 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8819 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8820 ins->flags |= ins_flag;
8822 MONO_ADD_INS (bblock, ins);
8824 if (ins->flags & MONO_INST_VOLATILE) {
8825 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8826 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8827 emit_memory_barrier (cfg, FullBarrier);
8842 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8843 ins->flags |= ins_flag;
8846 if (ins->flags & MONO_INST_VOLATILE) {
8847 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8848 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
8849 emit_memory_barrier (cfg, FullBarrier);
8852 MONO_ADD_INS (bblock, ins);
8854 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8855 emit_write_barrier (cfg, sp [0], sp [1]);
8864 MONO_INST_NEW (cfg, ins, (*ip));
8866 ins->sreg1 = sp [0]->dreg;
8867 ins->sreg2 = sp [1]->dreg;
8868 type_from_op (ins, sp [0], sp [1]);
8870 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8872 /* Use the immediate opcodes if possible */
8873 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8874 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8875 if (imm_opcode != -1) {
8876 ins->opcode = imm_opcode;
8877 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8880 sp [1]->opcode = OP_NOP;
8884 MONO_ADD_INS ((cfg)->cbb, (ins));
8886 *sp++ = mono_decompose_opcode (cfg, ins);
8903 MONO_INST_NEW (cfg, ins, (*ip));
8905 ins->sreg1 = sp [0]->dreg;
8906 ins->sreg2 = sp [1]->dreg;
8907 type_from_op (ins, sp [0], sp [1]);
8909 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8910 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8912 /* FIXME: Pass opcode to is_inst_imm */
8914 /* Use the immediate opcodes if possible */
8915 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8918 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8919 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8920 /* Keep emulated opcodes which are optimized away later */
8921 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8922 imm_opcode = mono_op_to_op_imm (ins->opcode);
8925 if (imm_opcode != -1) {
8926 ins->opcode = imm_opcode;
8927 if (sp [1]->opcode == OP_I8CONST) {
8928 #if SIZEOF_REGISTER == 8
8929 ins->inst_imm = sp [1]->inst_l;
8931 ins->inst_ls_word = sp [1]->inst_ls_word;
8932 ins->inst_ms_word = sp [1]->inst_ms_word;
8936 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8939 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8940 if (sp [1]->next == NULL)
8941 sp [1]->opcode = OP_NOP;
8944 MONO_ADD_INS ((cfg)->cbb, (ins));
8946 *sp++ = mono_decompose_opcode (cfg, ins);
8959 case CEE_CONV_OVF_I8:
8960 case CEE_CONV_OVF_U8:
8964 /* Special case this earlier so we have long constants in the IR */
8965 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8966 int data = sp [-1]->inst_c0;
8967 sp [-1]->opcode = OP_I8CONST;
8968 sp [-1]->type = STACK_I8;
8969 #if SIZEOF_REGISTER == 8
8970 if ((*ip) == CEE_CONV_U8)
8971 sp [-1]->inst_c0 = (guint32)data;
8973 sp [-1]->inst_c0 = data;
8975 sp [-1]->inst_ls_word = data;
8976 if ((*ip) == CEE_CONV_U8)
8977 sp [-1]->inst_ms_word = 0;
8979 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8981 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8988 case CEE_CONV_OVF_I4:
8989 case CEE_CONV_OVF_I1:
8990 case CEE_CONV_OVF_I2:
8991 case CEE_CONV_OVF_I:
8992 case CEE_CONV_OVF_U:
8995 if (sp [-1]->type == STACK_R8) {
8996 ADD_UNOP (CEE_CONV_OVF_I8);
9003 case CEE_CONV_OVF_U1:
9004 case CEE_CONV_OVF_U2:
9005 case CEE_CONV_OVF_U4:
9008 if (sp [-1]->type == STACK_R8) {
9009 ADD_UNOP (CEE_CONV_OVF_U8);
9016 case CEE_CONV_OVF_I1_UN:
9017 case CEE_CONV_OVF_I2_UN:
9018 case CEE_CONV_OVF_I4_UN:
9019 case CEE_CONV_OVF_I8_UN:
9020 case CEE_CONV_OVF_U1_UN:
9021 case CEE_CONV_OVF_U2_UN:
9022 case CEE_CONV_OVF_U4_UN:
9023 case CEE_CONV_OVF_U8_UN:
9024 case CEE_CONV_OVF_I_UN:
9025 case CEE_CONV_OVF_U_UN:
9032 CHECK_CFG_EXCEPTION;
9036 case CEE_ADD_OVF_UN:
9038 case CEE_MUL_OVF_UN:
9040 case CEE_SUB_OVF_UN:
9046 GSHAREDVT_FAILURE (*ip);
9049 token = read32 (ip + 1);
9050 klass = mini_get_class (method, token, generic_context);
9051 CHECK_TYPELOAD (klass);
9053 if (generic_class_is_reference_type (cfg, klass)) {
9054 MonoInst *store, *load;
9055 int dreg = alloc_ireg_ref (cfg);
9057 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9058 load->flags |= ins_flag;
9059 MONO_ADD_INS (cfg->cbb, load);
9061 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9062 store->flags |= ins_flag;
9063 MONO_ADD_INS (cfg->cbb, store);
9065 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9066 emit_write_barrier (cfg, sp [0], sp [1]);
9068 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9080 token = read32 (ip + 1);
9081 klass = mini_get_class (method, token, generic_context);
9082 CHECK_TYPELOAD (klass);
9084 /* Optimize the common ldobj+stloc combination */
9094 loc_index = ip [5] - CEE_STLOC_0;
9101 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9102 CHECK_LOCAL (loc_index);
9104 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9105 ins->dreg = cfg->locals [loc_index]->dreg;
9111 /* Optimize the ldobj+stobj combination */
9112 /* The reference case ends up being a load+store anyway */
9113 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9118 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9125 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9134 CHECK_STACK_OVF (1);
9136 n = read32 (ip + 1);
9138 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9139 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9140 ins->type = STACK_OBJ;
9143 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9144 MonoInst *iargs [1];
9146 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9147 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9149 if (cfg->opt & MONO_OPT_SHARED) {
9150 MonoInst *iargs [3];
9152 if (cfg->compile_aot) {
9153 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9155 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9156 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9157 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9158 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9159 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9161 if (bblock->out_of_line) {
9162 MonoInst *iargs [2];
9164 if (image == mono_defaults.corlib) {
9166 * Avoid relocations in AOT and save some space by using a
9167 * version of helper_ldstr specialized to mscorlib.
9169 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9170 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9172 /* Avoid creating the string object */
9173 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9174 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9175 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9179 if (cfg->compile_aot) {
9180 NEW_LDSTRCONST (cfg, ins, image, n);
9182 MONO_ADD_INS (bblock, ins);
9185 NEW_PCONST (cfg, ins, NULL);
9186 ins->type = STACK_OBJ;
9187 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9189 OUT_OF_MEMORY_FAILURE;
9192 MONO_ADD_INS (bblock, ins);
9201 MonoInst *iargs [2];
9202 MonoMethodSignature *fsig;
9205 MonoInst *vtable_arg = NULL;
9208 token = read32 (ip + 1);
9209 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9210 if (!cmethod || mono_loader_get_last_error ())
9212 fsig = mono_method_get_signature (cmethod, image, token);
9216 mono_save_token_info (cfg, image, token, cmethod);
9218 if (!mono_class_init (cmethod->klass))
9219 TYPE_LOAD_ERROR (cmethod->klass);
9221 context_used = mini_method_check_context_used (cfg, cmethod);
9223 if (mono_security_cas_enabled ()) {
9224 if (check_linkdemand (cfg, method, cmethod))
9225 INLINE_FAILURE ("linkdemand");
9226 CHECK_CFG_EXCEPTION;
9227 } else if (mono_security_core_clr_enabled ()) {
9228 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9231 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9232 emit_generic_class_init (cfg, cmethod->klass);
9233 CHECK_TYPELOAD (cmethod->klass);
9237 if (cfg->gsharedvt) {
9238 if (mini_is_gsharedvt_variable_signature (sig))
9239 GSHAREDVT_FAILURE (*ip);
9243 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9244 mono_method_is_generic_sharable (cmethod, TRUE)) {
9245 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9246 mono_class_vtable (cfg->domain, cmethod->klass);
9247 CHECK_TYPELOAD (cmethod->klass);
9249 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9250 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9253 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9254 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9256 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9258 CHECK_TYPELOAD (cmethod->klass);
9259 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9264 n = fsig->param_count;
9268 * Generate smaller code for the common newobj <exception> instruction in
9269 * argument checking code.
9271 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9272 is_exception_class (cmethod->klass) && n <= 2 &&
9273 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9274 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9275 MonoInst *iargs [3];
9277 g_assert (!vtable_arg);
9281 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9284 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9288 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9293 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9296 g_assert_not_reached ();
9304 /* move the args to allow room for 'this' in the first position */
9310 /* check_call_signature () requires sp[0] to be set */
9311 this_ins.type = STACK_OBJ;
9313 if (check_call_signature (cfg, fsig, sp))
9318 if (mini_class_is_system_array (cmethod->klass)) {
9319 g_assert (!vtable_arg);
9321 *sp = emit_get_rgctx_method (cfg, context_used,
9322 cmethod, MONO_RGCTX_INFO_METHOD);
9324 /* Avoid varargs in the common case */
9325 if (fsig->param_count == 1)
9326 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9327 else if (fsig->param_count == 2)
9328 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9329 else if (fsig->param_count == 3)
9330 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9331 else if (fsig->param_count == 4)
9332 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9334 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9335 } else if (cmethod->string_ctor) {
9336 g_assert (!context_used);
9337 g_assert (!vtable_arg);
9338 /* we simply pass a null pointer */
9339 EMIT_NEW_PCONST (cfg, *sp, NULL);
9340 /* now call the string ctor */
9341 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9343 MonoInst* callvirt_this_arg = NULL;
9345 if (cmethod->klass->valuetype) {
9346 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9347 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9348 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9353 * The code generated by mini_emit_virtual_call () expects
9354 * iargs [0] to be a boxed instance, but luckily the vcall
9355 * will be transformed into a normal call there.
9357 } else if (context_used) {
9358 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9361 MonoVTable *vtable = NULL;
9363 if (!cfg->compile_aot)
9364 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9365 CHECK_TYPELOAD (cmethod->klass);
9368 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9369 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9370 * As a workaround, we call class cctors before allocating objects.
9372 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9373 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9374 if (cfg->verbose_level > 2)
9375 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9376 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9379 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9382 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9385 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9387 /* Now call the actual ctor */
9388 /* Avoid virtual calls to ctors if possible */
9389 if (mono_class_is_marshalbyref (cmethod->klass))
9390 callvirt_this_arg = sp [0];
9393 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9394 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9395 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9400 CHECK_CFG_EXCEPTION;
9401 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9402 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9403 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9404 !g_list_find (dont_inline, cmethod)) {
9407 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9408 cfg->real_offset += 5;
9411 inline_costs += costs - 5;
9413 INLINE_FAILURE ("inline failure");
9414 // FIXME-VT: Clean this up
9415 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9416 GSHAREDVT_FAILURE(*ip);
9417 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9419 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9422 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9423 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9424 } else if (context_used &&
9425 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9426 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9427 MonoInst *cmethod_addr;
9429 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9430 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9432 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9434 INLINE_FAILURE ("ctor call");
9435 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9436 callvirt_this_arg, NULL, vtable_arg);
9440 if (alloc == NULL) {
9442 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9443 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9457 token = read32 (ip + 1);
9458 klass = mini_get_class (method, token, generic_context);
9459 CHECK_TYPELOAD (klass);
9460 if (sp [0]->type != STACK_OBJ)
9463 context_used = mini_class_check_context_used (cfg, klass);
9465 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9466 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9473 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9476 if (cfg->compile_aot)
9477 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9479 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9481 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9483 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9484 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9485 reset_cast_details (cfg);
9488 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9489 MonoMethod *mono_castclass;
9490 MonoInst *iargs [1];
9493 mono_castclass = mono_marshal_get_castclass (klass);
9496 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9497 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9498 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9499 reset_cast_details (cfg);
9500 CHECK_CFG_EXCEPTION;
9501 g_assert (costs > 0);
9504 cfg->real_offset += 5;
9509 inline_costs += costs;
9512 ins = handle_castclass (cfg, klass, *sp, context_used);
9513 CHECK_CFG_EXCEPTION;
9523 token = read32 (ip + 1);
9524 klass = mini_get_class (method, token, generic_context);
9525 CHECK_TYPELOAD (klass);
9526 if (sp [0]->type != STACK_OBJ)
9529 context_used = mini_class_check_context_used (cfg, klass);
9531 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9532 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9539 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9542 if (cfg->compile_aot)
9543 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9545 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9547 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9550 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9551 MonoMethod *mono_isinst;
9552 MonoInst *iargs [1];
9555 mono_isinst = mono_marshal_get_isinst (klass);
9558 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9559 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9560 CHECK_CFG_EXCEPTION;
9561 g_assert (costs > 0);
9564 cfg->real_offset += 5;
9569 inline_costs += costs;
9572 ins = handle_isinst (cfg, klass, *sp, context_used);
9573 CHECK_CFG_EXCEPTION;
9580 case CEE_UNBOX_ANY: {
9584 token = read32 (ip + 1);
9585 klass = mini_get_class (method, token, generic_context);
9586 CHECK_TYPELOAD (klass);
9588 mono_save_token_info (cfg, image, token, klass);
9590 context_used = mini_class_check_context_used (cfg, klass);
9592 if (mini_is_gsharedvt_klass (cfg, klass)) {
9593 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9601 if (generic_class_is_reference_type (cfg, klass)) {
9602 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9603 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9604 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9611 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9614 /*FIXME AOT support*/
9615 if (cfg->compile_aot)
9616 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9618 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9620 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9621 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9624 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9625 MonoMethod *mono_castclass;
9626 MonoInst *iargs [1];
9629 mono_castclass = mono_marshal_get_castclass (klass);
9632 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9633 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9634 CHECK_CFG_EXCEPTION;
9635 g_assert (costs > 0);
9638 cfg->real_offset += 5;
9642 inline_costs += costs;
9644 ins = handle_castclass (cfg, klass, *sp, context_used);
9645 CHECK_CFG_EXCEPTION;
9653 if (mono_class_is_nullable (klass)) {
9654 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9661 ins = handle_unbox (cfg, klass, sp, context_used);
9667 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9680 token = read32 (ip + 1);
9681 klass = mini_get_class (method, token, generic_context);
9682 CHECK_TYPELOAD (klass);
9684 mono_save_token_info (cfg, image, token, klass);
9686 context_used = mini_class_check_context_used (cfg, klass);
9688 if (generic_class_is_reference_type (cfg, klass)) {
9694 if (klass == mono_defaults.void_class)
9696 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9698 /* frequent check in generic code: box (struct), brtrue */
9700 // FIXME: LLVM can't handle the inconsistent bb linking
9701 if (!mono_class_is_nullable (klass) &&
9702 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9703 (ip [5] == CEE_BRTRUE ||
9704 ip [5] == CEE_BRTRUE_S ||
9705 ip [5] == CEE_BRFALSE ||
9706 ip [5] == CEE_BRFALSE_S)) {
9707 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9709 MonoBasicBlock *true_bb, *false_bb;
9713 if (cfg->verbose_level > 3) {
9714 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9715 printf ("<box+brtrue opt>\n");
9723 target = ip + 1 + (signed char)(*ip);
9730 target = ip + 4 + (gint)(read32 (ip));
9734 g_assert_not_reached ();
9738 * We need to link both bblocks, since it is needed for handling stack
9739 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9740 * Branching to only one of them would lead to inconsistencies, so
9741 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9743 GET_BBLOCK (cfg, true_bb, target);
9744 GET_BBLOCK (cfg, false_bb, ip);
9746 mono_link_bblock (cfg, cfg->cbb, true_bb);
9747 mono_link_bblock (cfg, cfg->cbb, false_bb);
9749 if (sp != stack_start) {
9750 handle_stack_args (cfg, stack_start, sp - stack_start);
9752 CHECK_UNVERIFIABLE (cfg);
9755 if (COMPILE_LLVM (cfg)) {
9756 dreg = alloc_ireg (cfg);
9757 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9760 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9762 /* The JIT can't eliminate the iconst+compare */
9763 MONO_INST_NEW (cfg, ins, OP_BR);
9764 ins->inst_target_bb = is_true ? true_bb : false_bb;
9765 MONO_ADD_INS (cfg->cbb, ins);
9768 start_new_bblock = 1;
9772 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9774 CHECK_CFG_EXCEPTION;
9783 token = read32 (ip + 1);
9784 klass = mini_get_class (method, token, generic_context);
9785 CHECK_TYPELOAD (klass);
9787 mono_save_token_info (cfg, image, token, klass);
9789 context_used = mini_class_check_context_used (cfg, klass);
9791 if (mono_class_is_nullable (klass)) {
9794 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9795 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9799 ins = handle_unbox (cfg, klass, sp, context_used);
9812 MonoClassField *field;
9813 #ifndef DISABLE_REMOTING
9817 gboolean is_instance;
9819 gpointer addr = NULL;
9820 gboolean is_special_static;
9822 MonoInst *store_val = NULL;
9823 MonoInst *thread_ins;
9826 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9828 if (op == CEE_STFLD) {
9836 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9838 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9841 if (op == CEE_STSFLD) {
9849 token = read32 (ip + 1);
9850 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9851 field = mono_method_get_wrapper_data (method, token);
9852 klass = field->parent;
9855 field = mono_field_from_token (image, token, &klass, generic_context);
9859 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9860 FIELD_ACCESS_FAILURE;
9861 mono_class_init (klass);
9863 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9866 /* if the class is Critical then transparent code cannot access it's fields */
9867 if (!is_instance && mono_security_core_clr_enabled ())
9868 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9870 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9871 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9872 if (mono_security_core_clr_enabled ())
9873 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9877 * LDFLD etc. is usable on static fields as well, so convert those cases to
9880 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9892 g_assert_not_reached ();
9894 is_instance = FALSE;
9897 context_used = mini_class_check_context_used (cfg, klass);
9901 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9902 if (op == CEE_STFLD) {
9903 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9905 #ifndef DISABLE_REMOTING
9906 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9907 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9908 MonoInst *iargs [5];
9910 GSHAREDVT_FAILURE (op);
9913 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9914 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9915 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9919 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9920 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9921 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9922 CHECK_CFG_EXCEPTION;
9923 g_assert (costs > 0);
9925 cfg->real_offset += 5;
9928 inline_costs += costs;
9930 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9937 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9939 if (mini_is_gsharedvt_klass (cfg, klass)) {
9940 MonoInst *offset_ins;
9942 context_used = mini_class_check_context_used (cfg, klass);
9944 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9945 dreg = alloc_ireg_mp (cfg);
9946 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9947 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9948 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9950 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9952 if (sp [0]->opcode != OP_LDADDR)
9953 store->flags |= MONO_INST_FAULT;
9955 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9956 /* insert call to write barrier */
9960 dreg = alloc_ireg_mp (cfg);
9961 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9962 emit_write_barrier (cfg, ptr, sp [1]);
9965 store->flags |= ins_flag;
9972 #ifndef DISABLE_REMOTING
9973 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9974 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9975 MonoInst *iargs [4];
9977 GSHAREDVT_FAILURE (op);
9980 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9981 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9982 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9983 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9984 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9985 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9986 CHECK_CFG_EXCEPTION;
9988 g_assert (costs > 0);
9990 cfg->real_offset += 5;
9994 inline_costs += costs;
9996 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10002 if (sp [0]->type == STACK_VTYPE) {
10005 /* Have to compute the address of the variable */
10007 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10009 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10011 g_assert (var->klass == klass);
10013 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10017 if (op == CEE_LDFLDA) {
10018 if (is_magic_tls_access (field)) {
10019 GSHAREDVT_FAILURE (*ip);
10021 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10023 if (sp [0]->type == STACK_OBJ) {
10024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10025 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10028 dreg = alloc_ireg_mp (cfg);
10030 if (mini_is_gsharedvt_klass (cfg, klass)) {
10031 MonoInst *offset_ins;
10033 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10034 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10036 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10038 ins->klass = mono_class_from_mono_type (field->type);
10039 ins->type = STACK_MP;
10045 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10047 if (mini_is_gsharedvt_klass (cfg, klass)) {
10048 MonoInst *offset_ins;
10050 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10051 dreg = alloc_ireg_mp (cfg);
10052 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10053 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10055 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10057 load->flags |= ins_flag;
10058 if (sp [0]->opcode != OP_LDADDR)
10059 load->flags |= MONO_INST_FAULT;
10073 * We can only support shared generic static
10074 * field access on architectures where the
10075 * trampoline code has been extended to handle
10076 * the generic class init.
10078 #ifndef MONO_ARCH_VTABLE_REG
10079 GENERIC_SHARING_FAILURE (op);
10082 context_used = mini_class_check_context_used (cfg, klass);
10084 ftype = mono_field_get_type (field);
10086 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10089 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10090 * to be called here.
10092 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10093 mono_class_vtable (cfg->domain, klass);
10094 CHECK_TYPELOAD (klass);
10096 mono_domain_lock (cfg->domain);
10097 if (cfg->domain->special_static_fields)
10098 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10099 mono_domain_unlock (cfg->domain);
10101 is_special_static = mono_class_field_is_special_static (field);
10103 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10104 thread_ins = mono_get_thread_intrinsic (cfg);
10108 /* Generate IR to compute the field address */
10109 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10111 * Fast access to TLS data
10112 * Inline version of get_thread_static_data () in
10116 int idx, static_data_reg, array_reg, dreg;
10118 GSHAREDVT_FAILURE (op);
10120 // offset &= 0x7fffffff;
10121 // idx = (offset >> 24) - 1;
10122 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10123 MONO_ADD_INS (cfg->cbb, thread_ins);
10124 static_data_reg = alloc_ireg (cfg);
10125 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10127 if (cfg->compile_aot) {
10128 int offset_reg, offset2_reg, idx_reg;
10130 /* For TLS variables, this will return the TLS offset */
10131 EMIT_NEW_SFLDACONST (cfg, ins, field);
10132 offset_reg = ins->dreg;
10133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10134 idx_reg = alloc_ireg (cfg);
10135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10138 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10139 array_reg = alloc_ireg (cfg);
10140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10141 offset2_reg = alloc_ireg (cfg);
10142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10143 dreg = alloc_ireg (cfg);
10144 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10146 offset = (gsize)addr & 0x7fffffff;
10147 idx = (offset >> 24) - 1;
10149 array_reg = alloc_ireg (cfg);
10150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10151 dreg = alloc_ireg (cfg);
10152 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10154 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10155 (cfg->compile_aot && is_special_static) ||
10156 (context_used && is_special_static)) {
10157 MonoInst *iargs [2];
10159 g_assert (field->parent);
10160 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10161 if (context_used) {
10162 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10163 field, MONO_RGCTX_INFO_CLASS_FIELD);
10165 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10167 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10168 } else if (context_used) {
10169 MonoInst *static_data;
10172 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10173 method->klass->name_space, method->klass->name, method->name,
10174 depth, field->offset);
10177 if (mono_class_needs_cctor_run (klass, method))
10178 emit_generic_class_init (cfg, klass);
10181 * The pointer we're computing here is
10183 * super_info.static_data + field->offset
10185 static_data = emit_get_rgctx_klass (cfg, context_used,
10186 klass, MONO_RGCTX_INFO_STATIC_DATA);
10188 if (mini_is_gsharedvt_klass (cfg, klass)) {
10189 MonoInst *offset_ins;
10191 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10192 dreg = alloc_ireg_mp (cfg);
10193 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10194 } else if (field->offset == 0) {
10197 int addr_reg = mono_alloc_preg (cfg);
10198 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10200 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10201 MonoInst *iargs [2];
10203 g_assert (field->parent);
10204 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10205 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10206 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10208 MonoVTable *vtable = NULL;
10210 if (!cfg->compile_aot)
10211 vtable = mono_class_vtable (cfg->domain, klass);
10212 CHECK_TYPELOAD (klass);
10215 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10216 if (!(g_slist_find (class_inits, klass))) {
10217 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10218 if (cfg->verbose_level > 2)
10219 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10220 class_inits = g_slist_prepend (class_inits, klass);
10223 if (cfg->run_cctors) {
10225 /* This makes so that inline cannot trigger */
10226 /* .cctors: too many apps depend on them */
10227 /* running with a specific order... */
10229 if (! vtable->initialized)
10230 INLINE_FAILURE ("class init");
10231 ex = mono_runtime_class_init_full (vtable, FALSE);
10233 set_exception_object (cfg, ex);
10234 goto exception_exit;
10238 if (cfg->compile_aot)
10239 EMIT_NEW_SFLDACONST (cfg, ins, field);
10242 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10243 EMIT_NEW_PCONST (cfg, ins, addr);
10246 MonoInst *iargs [1];
10247 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10248 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10252 /* Generate IR to do the actual load/store operation */
10254 if (op == CEE_LDSFLDA) {
10255 ins->klass = mono_class_from_mono_type (ftype);
10256 ins->type = STACK_PTR;
10258 } else if (op == CEE_STSFLD) {
10261 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10262 store->flags |= ins_flag;
10264 gboolean is_const = FALSE;
10265 MonoVTable *vtable = NULL;
10266 gpointer addr = NULL;
10268 if (!context_used) {
10269 vtable = mono_class_vtable (cfg->domain, klass);
10270 CHECK_TYPELOAD (klass);
10272 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10273 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10274 int ro_type = ftype->type;
10276 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10277 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10278 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10281 GSHAREDVT_FAILURE (op);
10283 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10286 case MONO_TYPE_BOOLEAN:
10288 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10292 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10295 case MONO_TYPE_CHAR:
10297 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10301 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10306 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10310 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10315 case MONO_TYPE_PTR:
10316 case MONO_TYPE_FNPTR:
10317 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10318 type_to_eval_stack_type ((cfg), field->type, *sp);
10321 case MONO_TYPE_STRING:
10322 case MONO_TYPE_OBJECT:
10323 case MONO_TYPE_CLASS:
10324 case MONO_TYPE_SZARRAY:
10325 case MONO_TYPE_ARRAY:
10326 if (!mono_gc_is_moving ()) {
10327 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10328 type_to_eval_stack_type ((cfg), field->type, *sp);
10336 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10341 case MONO_TYPE_VALUETYPE:
10351 CHECK_STACK_OVF (1);
10353 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10354 load->flags |= ins_flag;
10367 token = read32 (ip + 1);
10368 klass = mini_get_class (method, token, generic_context);
10369 CHECK_TYPELOAD (klass);
10370 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10371 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10372 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10373 generic_class_is_reference_type (cfg, klass)) {
10374 /* insert call to write barrier */
10375 emit_write_barrier (cfg, sp [0], sp [1]);
10387 const char *data_ptr;
10389 guint32 field_token;
10395 token = read32 (ip + 1);
10397 klass = mini_get_class (method, token, generic_context);
10398 CHECK_TYPELOAD (klass);
10400 context_used = mini_class_check_context_used (cfg, klass);
10402 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10403 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10404 ins->sreg1 = sp [0]->dreg;
10405 ins->type = STACK_I4;
10406 ins->dreg = alloc_ireg (cfg);
10407 MONO_ADD_INS (cfg->cbb, ins);
10408 *sp = mono_decompose_opcode (cfg, ins);
10411 if (context_used) {
10412 MonoInst *args [3];
10413 MonoClass *array_class = mono_array_class_get (klass, 1);
10414 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10416 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10419 args [0] = emit_get_rgctx_klass (cfg, context_used,
10420 array_class, MONO_RGCTX_INFO_VTABLE);
10425 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10427 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10429 if (cfg->opt & MONO_OPT_SHARED) {
10430 /* Decompose now to avoid problems with references to the domainvar */
10431 MonoInst *iargs [3];
10433 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10434 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10435 iargs [2] = sp [0];
10437 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10439 /* Decompose later since it is needed by abcrem */
10440 MonoClass *array_type = mono_array_class_get (klass, 1);
10441 mono_class_vtable (cfg->domain, array_type);
10442 CHECK_TYPELOAD (array_type);
10444 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10445 ins->dreg = alloc_ireg_ref (cfg);
10446 ins->sreg1 = sp [0]->dreg;
10447 ins->inst_newa_class = klass;
10448 ins->type = STACK_OBJ;
10449 ins->klass = array_type;
10450 MONO_ADD_INS (cfg->cbb, ins);
10451 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10452 cfg->cbb->has_array_access = TRUE;
10454 /* Needed so mono_emit_load_get_addr () gets called */
10455 mono_get_got_var (cfg);
10465 * we inline/optimize the initialization sequence if possible.
10466 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10467 * for small sizes open code the memcpy
10468 * ensure the rva field is big enough
10470 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10471 MonoMethod *memcpy_method = get_memcpy_method ();
10472 MonoInst *iargs [3];
10473 int add_reg = alloc_ireg_mp (cfg);
10475 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10476 if (cfg->compile_aot) {
10477 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10479 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10481 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10482 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10491 if (sp [0]->type != STACK_OBJ)
10494 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10495 ins->dreg = alloc_preg (cfg);
10496 ins->sreg1 = sp [0]->dreg;
10497 ins->type = STACK_I4;
10498 /* This flag will be inherited by the decomposition */
10499 ins->flags |= MONO_INST_FAULT;
10500 MONO_ADD_INS (cfg->cbb, ins);
10501 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10502 cfg->cbb->has_array_access = TRUE;
10510 if (sp [0]->type != STACK_OBJ)
10513 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10515 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10516 CHECK_TYPELOAD (klass);
10517 /* we need to make sure that this array is exactly the type it needs
10518 * to be for correctness. the wrappers are lax with their usage
10519 * so we need to ignore them here
10521 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10522 MonoClass *array_class = mono_array_class_get (klass, 1);
10523 mini_emit_check_array_type (cfg, sp [0], array_class);
10524 CHECK_TYPELOAD (array_class);
10528 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10533 case CEE_LDELEM_I1:
10534 case CEE_LDELEM_U1:
10535 case CEE_LDELEM_I2:
10536 case CEE_LDELEM_U2:
10537 case CEE_LDELEM_I4:
10538 case CEE_LDELEM_U4:
10539 case CEE_LDELEM_I8:
10541 case CEE_LDELEM_R4:
10542 case CEE_LDELEM_R8:
10543 case CEE_LDELEM_REF: {
10549 if (*ip == CEE_LDELEM) {
10551 token = read32 (ip + 1);
10552 klass = mini_get_class (method, token, generic_context);
10553 CHECK_TYPELOAD (klass);
10554 mono_class_init (klass);
10557 klass = array_access_to_klass (*ip);
10559 if (sp [0]->type != STACK_OBJ)
10562 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10564 if (mini_is_gsharedvt_klass (cfg, klass)) {
10565 // FIXME-VT: OP_ICONST optimization
10566 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10567 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10568 ins->opcode = OP_LOADV_MEMBASE;
10569 } else if (sp [1]->opcode == OP_ICONST) {
10570 int array_reg = sp [0]->dreg;
10571 int index_reg = sp [1]->dreg;
10572 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10574 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10575 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10577 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10578 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10581 if (*ip == CEE_LDELEM)
10588 case CEE_STELEM_I1:
10589 case CEE_STELEM_I2:
10590 case CEE_STELEM_I4:
10591 case CEE_STELEM_I8:
10592 case CEE_STELEM_R4:
10593 case CEE_STELEM_R8:
10594 case CEE_STELEM_REF:
10599 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10601 if (*ip == CEE_STELEM) {
10603 token = read32 (ip + 1);
10604 klass = mini_get_class (method, token, generic_context);
10605 CHECK_TYPELOAD (klass);
10606 mono_class_init (klass);
10609 klass = array_access_to_klass (*ip);
10611 if (sp [0]->type != STACK_OBJ)
10614 emit_array_store (cfg, klass, sp, TRUE);
10616 if (*ip == CEE_STELEM)
10623 case CEE_CKFINITE: {
10627 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10628 ins->sreg1 = sp [0]->dreg;
10629 ins->dreg = alloc_freg (cfg);
10630 ins->type = STACK_R8;
10631 MONO_ADD_INS (bblock, ins);
10633 *sp++ = mono_decompose_opcode (cfg, ins);
10638 case CEE_REFANYVAL: {
10639 MonoInst *src_var, *src;
10641 int klass_reg = alloc_preg (cfg);
10642 int dreg = alloc_preg (cfg);
10644 GSHAREDVT_FAILURE (*ip);
10647 MONO_INST_NEW (cfg, ins, *ip);
10650 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10651 CHECK_TYPELOAD (klass);
10652 mono_class_init (klass);
10654 context_used = mini_class_check_context_used (cfg, klass);
10657 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10659 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10660 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10663 if (context_used) {
10664 MonoInst *klass_ins;
10666 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10667 klass, MONO_RGCTX_INFO_KLASS);
10670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10671 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10673 mini_emit_class_check (cfg, klass_reg, klass);
10675 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10676 ins->type = STACK_MP;
10681 case CEE_MKREFANY: {
10682 MonoInst *loc, *addr;
10684 GSHAREDVT_FAILURE (*ip);
10687 MONO_INST_NEW (cfg, ins, *ip);
10690 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10691 CHECK_TYPELOAD (klass);
10692 mono_class_init (klass);
10694 context_used = mini_class_check_context_used (cfg, klass);
10696 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10697 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10699 if (context_used) {
10700 MonoInst *const_ins;
10701 int type_reg = alloc_preg (cfg);
10703 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10707 } else if (cfg->compile_aot) {
10708 int const_reg = alloc_preg (cfg);
10709 int type_reg = alloc_preg (cfg);
10711 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10716 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10721 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10722 ins->type = STACK_VTYPE;
10723 ins->klass = mono_defaults.typed_reference_class;
10728 case CEE_LDTOKEN: {
10730 MonoClass *handle_class;
10732 CHECK_STACK_OVF (1);
10735 n = read32 (ip + 1);
10737 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10738 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10739 handle = mono_method_get_wrapper_data (method, n);
10740 handle_class = mono_method_get_wrapper_data (method, n + 1);
10741 if (handle_class == mono_defaults.typehandle_class)
10742 handle = &((MonoClass*)handle)->byval_arg;
10745 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10749 mono_class_init (handle_class);
10750 if (cfg->generic_sharing_context) {
10751 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10752 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10753 /* This case handles ldtoken
10754 of an open type, like for
10757 } else if (handle_class == mono_defaults.typehandle_class) {
10758 /* If we get a MONO_TYPE_CLASS
10759 then we need to provide the
10761 instantiation of it. */
10762 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10765 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10766 } else if (handle_class == mono_defaults.fieldhandle_class)
10767 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10768 else if (handle_class == mono_defaults.methodhandle_class)
10769 context_used = mini_method_check_context_used (cfg, handle);
10771 g_assert_not_reached ();
10774 if ((cfg->opt & MONO_OPT_SHARED) &&
10775 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10776 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10777 MonoInst *addr, *vtvar, *iargs [3];
10778 int method_context_used;
10780 method_context_used = mini_method_check_context_used (cfg, method);
10782 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10784 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10785 EMIT_NEW_ICONST (cfg, iargs [1], n);
10786 if (method_context_used) {
10787 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10788 method, MONO_RGCTX_INFO_METHOD);
10789 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10791 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10792 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10794 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10796 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10798 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10800 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10801 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10802 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10803 (cmethod->klass == mono_defaults.systemtype_class) &&
10804 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10805 MonoClass *tclass = mono_class_from_mono_type (handle);
10807 mono_class_init (tclass);
10808 if (context_used) {
10809 ins = emit_get_rgctx_klass (cfg, context_used,
10810 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10811 } else if (cfg->compile_aot) {
10812 if (method->wrapper_type) {
10813 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10814 /* Special case for static synchronized wrappers */
10815 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10817 /* FIXME: n is not a normal token */
10818 cfg->disable_aot = TRUE;
10819 EMIT_NEW_PCONST (cfg, ins, NULL);
10822 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10825 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10827 ins->type = STACK_OBJ;
10828 ins->klass = cmethod->klass;
10831 MonoInst *addr, *vtvar;
10833 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10835 if (context_used) {
10836 if (handle_class == mono_defaults.typehandle_class) {
10837 ins = emit_get_rgctx_klass (cfg, context_used,
10838 mono_class_from_mono_type (handle),
10839 MONO_RGCTX_INFO_TYPE);
10840 } else if (handle_class == mono_defaults.methodhandle_class) {
10841 ins = emit_get_rgctx_method (cfg, context_used,
10842 handle, MONO_RGCTX_INFO_METHOD);
10843 } else if (handle_class == mono_defaults.fieldhandle_class) {
10844 ins = emit_get_rgctx_field (cfg, context_used,
10845 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10847 g_assert_not_reached ();
10849 } else if (cfg->compile_aot) {
10850 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10852 EMIT_NEW_PCONST (cfg, ins, handle);
10854 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10856 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10866 MONO_INST_NEW (cfg, ins, OP_THROW);
10868 ins->sreg1 = sp [0]->dreg;
10870 bblock->out_of_line = TRUE;
10871 MONO_ADD_INS (bblock, ins);
10872 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10873 MONO_ADD_INS (bblock, ins);
10876 link_bblock (cfg, bblock, end_bblock);
10877 start_new_bblock = 1;
10879 case CEE_ENDFINALLY:
10880 /* mono_save_seq_point_info () depends on this */
10881 if (sp != stack_start)
10882 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10883 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10884 MONO_ADD_INS (bblock, ins);
10886 start_new_bblock = 1;
10889 * Control will leave the method so empty the stack, otherwise
10890 * the next basic block will start with a nonempty stack.
10892 while (sp != stack_start) {
10897 case CEE_LEAVE_S: {
10900 if (*ip == CEE_LEAVE) {
10902 target = ip + 5 + (gint32)read32(ip + 1);
10905 target = ip + 2 + (signed char)(ip [1]);
10908 /* empty the stack */
10909 while (sp != stack_start) {
10914 * If this leave statement is in a catch block, check for a
10915 * pending exception, and rethrow it if necessary.
10916 * We avoid doing this in runtime invoke wrappers, since those are called
10917 * by native code which expects the wrapper to catch all exceptions.
10919 for (i = 0; i < header->num_clauses; ++i) {
10920 MonoExceptionClause *clause = &header->clauses [i];
10923 * Use <= in the final comparison to handle clauses with multiple
10924 * leave statements, like in bug #78024.
10925 * The ordering of the exception clauses guarantees that we find the
10926 * innermost clause.
10928 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10930 MonoBasicBlock *dont_throw;
10935 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10938 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10940 NEW_BBLOCK (cfg, dont_throw);
10943 * Currently, we always rethrow the abort exception, despite the
10944 * fact that this is not correct. See thread6.cs for an example.
10945 * But propagating the abort exception is more important than
10946 * getting the semantics right.
10948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10950 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10952 MONO_START_BB (cfg, dont_throw);
10957 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10959 MonoExceptionClause *clause;
10961 for (tmp = handlers; tmp; tmp = tmp->next) {
10962 clause = tmp->data;
10963 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10965 link_bblock (cfg, bblock, tblock);
10966 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10967 ins->inst_target_bb = tblock;
10968 ins->inst_eh_block = clause;
10969 MONO_ADD_INS (bblock, ins);
10970 bblock->has_call_handler = 1;
10971 if (COMPILE_LLVM (cfg)) {
10972 MonoBasicBlock *target_bb;
10975 * Link the finally bblock with the target, since it will
10976 * conceptually branch there.
10977 * FIXME: Have to link the bblock containing the endfinally.
10979 GET_BBLOCK (cfg, target_bb, target);
10980 link_bblock (cfg, tblock, target_bb);
10983 g_list_free (handlers);
10986 MONO_INST_NEW (cfg, ins, OP_BR);
10987 MONO_ADD_INS (bblock, ins);
10988 GET_BBLOCK (cfg, tblock, target);
10989 link_bblock (cfg, bblock, tblock);
10990 ins->inst_target_bb = tblock;
10991 start_new_bblock = 1;
10993 if (*ip == CEE_LEAVE)
11002 * Mono specific opcodes
11004 case MONO_CUSTOM_PREFIX: {
11006 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11010 case CEE_MONO_ICALL: {
11012 MonoJitICallInfo *info;
11014 token = read32 (ip + 2);
11015 func = mono_method_get_wrapper_data (method, token);
11016 info = mono_find_jit_icall_by_addr (func);
11018 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11021 CHECK_STACK (info->sig->param_count);
11022 sp -= info->sig->param_count;
11024 ins = mono_emit_jit_icall (cfg, info->func, sp);
11025 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11029 inline_costs += 10 * num_calls++;
11033 case CEE_MONO_LDPTR: {
11036 CHECK_STACK_OVF (1);
11038 token = read32 (ip + 2);
11040 ptr = mono_method_get_wrapper_data (method, token);
11041 /* FIXME: Generalize this */
11042 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11043 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11048 EMIT_NEW_PCONST (cfg, ins, ptr);
11051 inline_costs += 10 * num_calls++;
11052 /* Can't embed random pointers into AOT code */
11053 cfg->disable_aot = 1;
11056 case CEE_MONO_JIT_ICALL_ADDR: {
11057 MonoJitICallInfo *callinfo;
11060 CHECK_STACK_OVF (1);
11062 token = read32 (ip + 2);
11064 ptr = mono_method_get_wrapper_data (method, token);
11065 callinfo = mono_find_jit_icall_by_addr (ptr);
11066 g_assert (callinfo);
11067 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11070 inline_costs += 10 * num_calls++;
11073 case CEE_MONO_ICALL_ADDR: {
11074 MonoMethod *cmethod;
11077 CHECK_STACK_OVF (1);
11079 token = read32 (ip + 2);
11081 cmethod = mono_method_get_wrapper_data (method, token);
11083 if (cfg->compile_aot) {
11084 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11086 ptr = mono_lookup_internal_call (cmethod);
11088 EMIT_NEW_PCONST (cfg, ins, ptr);
11094 case CEE_MONO_VTADDR: {
11095 MonoInst *src_var, *src;
11101 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11102 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11107 case CEE_MONO_NEWOBJ: {
11108 MonoInst *iargs [2];
11110 CHECK_STACK_OVF (1);
11112 token = read32 (ip + 2);
11113 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11114 mono_class_init (klass);
11115 NEW_DOMAINCONST (cfg, iargs [0]);
11116 MONO_ADD_INS (cfg->cbb, iargs [0]);
11117 NEW_CLASSCONST (cfg, iargs [1], klass);
11118 MONO_ADD_INS (cfg->cbb, iargs [1]);
11119 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11121 inline_costs += 10 * num_calls++;
11124 case CEE_MONO_OBJADDR:
11127 MONO_INST_NEW (cfg, ins, OP_MOVE);
11128 ins->dreg = alloc_ireg_mp (cfg);
11129 ins->sreg1 = sp [0]->dreg;
11130 ins->type = STACK_MP;
11131 MONO_ADD_INS (cfg->cbb, ins);
11135 case CEE_MONO_LDNATIVEOBJ:
11137 * Similar to LDOBJ, but instead load the unmanaged
11138 * representation of the vtype to the stack.
11143 token = read32 (ip + 2);
11144 klass = mono_method_get_wrapper_data (method, token);
11145 g_assert (klass->valuetype);
11146 mono_class_init (klass);
11149 MonoInst *src, *dest, *temp;
11152 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11153 temp->backend.is_pinvoke = 1;
11154 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11155 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11157 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11158 dest->type = STACK_VTYPE;
11159 dest->klass = klass;
11165 case CEE_MONO_RETOBJ: {
11167 * Same as RET, but return the native representation of a vtype
11170 g_assert (cfg->ret);
11171 g_assert (mono_method_signature (method)->pinvoke);
11176 token = read32 (ip + 2);
11177 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11179 if (!cfg->vret_addr) {
11180 g_assert (cfg->ret_var_is_local);
11182 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11184 EMIT_NEW_RETLOADA (cfg, ins);
11186 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11188 if (sp != stack_start)
11191 MONO_INST_NEW (cfg, ins, OP_BR);
11192 ins->inst_target_bb = end_bblock;
11193 MONO_ADD_INS (bblock, ins);
11194 link_bblock (cfg, bblock, end_bblock);
11195 start_new_bblock = 1;
11199 case CEE_MONO_CISINST:
11200 case CEE_MONO_CCASTCLASS: {
11205 token = read32 (ip + 2);
11206 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11207 if (ip [1] == CEE_MONO_CISINST)
11208 ins = handle_cisinst (cfg, klass, sp [0]);
11210 ins = handle_ccastclass (cfg, klass, sp [0]);
11216 case CEE_MONO_SAVE_LMF:
11217 case CEE_MONO_RESTORE_LMF:
11218 #ifdef MONO_ARCH_HAVE_LMF_OPS
11219 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11220 MONO_ADD_INS (bblock, ins);
11221 cfg->need_lmf_area = TRUE;
11225 case CEE_MONO_CLASSCONST:
11226 CHECK_STACK_OVF (1);
11228 token = read32 (ip + 2);
11229 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11232 inline_costs += 10 * num_calls++;
11234 case CEE_MONO_NOT_TAKEN:
11235 bblock->out_of_line = TRUE;
11238 case CEE_MONO_TLS: {
11241 CHECK_STACK_OVF (1);
11243 key = (gint32)read32 (ip + 2);
11244 g_assert (key < TLS_KEY_NUM);
11246 ins = mono_create_tls_get (cfg, key);
11248 if (cfg->compile_aot) {
11249 cfg->disable_aot = TRUE;
11250 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11251 ins->dreg = alloc_preg (cfg);
11252 ins->type = STACK_PTR;
11254 g_assert_not_reached ();
11257 ins->type = STACK_PTR;
11258 MONO_ADD_INS (bblock, ins);
11263 case CEE_MONO_DYN_CALL: {
11264 MonoCallInst *call;
11266 /* It would be easier to call a trampoline, but that would put an
11267 * extra frame on the stack, confusing exception handling. So
11268 * implement it inline using an opcode for now.
11271 if (!cfg->dyn_call_var) {
11272 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11273 /* prevent it from being register allocated */
11274 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11277 /* Has to use a call inst since the local regalloc expects it */
11278 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11279 ins = (MonoInst*)call;
11281 ins->sreg1 = sp [0]->dreg;
11282 ins->sreg2 = sp [1]->dreg;
11283 MONO_ADD_INS (bblock, ins);
11285 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11288 inline_costs += 10 * num_calls++;
11292 case CEE_MONO_MEMORY_BARRIER: {
11294 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11298 case CEE_MONO_JIT_ATTACH: {
11299 MonoInst *args [16];
11300 MonoInst *ad_ins, *lmf_ins;
11301 MonoBasicBlock *next_bb = NULL;
11303 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11305 EMIT_NEW_PCONST (cfg, ins, NULL);
11306 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11312 ad_ins = mono_get_domain_intrinsic (cfg);
11313 lmf_ins = mono_get_lmf_intrinsic (cfg);
11316 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11317 NEW_BBLOCK (cfg, next_bb);
11319 MONO_ADD_INS (cfg->cbb, ad_ins);
11320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11323 MONO_ADD_INS (cfg->cbb, lmf_ins);
11324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11328 if (cfg->compile_aot) {
11329 /* AOT code is only used in the root domain */
11330 EMIT_NEW_PCONST (cfg, args [0], NULL);
11332 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11334 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11335 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11338 MONO_START_BB (cfg, next_bb);
11344 case CEE_MONO_JIT_DETACH: {
11345 MonoInst *args [16];
11347 /* Restore the original domain */
11348 dreg = alloc_ireg (cfg);
11349 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11350 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11355 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11361 case CEE_PREFIX1: {
11364 case CEE_ARGLIST: {
11365 /* somewhat similar to LDTOKEN */
11366 MonoInst *addr, *vtvar;
11367 CHECK_STACK_OVF (1);
11368 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11370 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11371 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11373 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11374 ins->type = STACK_VTYPE;
11375 ins->klass = mono_defaults.argumenthandle_class;
11388 * The following transforms:
11389 * CEE_CEQ into OP_CEQ
11390 * CEE_CGT into OP_CGT
11391 * CEE_CGT_UN into OP_CGT_UN
11392 * CEE_CLT into OP_CLT
11393 * CEE_CLT_UN into OP_CLT_UN
11395 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11397 MONO_INST_NEW (cfg, ins, cmp->opcode);
11399 cmp->sreg1 = sp [0]->dreg;
11400 cmp->sreg2 = sp [1]->dreg;
11401 type_from_op (cmp, sp [0], sp [1]);
11403 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11404 cmp->opcode = OP_LCOMPARE;
11405 else if (sp [0]->type == STACK_R8)
11406 cmp->opcode = OP_FCOMPARE;
11408 cmp->opcode = OP_ICOMPARE;
11409 MONO_ADD_INS (bblock, cmp);
11410 ins->type = STACK_I4;
11411 ins->dreg = alloc_dreg (cfg, ins->type);
11412 type_from_op (ins, sp [0], sp [1]);
11414 if (cmp->opcode == OP_FCOMPARE) {
11416 * The backends expect the fceq opcodes to do the
11419 cmp->opcode = OP_NOP;
11420 ins->sreg1 = cmp->sreg1;
11421 ins->sreg2 = cmp->sreg2;
11423 MONO_ADD_INS (bblock, ins);
11429 MonoInst *argconst;
11430 MonoMethod *cil_method;
11432 CHECK_STACK_OVF (1);
11434 n = read32 (ip + 2);
11435 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11436 if (!cmethod || mono_loader_get_last_error ())
11438 mono_class_init (cmethod->klass);
11440 mono_save_token_info (cfg, image, n, cmethod);
11442 context_used = mini_method_check_context_used (cfg, cmethod);
11444 cil_method = cmethod;
11445 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11446 METHOD_ACCESS_FAILURE;
11448 if (mono_security_cas_enabled ()) {
11449 if (check_linkdemand (cfg, method, cmethod))
11450 INLINE_FAILURE ("linkdemand");
11451 CHECK_CFG_EXCEPTION;
11452 } else if (mono_security_core_clr_enabled ()) {
11453 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11457 * Optimize the common case of ldftn+delegate creation
11459 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11460 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11461 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11462 MonoInst *target_ins;
11463 MonoMethod *invoke;
11464 int invoke_context_used;
11466 invoke = mono_get_delegate_invoke (ctor_method->klass);
11467 if (!invoke || !mono_method_signature (invoke))
11470 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11472 target_ins = sp [-1];
11474 if (mono_security_core_clr_enabled ())
11475 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11477 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11478 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11479 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11481 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11485 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11486 /* FIXME: SGEN support */
11487 if (invoke_context_used == 0) {
11489 if (cfg->verbose_level > 3)
11490 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11492 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11493 CHECK_CFG_EXCEPTION;
11502 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11503 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11507 inline_costs += 10 * num_calls++;
11510 case CEE_LDVIRTFTN: {
11511 MonoInst *args [2];
11515 n = read32 (ip + 2);
11516 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11517 if (!cmethod || mono_loader_get_last_error ())
11519 mono_class_init (cmethod->klass);
11521 context_used = mini_method_check_context_used (cfg, cmethod);
11523 if (mono_security_cas_enabled ()) {
11524 if (check_linkdemand (cfg, method, cmethod))
11525 INLINE_FAILURE ("linkdemand");
11526 CHECK_CFG_EXCEPTION;
11527 } else if (mono_security_core_clr_enabled ()) {
11528 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11534 args [1] = emit_get_rgctx_method (cfg, context_used,
11535 cmethod, MONO_RGCTX_INFO_METHOD);
11538 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11540 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11543 inline_costs += 10 * num_calls++;
11547 CHECK_STACK_OVF (1);
11549 n = read16 (ip + 2);
11551 EMIT_NEW_ARGLOAD (cfg, ins, n);
11556 CHECK_STACK_OVF (1);
11558 n = read16 (ip + 2);
11560 NEW_ARGLOADA (cfg, ins, n);
11561 MONO_ADD_INS (cfg->cbb, ins);
11569 n = read16 (ip + 2);
11571 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11573 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11577 CHECK_STACK_OVF (1);
11579 n = read16 (ip + 2);
11581 EMIT_NEW_LOCLOAD (cfg, ins, n);
11586 unsigned char *tmp_ip;
11587 CHECK_STACK_OVF (1);
11589 n = read16 (ip + 2);
11592 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11598 EMIT_NEW_LOCLOADA (cfg, ins, n);
11607 n = read16 (ip + 2);
11609 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11611 emit_stloc_ir (cfg, sp, header, n);
11618 if (sp != stack_start)
11620 if (cfg->method != method)
11622 * Inlining this into a loop in a parent could lead to
11623 * stack overflows which is different behavior than the
11624 * non-inlined case, thus disable inlining in this case.
11626 goto inline_failure;
11628 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11629 ins->dreg = alloc_preg (cfg);
11630 ins->sreg1 = sp [0]->dreg;
11631 ins->type = STACK_PTR;
11632 MONO_ADD_INS (cfg->cbb, ins);
11634 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11636 ins->flags |= MONO_INST_INIT;
11641 case CEE_ENDFILTER: {
11642 MonoExceptionClause *clause, *nearest;
11643 int cc, nearest_num;
11647 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11649 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11650 ins->sreg1 = (*sp)->dreg;
11651 MONO_ADD_INS (bblock, ins);
11652 start_new_bblock = 1;
11657 for (cc = 0; cc < header->num_clauses; ++cc) {
11658 clause = &header->clauses [cc];
11659 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11660 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11661 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11666 g_assert (nearest);
11667 if ((ip - header->code) != nearest->handler_offset)
11672 case CEE_UNALIGNED_:
11673 ins_flag |= MONO_INST_UNALIGNED;
11674 /* FIXME: record alignment? we can assume 1 for now */
11678 case CEE_VOLATILE_:
11679 ins_flag |= MONO_INST_VOLATILE;
11683 ins_flag |= MONO_INST_TAILCALL;
11684 cfg->flags |= MONO_CFG_HAS_TAIL;
11685 /* Can't inline tail calls at this time */
11686 inline_costs += 100000;
11693 token = read32 (ip + 2);
11694 klass = mini_get_class (method, token, generic_context);
11695 CHECK_TYPELOAD (klass);
11696 if (generic_class_is_reference_type (cfg, klass))
11697 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11699 mini_emit_initobj (cfg, *sp, NULL, klass);
11703 case CEE_CONSTRAINED_:
11705 token = read32 (ip + 2);
11706 constrained_call = mini_get_class (method, token, generic_context);
11707 CHECK_TYPELOAD (constrained_call);
11711 case CEE_INITBLK: {
11712 MonoInst *iargs [3];
11716 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11717 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11718 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11719 /* emit_memset only works when val == 0 */
11720 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11722 iargs [0] = sp [0];
11723 iargs [1] = sp [1];
11724 iargs [2] = sp [2];
11725 if (ip [1] == CEE_CPBLK) {
11726 MonoMethod *memcpy_method = get_memcpy_method ();
11727 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11729 MonoMethod *memset_method = get_memset_method ();
11730 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11740 ins_flag |= MONO_INST_NOTYPECHECK;
11742 ins_flag |= MONO_INST_NORANGECHECK;
11743 /* we ignore the no-nullcheck for now since we
11744 * really do it explicitly only when doing callvirt->call
11748 case CEE_RETHROW: {
11750 int handler_offset = -1;
11752 for (i = 0; i < header->num_clauses; ++i) {
11753 MonoExceptionClause *clause = &header->clauses [i];
11754 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11755 handler_offset = clause->handler_offset;
11760 bblock->flags |= BB_EXCEPTION_UNSAFE;
11762 g_assert (handler_offset != -1);
11764 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11765 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11766 ins->sreg1 = load->dreg;
11767 MONO_ADD_INS (bblock, ins);
11769 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11770 MONO_ADD_INS (bblock, ins);
11773 link_bblock (cfg, bblock, end_bblock);
11774 start_new_bblock = 1;
11782 GSHAREDVT_FAILURE (*ip);
11784 CHECK_STACK_OVF (1);
11786 token = read32 (ip + 2);
11787 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11788 MonoType *type = mono_type_create_from_typespec (image, token);
11789 val = mono_type_size (type, &ialign);
11791 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11792 CHECK_TYPELOAD (klass);
11793 mono_class_init (klass);
11794 val = mono_type_size (&klass->byval_arg, &ialign);
11796 EMIT_NEW_ICONST (cfg, ins, val);
11801 case CEE_REFANYTYPE: {
11802 MonoInst *src_var, *src;
11804 GSHAREDVT_FAILURE (*ip);
11810 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11812 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11813 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11814 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11819 case CEE_READONLY_:
11832 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11842 g_warning ("opcode 0x%02x not handled", *ip);
11846 if (start_new_bblock != 1)
11849 bblock->cil_length = ip - bblock->cil_code;
11850 if (bblock->next_bb) {
11851 /* This could already be set because of inlining, #693905 */
11852 MonoBasicBlock *bb = bblock;
11854 while (bb->next_bb)
11856 bb->next_bb = end_bblock;
11858 bblock->next_bb = end_bblock;
11861 if (cfg->method == method && cfg->domainvar) {
11863 MonoInst *get_domain;
11865 cfg->cbb = init_localsbb;
11867 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11868 MONO_ADD_INS (cfg->cbb, get_domain);
11870 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11872 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11873 MONO_ADD_INS (cfg->cbb, store);
11876 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11877 if (cfg->compile_aot)
11878 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11879 mono_get_got_var (cfg);
11882 if (cfg->method == method && cfg->got_var)
11883 mono_emit_load_got_addr (cfg);
11886 cfg->cbb = init_localsbb;
11888 for (i = 0; i < header->num_locals; ++i) {
11889 emit_init_local (cfg, i, header->locals [i]);
11893 if (cfg->init_ref_vars && cfg->method == method) {
11894 /* Emit initialization for ref vars */
11895 // FIXME: Avoid duplication initialization for IL locals.
11896 for (i = 0; i < cfg->num_varinfo; ++i) {
11897 MonoInst *ins = cfg->varinfo [i];
11899 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11900 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11904 if (cfg->lmf_var && cfg->method == method) {
11905 cfg->cbb = init_localsbb;
11906 emit_push_lmf (cfg);
11910 MonoBasicBlock *bb;
11913 * Make seq points at backward branch targets interruptable.
11915 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11916 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11917 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11920 /* Add a sequence point for method entry/exit events */
11922 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11923 MONO_ADD_INS (init_localsbb, ins);
11924 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11925 MONO_ADD_INS (cfg->bb_exit, ins);
11929 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11930 * the code they refer to was dead (#11880).
11932 if (sym_seq_points) {
11933 for (i = 0; i < header->code_size; ++i) {
11934 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11937 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11938 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11945 if (cfg->method == method) {
11946 MonoBasicBlock *bb;
11947 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11948 bb->region = mono_find_block_region (cfg, bb->real_offset);
11950 mono_create_spvar_for_region (cfg, bb->region);
11951 if (cfg->verbose_level > 2)
11952 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11956 g_slist_free (class_inits);
11957 dont_inline = g_list_remove (dont_inline, method);
11959 if (inline_costs < 0) {
11962 /* Method is too large */
11963 mname = mono_method_full_name (method, TRUE);
11964 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11965 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11967 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11968 mono_basic_block_free (original_bb);
11972 if ((cfg->verbose_level > 2) && (cfg->method == method))
11973 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11975 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11976 mono_basic_block_free (original_bb);
11977 return inline_costs;
11980 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11987 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11991 set_exception_type_from_invalid_il (cfg, method, ip);
11995 g_slist_free (class_inits);
11996 mono_basic_block_free (original_bb);
11997 dont_inline = g_list_remove (dont_inline, method);
11998 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the OP_STOREIx_MEMBASE_IMM opcode matching the given
 * OP_STOREIx_MEMBASE_REG opcode, so a store of a register known to hold a
 * constant can be rewritten as an immediate store.
 * NOTE(review): this extract is missing the return type, the switch header
 * and the closing lines; only the case arms and the default assert are
 * visible.
 */
12003 store_membase_reg_to_store_membase_imm (int opcode)
12006 case OP_STORE_MEMBASE_REG:
12007 return OP_STORE_MEMBASE_IMM;
12008 case OP_STOREI1_MEMBASE_REG:
12009 return OP_STOREI1_MEMBASE_IMM;
12010 case OP_STOREI2_MEMBASE_REG:
12011 return OP_STOREI2_MEMBASE_IMM;
12012 case OP_STOREI4_MEMBASE_REG:
12013 return OP_STOREI4_MEMBASE_IMM;
12014 case OP_STOREI8_MEMBASE_REG:
12015 return OP_STOREI8_MEMBASE_IMM;
/* Any other opcode is a caller bug. */
12017 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the reg-immediate variant of a reg-reg opcode (OP_IADD ->
 * OP_IADD_IMM etc.), used when one source operand turned out to be a
 * constant.
 * NOTE(review): the 'case OP_xxx:' labels were lost in this extract; only
 * the return statements (and a few labels) are visible.  Presumably the
 * function returns -1 for opcodes with no immediate form -- confirm
 * against the full file.
 */
12024 mono_op_to_op_imm (int opcode)
12028 return OP_IADD_IMM;
12030 return OP_ISUB_IMM;
12032 return OP_IDIV_IMM;
12034 return OP_IDIV_UN_IMM;
12036 return OP_IREM_IMM;
12038 return OP_IREM_UN_IMM;
12040 return OP_IMUL_IMM;
12042 return OP_IAND_IMM;
/* The OP_IOR_IMM arm presumably sits on a line omitted from this extract. */
12046 return OP_IXOR_IMM;
12048 return OP_ISHL_IMM;
12050 return OP_ISHR_IMM;
12052 return OP_ISHR_UN_IMM;
12055 return OP_LADD_IMM;
12057 return OP_LSUB_IMM;
12059 return OP_LAND_IMM;
12063 return OP_LXOR_IMM;
12065 return OP_LSHL_IMM;
12067 return OP_LSHR_IMM;
12069 return OP_LSHR_UN_IMM;
12072 return OP_COMPARE_IMM;
12074 return OP_ICOMPARE_IMM;
12076 return OP_LCOMPARE_IMM;
/* Stores of a register holding a constant become immediate stores. */
12078 case OP_STORE_MEMBASE_REG:
12079 return OP_STORE_MEMBASE_IMM;
12080 case OP_STOREI1_MEMBASE_REG:
12081 return OP_STOREI1_MEMBASE_IMM;
12082 case OP_STOREI2_MEMBASE_REG:
12083 return OP_STOREI2_MEMBASE_IMM;
12084 case OP_STOREI4_MEMBASE_REG:
12085 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific immediate forms below. */
12087 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12089 return OP_X86_PUSH_IMM;
12090 case OP_X86_COMPARE_MEMBASE_REG:
12091 return OP_X86_COMPARE_MEMBASE_IMM;
12093 #if defined(TARGET_AMD64)
12094 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12095 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12097 case OP_VOIDCALL_REG:
12098 return OP_VOIDCALL;
12106 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the base+offset IR load opcode of the
 * matching size and signedness.  Both the native-int and the object
 * reference variants use the pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): most 'case CEE_LDIND_*:' labels are missing from this
 * extract; each return pairs with the obvious LDIND opcode.
 */
12113 ldind_to_load_membase (int opcode)
12117 return OP_LOADI1_MEMBASE;
12119 return OP_LOADU1_MEMBASE;
12121 return OP_LOADI2_MEMBASE;
12123 return OP_LOADU2_MEMBASE;
12125 return OP_LOADI4_MEMBASE;
12127 return OP_LOADU4_MEMBASE;
12129 return OP_LOAD_MEMBASE;
12130 case CEE_LDIND_REF:
12131 return OP_LOAD_MEMBASE;
12133 return OP_LOADI8_MEMBASE;
12135 return OP_LOADR4_MEMBASE;
12137 return OP_LOADR8_MEMBASE;
/* Unknown LDIND variant: caller bug. */
12139 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the base+offset IR store opcode of the
 * matching size.  STIND_REF uses the pointer-sized OP_STORE_MEMBASE_REG.
 * NOTE(review): most case labels are missing from this extract.
 */
12146 stind_to_store_membase (int opcode)
12150 return OP_STOREI1_MEMBASE_REG;
12152 return OP_STOREI2_MEMBASE_REG;
12154 return OP_STOREI4_MEMBASE_REG;
12156 case CEE_STIND_REF:
12157 return OP_STORE_MEMBASE_REG;
12159 return OP_STOREI8_MEMBASE_REG;
12161 return OP_STORER4_MEMBASE_REG;
12163 return OP_STORER8_MEMBASE_REG;
/* Unknown STIND variant: caller bug. */
12165 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load opcode to the absolute-address OP_*_MEM form.
 * Only x86/amd64 have these instructions, and the 8-byte form only when
 * registers are 64 bit.  Presumably returns -1 on other targets and for
 * unhandled opcodes (those lines are missing from this extract) -- confirm
 * against the full file.
 */
12172 mono_load_membase_to_load_mem (int opcode)
12174 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12175 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12177 case OP_LOAD_MEMBASE:
12178 return OP_LOAD_MEM;
12179 case OP_LOADU1_MEMBASE:
12180 return OP_LOADU1_MEM;
12181 case OP_LOADU2_MEMBASE:
12182 return OP_LOADU2_MEM;
12183 case OP_LOADI4_MEMBASE:
12184 return OP_LOADI4_MEM;
12185 case OP_LOADU4_MEMBASE:
12186 return OP_LOADU4_MEM;
12187 #if SIZEOF_REGISTER == 8
12188 case OP_LOADI8_MEMBASE:
12189 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU result is immediately stored back to the location it came
 * from, x86/amd64 can use a read-modify-write instruction with a memory
 * destination.  Given the store opcode and the ALU opcode, return the
 * fused OP_X86_/OP_AMD64_*_MEMBASE_* opcode.  The fold is first gated on
 * the store width; the rejecting return after each guard is on a line
 * missing from this extract (presumably 'return -1;').
 * NOTE(review): case labels for the ALU opcodes were lost in extraction.
 */
12198 op_to_op_dest_membase (int store_opcode, int opcode)
12200 #if defined(TARGET_X86)
/* x86: only pointer-sized / 4-byte stores can be fused. */
12201 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12206 return OP_X86_ADD_MEMBASE_REG;
12208 return OP_X86_SUB_MEMBASE_REG;
12210 return OP_X86_AND_MEMBASE_REG;
12212 return OP_X86_OR_MEMBASE_REG;
12214 return OP_X86_XOR_MEMBASE_REG;
12217 return OP_X86_ADD_MEMBASE_IMM;
12220 return OP_X86_SUB_MEMBASE_IMM;
12223 return OP_X86_AND_MEMBASE_IMM;
12226 return OP_X86_OR_MEMBASE_IMM;
12229 return OP_X86_XOR_MEMBASE_IMM;
12235 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8-byte stores; the OP_X86_* results are the
 * 32-bit operations, the OP_AMD64_* results the 64-bit ones. */
12236 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12241 return OP_X86_ADD_MEMBASE_REG;
12243 return OP_X86_SUB_MEMBASE_REG;
12245 return OP_X86_AND_MEMBASE_REG;
12247 return OP_X86_OR_MEMBASE_REG;
12249 return OP_X86_XOR_MEMBASE_REG;
12251 return OP_X86_ADD_MEMBASE_IMM;
12253 return OP_X86_SUB_MEMBASE_IMM;
12255 return OP_X86_AND_MEMBASE_IMM;
12257 return OP_X86_OR_MEMBASE_IMM;
12259 return OP_X86_XOR_MEMBASE_IMM;
12261 return OP_AMD64_ADD_MEMBASE_REG;
12263 return OP_AMD64_SUB_MEMBASE_REG;
12265 return OP_AMD64_AND_MEMBASE_REG;
12267 return OP_AMD64_OR_MEMBASE_REG;
12269 return OP_AMD64_XOR_MEMBASE_REG;
12272 return OP_AMD64_ADD_MEMBASE_IMM;
12275 return OP_AMD64_SUB_MEMBASE_IMM;
12278 return OP_AMD64_AND_MEMBASE_IMM;
12281 return OP_AMD64_OR_MEMBASE_IMM;
12284 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-and-set result that is stored as a single byte into an
 * x86 SETcc-to-memory instruction (SETEQ/SETNE with a membase
 * destination).  The case labels selecting between the two branches are on
 * lines missing from this extract -- presumably the CEQ-style opcodes;
 * confirm against the full file.
 */
12294 op_to_op_store_membase (int store_opcode, int opcode)
12296 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12299 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12300 return OP_X86_SETEQ_MEMBASE;
12302 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12303 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source register of 'opcode' into the
 * consuming instruction, yielding an x86/amd64 *_MEMBASE form that reads
 * the operand directly from memory.  The load width must be compatible
 * with the operation width; the 'return -1' fall-throughs are on lines
 * missing from this extract.
 */
12311 op_to_op_src1_membase (int load_opcode, int opcode)
12314 /* FIXME: This has sign extension issues */
/* Byte-load vs. immediate compare; see the FIXME above -- this fold may be
 * guarded/disabled in the full source (the preceding line is not visible). */
12316 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12317 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only 4-byte / pointer-sized loads can be folded. */
12320 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12325 return OP_X86_PUSH_MEMBASE;
12326 case OP_COMPARE_IMM:
12327 case OP_ICOMPARE_IMM:
12328 return OP_X86_COMPARE_MEMBASE_IMM;
12331 return OP_X86_COMPARE_MEMBASE_REG;
12335 #ifdef TARGET_AMD64
12336 /* FIXME: This has sign extension issues */
12338 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12339 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) OP_LOAD_MEMBASE is a 4-byte load, so the 64-bit folds
 * must be restricted to OP_LOADI8_MEMBASE. */
12344 #ifdef __mono_ilp32__
12345 if (load_opcode == OP_LOADI8_MEMBASE)
12347 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12349 return OP_X86_PUSH_MEMBASE;
/* The OP_COMPARE_IMM/OP_LCOMPARE_IMM arms below sit inside the FIXME
 * comment opened on the next line (its closing marker is on a line missing
 * from this extract), i.e. they appear to be commented out. */
12351 /* FIXME: This only works for 32 bit immediates
12352 case OP_COMPARE_IMM:
12353 case OP_LCOMPARE_IMM:
12354 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12355 return OP_AMD64_COMPARE_MEMBASE_IMM;
12357 case OP_ICOMPARE_IMM:
12358 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12359 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12363 #ifdef __mono_ilp32__
12364 if (load_opcode == OP_LOAD_MEMBASE)
12365 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12366 if (load_opcode == OP_LOADI8_MEMBASE)
12368 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12370 return OP_AMD64_COMPARE_MEMBASE_REG;
12373 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12374 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but folds a load feeding the SECOND source
 * register into the consuming compare/ALU instruction.  On amd64 the
 * 32-bit folds reuse the OP_X86_* opcodes and the 64-bit folds use the
 * OP_AMD64_* ones; under ILP32 (x32) OP_LOAD_MEMBASE is a 4-byte load and
 * is grouped with the 32-bit cases.
 * NOTE(review): case labels and the 'return -1' fall-throughs are on lines
 * missing from this extract.
 */
12383 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86 section: only 4-byte / pointer-sized loads can be folded. */
12386 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12392 return OP_X86_COMPARE_REG_MEMBASE;
12394 return OP_X86_ADD_REG_MEMBASE;
12396 return OP_X86_SUB_REG_MEMBASE;
12398 return OP_X86_AND_REG_MEMBASE;
12400 return OP_X86_OR_REG_MEMBASE;
12402 return OP_X86_XOR_REG_MEMBASE;
12406 #ifdef TARGET_AMD64
12407 #ifdef __mono_ilp32__
12408 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12410 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12414 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12416 return OP_X86_ADD_REG_MEMBASE;
12418 return OP_X86_SUB_REG_MEMBASE;
12420 return OP_X86_AND_REG_MEMBASE;
12422 return OP_X86_OR_REG_MEMBASE;
12424 return OP_X86_XOR_REG_MEMBASE;
12426 #ifdef __mono_ilp32__
12427 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12429 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12434 return OP_AMD64_COMPARE_REG_MEMBASE;
12436 return OP_AMD64_ADD_REG_MEMBASE;
12438 return OP_AMD64_SUB_REG_MEMBASE;
12440 return OP_AMD64_AND_REG_MEMBASE;
12442 return OP_AMD64_OR_REG_MEMBASE;
12444 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuse the conversion for opcodes that are
 * emulated in software on this configuration (long shifts on 32-bit
 * registers, mul/div/rem where the arch emulates them) -- presumably
 * because the emulation only handles the non-immediate forms; confirm
 * against the full file.  The case labels and their refusing returns are
 * on lines missing from this extract; everything else defers to
 * mono_op_to_op_imm.
 */
12453 mono_op_to_op_imm_noemul (int opcode)
12456 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12462 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12469 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12474 return mono_op_to_op_imm (opcode);
12479 * mono_handle_global_vregs:
12481 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12485 mono_handle_global_vregs (MonoCompile *cfg)
12487 gint32 *vreg_to_bb;
12488 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds 0 (vreg not seen yet), block_num + 1 (seen in
 * exactly one bblock so far) or -1 (seen in more than one bblock).
 * NOTE(review): the element type is gint32 but the allocation uses
 * sizeof (gint32*), and the '+ 1' sits outside the multiplication, so it
 * pads by one byte rather than one element.  Over-allocates (harmless) on
 * 64 bit; probably meant sizeof (gint32) * (cfg->next_vreg + 1).
 */
12491 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12493 #ifdef MONO_ARCH_SIMD_INTRINSICS
12494 if (cfg->uses_simd_intrinsics)
12495 mono_simd_simplify_indirection (cfg);
12498 /* Find local vregs used in more than one bb */
12499 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12500 MonoInst *ins = bb->code;
12501 int block_num = bb->block_num;
12503 if (cfg->verbose_level > 2)
12504 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12507 for (; ins; ins = ins->next) {
12508 const char *spec = INS_INFO (ins->opcode);
12509 int regtype = 0, regindex;
12512 if (G_UNLIKELY (cfg->verbose_level > 2))
12513 mono_print_ins (ins);
/* By this point all CIL-level opcodes should have been lowered to IR. */
12515 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk the up-to-4 register slots (dest, src1..src3) described by the
 * spec string; a blank spec char means the slot is unused. */
12517 for (regindex = 0; regindex < 4; regindex ++) {
12520 if (regindex == 0) {
12521 regtype = spec [MONO_INST_DEST];
12522 if (regtype == ' ')
12525 } else if (regindex == 1) {
12526 regtype = spec [MONO_INST_SRC1];
12527 if (regtype == ' ')
12530 } else if (regindex == 2) {
12531 regtype = spec [MONO_INST_SRC2];
12532 if (regtype == ' ')
12535 } else if (regindex == 3) {
12536 regtype = spec [MONO_INST_SRC3];
12537 if (regtype == ' ')
12542 #if SIZEOF_REGISTER == 4
12543 /* In the LLVM case, the long opcodes are not decomposed */
12544 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12546 * Since some instructions reference the original long vreg,
12547 * and some reference the two component vregs, it is quite hard
12548 * to determine when it needs to be global. So be conservative.
12550 if (!get_vreg_to_inst (cfg, vreg)) {
12551 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12553 if (cfg->verbose_level > 2)
12554 printf ("LONG VREG R%d made global.\n", vreg);
12558 * Make the component vregs volatile since the optimizations can
12559 * get confused otherwise.
12561 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12562 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12566 g_assert (vreg != -1);
12568 prev_bb = vreg_to_bb [vreg];
12569 if (prev_bb == 0) {
12570 /* 0 is a valid block num */
12571 vreg_to_bb [vreg] = block_num + 1;
12572 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; skip them. */
12573 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12576 if (!get_vreg_to_inst (cfg, vreg)) {
12577 if (G_UNLIKELY (cfg->verbose_level > 2))
12578 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the matching storage type; the switch labels
 * selecting per regtype are on lines missing from this extract. */
12582 if (vreg_is_ref (cfg, vreg))
12583 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12585 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12588 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12591 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12594 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12597 g_assert_not_reached ();
12601 /* Flag as having been used in more than one bb */
12602 vreg_to_bb [vreg] = -1;
12608 /* If a variable is used in only one bblock, convert it into a local vreg */
12609 for (i = 0; i < cfg->num_varinfo; i++) {
12610 MonoInst *var = cfg->varinfo [i];
12611 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12613 switch (var->type) {
12619 #if SIZEOF_REGISTER == 8
12622 #if !defined(TARGET_X86)
12623 /* Enabling this screws up the fp stack on x86 */
12626 if (mono_arch_is_soft_float ())
12629 /* Arguments are implicitly global */
12630 /* Putting R4 vars into registers doesn't work currently */
12631 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12632 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12634 * Make that the variable's liveness interval doesn't contain a call, since
12635 * that would cause the lvreg to be spilled, making the whole optimization
12638 /* This is too slow for JIT compilation */
/*
 * NOTE(review): the scan below dereferences vreg_to_bb [var->dreg]->code
 * even though vreg_to_bb is declared gint32* above -- this strongly
 * suggests the region is compiled out (e.g. '#if 0') in the full source;
 * the guard line between 12638 and 12640 is not visible here.  Confirm.
 */
12640 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12642 int def_index, call_index, ins_index;
12643 gboolean spilled = FALSE;
12648 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12649 const char *spec = INS_INFO (ins->opcode);
12651 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12652 def_index = ins_index;
/*
 * NOTE(review): the second clause duplicates the first (SRC1/sreg1 twice);
 * it looks like a copy-paste slip and was presumably meant to test
 * spec [MONO_INST_SRC2] / ins->sreg2.  As written the SRC2 use of the
 * variable is never checked.
 */
12654 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12655 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12656 if (call_index > def_index) {
12662 if (MONO_IS_CALL (ins))
12663 call_index = ins_index;
12673 if (G_UNLIKELY (cfg->verbose_level > 2))
12674 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the vreg stays as a purely local lvreg. */
12675 var->flags |= MONO_INST_IS_DEAD;
12676 cfg->vreg_to_inst [var->dreg] = NULL;
12683 * Compress the varinfo and vars tables so the liveness computation is faster and
12684 * takes up less space.
12687 for (i = 0; i < cfg->num_varinfo; ++i) {
12688 MonoInst *var = cfg->varinfo [i];
12689 if (pos < i && cfg->locals_start == i)
12690 cfg->locals_start = pos;
12691 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Slide live entries down; keep inst_c0/idx equal to the new index. */
12693 cfg->varinfo [pos] = cfg->varinfo [i];
12694 cfg->varinfo [pos]->inst_c0 = pos;
12695 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12696 cfg->vars [pos].idx = pos;
12697 #if SIZEOF_REGISTER == 4
12698 if (cfg->varinfo [pos]->type == STACK_I8) {
12699 /* Modify the two component vars too */
12702 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12703 var1->inst_c0 = pos;
12704 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12705 var1->inst_c0 = pos;
12712 cfg->num_varinfo = pos;
12713 if (cfg->locals_start > cfg->num_varinfo)
12714 cfg->locals_start = cfg->num_varinfo;
12718 * mono_spill_global_vars:
12720 * Generate spill code for variables which are not allocated to registers,
12721 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12722 * code is generated which could be optimized by the local optimization passes.
12725 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12727 MonoBasicBlock *bb;
12729 int orig_next_vreg;
12730 guint32 *vreg_to_lvreg;
12732 guint32 i, lvregs_len;
12733 gboolean dest_has_lvreg = FALSE;
12734 guint32 stacktypes [128];
12735 MonoInst **live_range_start, **live_range_end;
12736 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12737 int *gsharedvt_vreg_to_idx = NULL;
12739 *need_local_opts = FALSE;
12741 memset (spec2, 0, sizeof (spec2));
12743 /* FIXME: Move this function to mini.c */
12744 stacktypes ['i'] = STACK_PTR;
12745 stacktypes ['l'] = STACK_I8;
12746 stacktypes ['f'] = STACK_R8;
12747 #ifdef MONO_ARCH_SIMD_INTRINSICS
12748 stacktypes ['x'] = STACK_VTYPE;
12751 #if SIZEOF_REGISTER == 4
12752 /* Create MonoInsts for longs */
12753 for (i = 0; i < cfg->num_varinfo; i++) {
12754 MonoInst *ins = cfg->varinfo [i];
12756 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12757 switch (ins->type) {
12762 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12765 g_assert (ins->opcode == OP_REGOFFSET);
12767 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12769 tree->opcode = OP_REGOFFSET;
12770 tree->inst_basereg = ins->inst_basereg;
12771 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12773 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12775 tree->opcode = OP_REGOFFSET;
12776 tree->inst_basereg = ins->inst_basereg;
12777 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12787 if (cfg->compute_gc_maps) {
12788 /* registers need liveness info even for !non refs */
12789 for (i = 0; i < cfg->num_varinfo; i++) {
12790 MonoInst *ins = cfg->varinfo [i];
12792 if (ins->opcode == OP_REGVAR)
12793 ins->flags |= MONO_INST_GC_TRACK;
12797 if (cfg->gsharedvt) {
12798 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12800 for (i = 0; i < cfg->num_varinfo; ++i) {
12801 MonoInst *ins = cfg->varinfo [i];
12804 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12805 if (i >= cfg->locals_start) {
12807 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12808 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12809 ins->opcode = OP_GSHAREDVT_LOCAL;
12810 ins->inst_imm = idx;
12813 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12814 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12820 /* FIXME: widening and truncation */
12823 * As an optimization, when a variable allocated to the stack is first loaded into
12824 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12825 * the variable again.
12827 orig_next_vreg = cfg->next_vreg;
12828 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12829 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12833 * These arrays contain the first and last instructions accessing a given
12835 * Since we emit bblocks in the same order we process them here, and we
12836 * don't split live ranges, these will precisely describe the live range of
12837 * the variable, i.e. the instruction range where a valid value can be found
12838 * in the variables location.
12839 * The live range is computed using the liveness info computed by the liveness pass.
12840 * We can't use vmv->range, since that is an abstract live range, and we need
12841 * one which is instruction precise.
12842 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12844 /* FIXME: Only do this if debugging info is requested */
12845 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12846 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12847 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12848 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12850 /* Add spill loads/stores */
12851 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12854 if (cfg->verbose_level > 2)
12855 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12857 /* Clear vreg_to_lvreg array */
12858 for (i = 0; i < lvregs_len; i++)
12859 vreg_to_lvreg [lvregs [i]] = 0;
12863 MONO_BB_FOR_EACH_INS (bb, ins) {
12864 const char *spec = INS_INFO (ins->opcode);
12865 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12866 gboolean store, no_lvreg;
12867 int sregs [MONO_MAX_SRC_REGS];
12869 if (G_UNLIKELY (cfg->verbose_level > 2))
12870 mono_print_ins (ins);
12872 if (ins->opcode == OP_NOP)
12876 * We handle LDADDR here as well, since it can only be decomposed
12877 * when variable addresses are known.
12879 if (ins->opcode == OP_LDADDR) {
12880 MonoInst *var = ins->inst_p0;
12882 if (var->opcode == OP_VTARG_ADDR) {
12883 /* Happens on SPARC/S390 where vtypes are passed by reference */
12884 MonoInst *vtaddr = var->inst_left;
12885 if (vtaddr->opcode == OP_REGVAR) {
12886 ins->opcode = OP_MOVE;
12887 ins->sreg1 = vtaddr->dreg;
12889 else if (var->inst_left->opcode == OP_REGOFFSET) {
12890 ins->opcode = OP_LOAD_MEMBASE;
12891 ins->inst_basereg = vtaddr->inst_basereg;
12892 ins->inst_offset = vtaddr->inst_offset;
12895 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12896 /* gsharedvt arg passed by ref */
12897 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12899 ins->opcode = OP_LOAD_MEMBASE;
12900 ins->inst_basereg = var->inst_basereg;
12901 ins->inst_offset = var->inst_offset;
12902 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12903 MonoInst *load, *load2, *load3;
12904 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12905 int reg1, reg2, reg3;
12906 MonoInst *info_var = cfg->gsharedvt_info_var;
12907 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12911 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12914 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12916 g_assert (info_var);
12917 g_assert (locals_var);
12919 /* Mark the instruction used to compute the locals var as used */
12920 cfg->gsharedvt_locals_var_ins = NULL;
12922 /* Load the offset */
12923 if (info_var->opcode == OP_REGOFFSET) {
12924 reg1 = alloc_ireg (cfg);
12925 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12926 } else if (info_var->opcode == OP_REGVAR) {
12928 reg1 = info_var->dreg;
12930 g_assert_not_reached ();
12932 reg2 = alloc_ireg (cfg);
12933 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12934 /* Load the locals area address */
12935 reg3 = alloc_ireg (cfg);
12936 if (locals_var->opcode == OP_REGOFFSET) {
12937 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12938 } else if (locals_var->opcode == OP_REGVAR) {
12939 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12941 g_assert_not_reached ();
12943 /* Compute the address */
12944 ins->opcode = OP_PADD;
12948 mono_bblock_insert_before_ins (bb, ins, load3);
12949 mono_bblock_insert_before_ins (bb, load3, load2);
12951 mono_bblock_insert_before_ins (bb, load2, load);
12953 g_assert (var->opcode == OP_REGOFFSET);
12955 ins->opcode = OP_ADD_IMM;
12956 ins->sreg1 = var->inst_basereg;
12957 ins->inst_imm = var->inst_offset;
12960 *need_local_opts = TRUE;
12961 spec = INS_INFO (ins->opcode);
12964 if (ins->opcode < MONO_CEE_LAST) {
12965 mono_print_ins (ins);
12966 g_assert_not_reached ();
12970 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12974 if (MONO_IS_STORE_MEMBASE (ins)) {
12975 tmp_reg = ins->dreg;
12976 ins->dreg = ins->sreg2;
12977 ins->sreg2 = tmp_reg;
12980 spec2 [MONO_INST_DEST] = ' ';
12981 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12982 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12983 spec2 [MONO_INST_SRC3] = ' ';
12985 } else if (MONO_IS_STORE_MEMINDEX (ins))
12986 g_assert_not_reached ();
12991 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12992 printf ("\t %.3s %d", spec, ins->dreg);
12993 num_sregs = mono_inst_get_src_registers (ins, sregs);
12994 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12995 printf (" %d", sregs [srcindex]);
13002 regtype = spec [MONO_INST_DEST];
13003 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13006 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13007 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13008 MonoInst *store_ins;
13010 MonoInst *def_ins = ins;
13011 int dreg = ins->dreg; /* The original vreg */
13013 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13015 if (var->opcode == OP_REGVAR) {
13016 ins->dreg = var->dreg;
13017 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13019 * Instead of emitting a load+store, use a _membase opcode.
13021 g_assert (var->opcode == OP_REGOFFSET);
13022 if (ins->opcode == OP_MOVE) {
13026 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13027 ins->inst_basereg = var->inst_basereg;
13028 ins->inst_offset = var->inst_offset;
13031 spec = INS_INFO (ins->opcode);
13035 g_assert (var->opcode == OP_REGOFFSET);
13037 prev_dreg = ins->dreg;
13039 /* Invalidate any previous lvreg for this vreg */
13040 vreg_to_lvreg [ins->dreg] = 0;
13044 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13046 store_opcode = OP_STOREI8_MEMBASE_REG;
13049 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13051 #if SIZEOF_REGISTER != 8
13052 if (regtype == 'l') {
13053 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13054 mono_bblock_insert_after_ins (bb, ins, store_ins);
13055 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13056 mono_bblock_insert_after_ins (bb, ins, store_ins);
13057 def_ins = store_ins;
13062 g_assert (store_opcode != OP_STOREV_MEMBASE);
13064 /* Try to fuse the store into the instruction itself */
13065 /* FIXME: Add more instructions */
13066 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13067 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13068 ins->inst_imm = ins->inst_c0;
13069 ins->inst_destbasereg = var->inst_basereg;
13070 ins->inst_offset = var->inst_offset;
13071 spec = INS_INFO (ins->opcode);
13072 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13073 ins->opcode = store_opcode;
13074 ins->inst_destbasereg = var->inst_basereg;
13075 ins->inst_offset = var->inst_offset;
13079 tmp_reg = ins->dreg;
13080 ins->dreg = ins->sreg2;
13081 ins->sreg2 = tmp_reg;
13084 spec2 [MONO_INST_DEST] = ' ';
13085 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13086 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13087 spec2 [MONO_INST_SRC3] = ' ';
13089 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13090 // FIXME: The backends expect the base reg to be in inst_basereg
13091 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13093 ins->inst_basereg = var->inst_basereg;
13094 ins->inst_offset = var->inst_offset;
13095 spec = INS_INFO (ins->opcode);
13097 /* printf ("INS: "); mono_print_ins (ins); */
13098 /* Create a store instruction */
13099 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13101 /* Insert it after the instruction */
13102 mono_bblock_insert_after_ins (bb, ins, store_ins);
13104 def_ins = store_ins;
13107 * We can't assign ins->dreg to var->dreg here, since the
13108 * sregs could use it. So set a flag, and do it after
13111 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13112 dest_has_lvreg = TRUE;
13117 if (def_ins && !live_range_start [dreg]) {
13118 live_range_start [dreg] = def_ins;
13119 live_range_start_bb [dreg] = bb;
13122 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13125 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13126 tmp->inst_c1 = dreg;
13127 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13134 num_sregs = mono_inst_get_src_registers (ins, sregs);
13135 for (srcindex = 0; srcindex < 3; ++srcindex) {
13136 regtype = spec [MONO_INST_SRC1 + srcindex];
13137 sreg = sregs [srcindex];
13139 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13140 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13141 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13142 MonoInst *use_ins = ins;
13143 MonoInst *load_ins;
13144 guint32 load_opcode;
13146 if (var->opcode == OP_REGVAR) {
13147 sregs [srcindex] = var->dreg;
13148 //mono_inst_set_src_registers (ins, sregs);
13149 live_range_end [sreg] = use_ins;
13150 live_range_end_bb [sreg] = bb;
13152 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13155 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13156 /* var->dreg is a hreg */
13157 tmp->inst_c1 = sreg;
13158 mono_bblock_insert_after_ins (bb, ins, tmp);
13164 g_assert (var->opcode == OP_REGOFFSET);
13166 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13168 g_assert (load_opcode != OP_LOADV_MEMBASE);
13170 if (vreg_to_lvreg [sreg]) {
13171 g_assert (vreg_to_lvreg [sreg] != -1);
13173 /* The variable is already loaded to an lvreg */
13174 if (G_UNLIKELY (cfg->verbose_level > 2))
13175 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13176 sregs [srcindex] = vreg_to_lvreg [sreg];
13177 //mono_inst_set_src_registers (ins, sregs);
13181 /* Try to fuse the load into the instruction */
13182 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13183 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13184 sregs [0] = var->inst_basereg;
13185 //mono_inst_set_src_registers (ins, sregs);
13186 ins->inst_offset = var->inst_offset;
13187 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13188 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13189 sregs [1] = var->inst_basereg;
13190 //mono_inst_set_src_registers (ins, sregs);
13191 ins->inst_offset = var->inst_offset;
13193 if (MONO_IS_REAL_MOVE (ins)) {
13194 ins->opcode = OP_NOP;
13197 //printf ("%d ", srcindex); mono_print_ins (ins);
13199 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13201 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13202 if (var->dreg == prev_dreg) {
13204 * sreg refers to the value loaded by the load
13205 * emitted below, but we need to use ins->dreg
13206 * since it refers to the store emitted earlier.
13210 g_assert (sreg != -1);
13211 vreg_to_lvreg [var->dreg] = sreg;
13212 g_assert (lvregs_len < 1024);
13213 lvregs [lvregs_len ++] = var->dreg;
13217 sregs [srcindex] = sreg;
13218 //mono_inst_set_src_registers (ins, sregs);
13220 #if SIZEOF_REGISTER != 8
13221 if (regtype == 'l') {
13222 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13223 mono_bblock_insert_before_ins (bb, ins, load_ins);
13224 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13225 mono_bblock_insert_before_ins (bb, ins, load_ins);
13226 use_ins = load_ins;
13231 #if SIZEOF_REGISTER == 4
13232 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13234 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13235 mono_bblock_insert_before_ins (bb, ins, load_ins);
13236 use_ins = load_ins;
13240 if (var->dreg < orig_next_vreg) {
13241 live_range_end [var->dreg] = use_ins;
13242 live_range_end_bb [var->dreg] = bb;
13245 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13248 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13249 tmp->inst_c1 = var->dreg;
13250 mono_bblock_insert_after_ins (bb, ins, tmp);
13254 mono_inst_set_src_registers (ins, sregs);
13256 if (dest_has_lvreg) {
13257 g_assert (ins->dreg != -1);
13258 vreg_to_lvreg [prev_dreg] = ins->dreg;
13259 g_assert (lvregs_len < 1024);
13260 lvregs [lvregs_len ++] = prev_dreg;
13261 dest_has_lvreg = FALSE;
13265 tmp_reg = ins->dreg;
13266 ins->dreg = ins->sreg2;
13267 ins->sreg2 = tmp_reg;
13270 if (MONO_IS_CALL (ins)) {
13271 /* Clear vreg_to_lvreg array */
13272 for (i = 0; i < lvregs_len; i++)
13273 vreg_to_lvreg [lvregs [i]] = 0;
13275 } else if (ins->opcode == OP_NOP) {
13277 MONO_INST_NULLIFY_SREGS (ins);
13280 if (cfg->verbose_level > 2)
13281 mono_print_ins_index (1, ins);
13284 /* Extend the live range based on the liveness info */
13285 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13286 for (i = 0; i < cfg->num_varinfo; i ++) {
13287 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13289 if (vreg_is_volatile (cfg, vi->vreg))
13290 /* The liveness info is incomplete */
13293 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13294 /* Live from at least the first ins of this bb */
13295 live_range_start [vi->vreg] = bb->code;
13296 live_range_start_bb [vi->vreg] = bb;
13299 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13300 /* Live at least until the last ins of this bb */
13301 live_range_end [vi->vreg] = bb->last_ins;
13302 live_range_end_bb [vi->vreg] = bb;
13308 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13310 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13311 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13313 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13314 for (i = 0; i < cfg->num_varinfo; ++i) {
13315 int vreg = MONO_VARINFO (cfg, i)->vreg;
13318 if (live_range_start [vreg]) {
13319 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13321 ins->inst_c1 = vreg;
13322 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13324 if (live_range_end [vreg]) {
13325 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13327 ins->inst_c1 = vreg;
13328 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13329 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13331 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13337 if (cfg->gsharedvt_locals_var_ins) {
13338 /* Nullify if unused */
13339 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13340 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13343 g_free (live_range_start);
13344 g_free (live_range_end);
13345 g_free (live_range_start_bb);
13346 g_free (live_range_end_bb);
13351 * - use 'iadd' instead of 'int_add'
13352 * - handling ovf opcodes: decompose in method_to_ir.
13353 * - unify iregs/fregs
13354 * -> partly done, the missing parts are:
13355 * - a more complete unification would involve unifying the hregs as well, so
13356 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13357 * would no longer map to the machine hregs, so the code generators would need to
13358 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13359 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13360 * fp/non-fp branches speeds it up by about 15%.
13361 * - use sext/zext opcodes instead of shifts
13363 * - get rid of TEMPLOADs if possible and use vregs instead
13364 * - clean up usage of OP_P/OP_ opcodes
13365 * - cleanup usage of DUMMY_USE
13366 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13368 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13369 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13370 * - make sure handle_stack_args () is called before the branch is emitted
13371 * - when the new IR is done, get rid of all unused stuff
13372 * - COMPARE/BEQ as separate instructions or unify them ?
13373 * - keeping them separate allows specialized compare instructions like
13374 * compare_imm, compare_membase
13375 * - most back ends unify fp compare+branch, fp compare+ceq
13376 * - integrate mono_save_args into inline_method
13377 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13378 * - handle long shift opts on 32 bit platforms somehow: they require
13379 * 3 sregs (2 for arg1 and 1 for arg2)
13380 * - make byref a 'normal' type.
13381 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13382 * variable if needed.
13383 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13384 * like inline_method.
13385 * - remove inlining restrictions
13386 * - fix LNEG and enable cfold of INEG
13387 * - generalize x86 optimizations like ldelema as a peephole optimization
13388 * - add store_mem_imm for amd64
13389 * - optimize the loading of the interruption flag in the managed->native wrappers
13390 * - avoid special handling of OP_NOP in passes
13391 * - move code inserting instructions into one function/macro.
13392 * - try a coalescing phase after liveness analysis
13393 * - add float -> vreg conversion + local optimizations on !x86
13394 * - figure out how to handle decomposed branches during optimizations, ie.
13395 * compare+branch, op_jump_table+op_br etc.
13396 * - promote RuntimeXHandles to vregs
13397 * - vtype cleanups:
13398 * - add a NEW_VARLOADA_VREG macro
13399 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13400 * accessing vtype fields.
13401 * - get rid of I8CONST on 64 bit platforms
13402 * - dealing with the increase in code size due to branches created during opcode
13404 * - use extended basic blocks
13405 * - all parts of the JIT
13406 * - handle_global_vregs () && local regalloc
13407 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13408 * - sources of increase in code size:
13411 * - isinst and castclass
13412 * - lvregs not allocated to global registers even if used multiple times
13413 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13415 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13416 * - add all micro optimizations from the old JIT
13417 * - put tree optimizations into the deadce pass
13418 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13419 * specific function.
13420 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13421 * fcompare + branchCC.
13422 * - create a helper function for allocating a stack slot, taking into account
13423 * MONO_CFG_HAS_SPILLUP.
13425 * - merge the ia64 switch changes.
13426 * - optimize mono_regstate2_alloc_int/float.
13427 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13428 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13429 * parts of the tree could be separated by other instructions, killing the tree
13430 * arguments, or stores killing loads etc. Also, should we fold loads into other
13431 * instructions if the result of the load is used multiple times ?
13432 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13433 * - LAST MERGE: 108395.
13434 * - when returning vtypes in registers, generate IR and append it to the end of the
13435 * last bb instead of doing it in the epilog.
13436 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13444 - When to decompose opcodes:
13445 - earlier: this makes some optimizations hard to implement, since the low level IR
13446 no longer contains the necessary information. But it is easier to do.
13447 - later: harder to implement, enables more optimizations.
13448 - Branches inside bblocks:
13449 - created when decomposing complex opcodes.
13450 - branches to another bblock: harmless, but not tracked by the branch
13451 optimizations, so need to branch to a label at the start of the bblock.
13452 - branches to inside the same bblock: very problematic, trips up the local
13453 reg allocator. Can be fixed by splitting the current bblock, but that is a
13454 complex operation, since some local vregs can become global vregs etc.
13455 - Local/global vregs:
13456 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13457 local register allocator.
13458 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13459 structure, created by mono_create_var (). Assigned to hregs or the stack by
13460 the global register allocator.
13461 - When to do optimizations like alu->alu_imm:
13462 - earlier -> saves work later on since the IR will be smaller/simpler
13463 - later -> can work on more instructions
13464 - Handling of valuetypes:
13465 - When a vtype is pushed on the stack, a new temporary is created, an
13466 instruction computing its address (LDADDR) is emitted and pushed on
13467 the stack. Need to optimize cases when the vtype is used immediately as in
13468 argument passing, stloc etc.
13469 - Instead of the to_end stuff in the old JIT, simply call the function handling
13470 the values on the stack before emitting the last instruction of the bb.
13473 #endif /* DISABLE_JIT */