2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Relative cost the inliner's heuristics assign to a branch. */
72 #define BRANCH_COST 10
/* Maximum IL size (in bytes) of a callee considered for inlining. */
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE:
 *   Give up inlining the current callee: optionally log the reason and
 * jump to the inline_failure label.  Only acts while compiling an inlined
 * body (cfg->method != method) of a non-wrapper method.
 * NOTE(review): the macro's closing lines are not visible in this chunk.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* CHECK_CFG_EXCEPTION: stop IR generation if an exception was already recorded on the cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE:
 *   Record a MethodAccessException on the cfg with a message naming both the
 * inaccessible method and the caller, free the temporaries and bail out to
 * exception_exit.
 */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE:
 *   Same as METHOD_ACCESS_FAILURE, but for an inaccessible field.
 */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE:
 *   Abort compilation of a generic-shared method (optionally logging the
 * opcode that could not be shared) so it gets compiled unshared instead.
 */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/*
 * GSHAREDVT_FAILURE:
 *   Abort a gsharedvt compilation for an opcode the gsharedvt path cannot
 * handle; records the failing opcode and source location in the message.
 */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* OUT_OF_MEMORY_FAILURE: record an OutOfMemoryException on the cfg and bail out. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Cached icall signatures for the JIT helper trampolines; filled in once by mono_create_helper_signatures (). */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* First expansion of MINI_OP/MINI_OP3: emit each opcode's dest/src1/src2(/src3) register specifiers, padding 2-sreg opcodes with ' '. Used with mini-ops.h to build the metadata table. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion: count how many source registers each opcode uses, for ins_sreg_counts below. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* MONO_INIT_VARINFO: initialize the liveness/range bookkeeping of variable VI (closing lines not visible in this chunk). */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Appears to map TYPE to the register-move opcode used for values of
 * that type (the g_error below mentions "type_to_regstore").
 * NOTE(review): many case labels and return statements of this switch are
 * not visible in this chunk — do not infer the full mapping from here.
 */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled via their container class. */
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print basic block BB prefixed by MSG — its number, its
 * incoming and outgoing edges (block number and dfn), then every
 * instruction in the block.
 */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED:
 *   Handle unverifiable IL: under gsharedvt, fall back to compiling the
 * concrete instantiation; otherwise optionally break into the debugger
 * (closing lines not visible in this chunk).
 */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* LOAD_ERROR: abort on a metadata load failure (or break into the debugger if requested). */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* TYPE_LOAD_ERROR: like LOAD_ERROR, but records the offending class in cfg->exception_ptr first. */
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK:
 *   Fetch (or lazily create and register) the basic block starting at IL
 * offset IP, validating that IP lies inside the method body.
 */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
/*
 * EMIT_NEW_X86_LEA:
 *   x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh managed-pointer register.
 */
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * ADD_WIDEN_OP:
 *   On 64 bit targets, sign-extend an I4 operand mixed with a PTR operand
 * before a binary op; a no-op on 32 bit targets.
 */
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP:
 *   Pop two values, emit the type-specialized binary opcode for OP, and
 * push the (possibly decomposed) result.
 */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/*
 * ADD_UNOP:
 *   Pop one value, emit the type-specialized unary opcode for OP, and
 * push the (possibly decomposed) result.
 */
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND:
 *   Emit a compare + conditional branch: link the current bblock to the
 * branch target (true edge) and to NEXT_BLOCK or the fall-through IL
 * offset (false edge), spilling any stack items left at the block
 * boundary first.
 */
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
/*
 * Adds TO to FROM's out_bb array and FROM to TO's in_bb array, unless the
 * edge already exists; the arrays are grown by reallocating from the cfg
 * mempool.  NOTE(review): several lines (duplicate-edge bookkeeping, array
 * count updates) are not visible in this chunk.
 */
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
484 if (from->cil_code) {
/* NOTE(review): format strings are inconsistent — "IL%04x" here vs "IL_%04x" for the target. */
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* Skip the out-edge update if TO is already a successor of FROM. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Symmetric update for TO's in-edges. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
/* Scan every clause; token layout is ((clause index + 1) << 8) | region kind | clause flags. */
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* Filter blocks live between filter_offset and handler_offset. */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Otherwise the handler is a catch. */
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the exception clauses of kind TYPE that contain
 * IP but not TARGET — i.e. the handlers a branch from IP to TARGET
 * would leave and which therefore must be run.
 */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer save variable for exception REGION, creating
 * and caching it in cfg->spvars on first use.
 */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception-object variable for the handler at IL OFFSET,
 * creating and caching it in cfg->exvars on first use.
 */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE) and inst->klass for TYPE.
 * NOTE(review): several case labels and break statements are not visible in this chunk. */
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only reachable under generic sharing. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* bin_num_table [t1][t2]: result stack type of an arithmetic binop, STACK_INV if the pair is illegal.
 * Row/column order is the STACK_* enum: Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* neg_table: result type of a unary negation per operand type (declaration line not visible here). */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* bin_int_table: like bin_num_table but for integer-only binops (and/or/xor/rem...). */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* bin_comp_table: non-zero when the two types are comparable; values >1 appear to flag
 * restricted comparisons (see its use with "& 1" in type_from_op). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* shift_table: result type of shl/shr/shr.un — the shift count (column) must be I4 or PTR. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic opcode to obtain the I/L/P/F/P-specific one, indexed by the result stack type. */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels, breaks and #else branches of this function
 * are not visible in this chunk; the visible lines are kept byte-identical. */
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
/* Arithmetic binops: validate the operand pair, then specialize the opcode via binops_op_map. */
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I compare based on the first operand's width. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and ceq/cgt/clt family: specialize via the branch/compare maps. */
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
/* Conversions: result type fixed by the opcode, source variant chosen from src1. */
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Stack-type table indexed by a MONO_TYPE_*-like ordering (its declaration line is not visible in this chunk). */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: compatibility of eval-stack types with parameter types (rows not visible in this chunk). */
1058 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the instruction arguments in ARGS match SIG (byref-ness,
 * reference vs value kinds, floating point types).
 * NOTE(review): many lines, including the return statements, are not
 * visible in this chunk.
 */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
/* R8 on the stack must match a floating point parameter. */
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *
 *   Return (creating lazily) the variable holding the GOT address on
 * architectures that need an explicit GOT var.
 * NOTE(review): the non-AOT/#else return path is not visible in this chunk.
 */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map the evaluation-stack type of INS back to a MonoType*.
 * NOTE(review): some case labels (e.g. the one yielding this_arg) are not
 * visible in this chunk.
 */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to its STACK_* evaluation-stack type.
 * NOTE(review): the return statements between the case groups are not
 * visible in this chunk.
 */
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class accessed by a ldelem/stelem IL OPCODE.
 * NOTE(review): most of the case labels are not visible in this chunk —
 * only the returned classes and the ldelem.ref/stelem.ref cases are.
 */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a variable to carry a value of INS's stack type across basic
 * block boundaries at stack SLOT.  Variables are cached in cfg->intvars
 * (keyed by slot and stack type) so joins reuse the same variable;
 * slots beyond max_stack (from inlining) always get a fresh variable.
 */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT items in SP to locals at a basic-block boundary so the
 * target blocks can reload them (see the comment block above). Sets
 * cfg->unverifiable when the stack depths disagree at a join point.
 */
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing the in_stack of a successor, when one already has it */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpose, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into successors which don't have one yet */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
/* stack-depth mismatch at a join point: mark method unverifiable */
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Emit the actual stores into the shared temporaries */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/* With AOT the interface id is not known at compile time, so it is loaded
 * via an ADJUSTED_IID patch; otherwise the offset is a constant. */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: negative constant offset indexes the array stored before the vtable */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Three variants: compressed bitmap (icall), AOT (iid via patch, computed
 * shift/mask), and JIT (constant byte index and mask).
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* compressed bitmaps are decoded by a runtime helper */
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
/* iid is only known at load time: compute byte = bitmap[iid >> 3],
 * bit = 1 << (iid & 7), result = byte & bit */
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant, fold index and mask */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: check the interface bitmap embedded in MonoClass. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: check the interface bitmap embedded in MonoVTable. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whether the interface id of @klass is smaller
1527 * than the value given by max_iid_reg.
/* On failure: branch to false_target when given, otherwise throw
 * InvalidCastException via a conditional exception. */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable.max_interface_id (16-bit unsigned field) then delegate */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass.max_interface_id (16-bit unsigned field) then delegate */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test: check whether the class in KLASS_REG
 * is a subtype of KLASS by comparing the supertypes[idepth-1] entry against
 * KLASS (via KLASS_INS dreg, an AOT class constant, or an immediate).
 * Branches to true_target on match; false_target is used for the idepth
 * guard when the supertable may not cover klass->idepth.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* runtime idepth guard: only needed when the default supertable is too small */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst subtype test with no explicit klass instruction. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against a vtable in VTABLE_REG:
 * first guard the interface id against max_interface_id, then test the
 * vtable's interface bitmap. Branches to true_target on success when given,
 * otherwise throws InvalidCastException on failure.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface-cast sequence, but reads max_interface_id and the
 * interface bitmap from MonoClass instead of MonoVTable. */
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS (or the
 * class held in KLASS_INST when provided), throwing InvalidCastException on
 * mismatch. NOTE(review): the `if (klass_inst)` guard line is elided here.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no explicit klass instruction. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (instead of throwing, as mini_emit_class_check does).
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below is mutually recursive
 * with this wrapper (used for arrays of arrays). */
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Handles the array case (rank +
 * element-class checks, with special-casing around System.Enum) and the
 * non-array case (supertypes-table walk). OBJ_REG may be -1 to skip the
 * vector (SZARRAY bounds == NULL) check.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* element type special cases: object/enum interplay mirrors the CLI
 * rule that enum arrays are castable to arrays of their base type */
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: supertypes-table subtype check, throwing variant */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no explicit klass instruction. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR which fills SIZE bytes at DESTREG+OFFSET with VAL
 * (only val == 0 is supported, see the assert). Small aligned sizes use a
 * single immediate store; larger sizes emit a run of register stores,
 * widest-first, honoring alignment and NO_UNALIGNED_ACCESS.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: materialize val in a register then store repeatedly */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1768 /* This could be optimized further if necessary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR which copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET via load/store pairs, widest-width-first according to
 * ALIGN and NO_UNALIGNED_ACCESS; trailing bytes use narrower widths.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
1821 /* This could be optimized further if necessary */
/* unaligned prefix: copy byte-by-byte until alignment is reached */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1874 * Emit IR to push the current LMF onto the LMF stack.
1877 emit_push_lmf (MonoCompile *cfg)
1880 * Emit IR to push the LMF:
1881 * lmf_addr = <lmf_addr from tls>
1882 * lmf->lmf_addr = lmf_addr
1883 * lmf->prev_lmf = *lmf_addr
1886 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1887 MonoInst *ins, *lmf_ins;
/* Prefer an arch intrinsic to read the LMF address from TLS;
 * fall back to the mono_get_lmf_addr JIT icall */
1892 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1894 MONO_ADD_INS (cfg->cbb, lmf_ins);
1896 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1897 lmf_addr_reg = lmf_ins->dreg;
1899 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1900 lmf_reg = ins->dreg;
/* cache lmf_addr in a variable so emit_pop_lmf () can reuse it */
1902 if (!cfg->lmf_addr_var)
1903 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1904 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, cfg->lmf_addr_var->dreg, lmf_ins->dreg);
1905 prev_lmf_reg = alloc_preg (cfg);
1906 /* Save previous_lmf */
1907 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_addr_reg, 0);
1908 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF as the current one: *lmf_addr = lmf */
1910 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, lmf_reg);
1916 * Emit IR to pop the current LMF from the LMF stack.
1919 emit_pop_lmf (MonoCompile *cfg)
1921 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1928 * Emit IR to pop the LMF:
1929 * *(lmf->lmf_addr) = lmf->prev_lmf
/* emitted into the method's exit bblock */
1931 cfg->cbb = cfg->bb_exit;
1932 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1933 lmf_reg = ins->dreg;
1934 /* This could be called before emit_push_lmf () */
1935 if (!cfg->lmf_addr_var)
1936 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1937 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1938 prev_lmf_reg = alloc_preg (cfg);
1939 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1940 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method return TYPE to the matching call opcode family
 * (CALL / LCALL / FCALL / VCALL / VOIDCALL), selecting the _REG variant
 * for indirect calls (calli) and the VIRT variant for virtual calls.
 * Enums and generic instances are unwrapped and re-dispatched.
 */
1944 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are handled as plain pointers (elided guard above) */
1947 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1950 type = mini_get_basic_type_from_generic (gsctx, type);
1951 switch (type->type) {
1952 case MONO_TYPE_VOID:
1953 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1956 case MONO_TYPE_BOOLEAN:
1959 case MONO_TYPE_CHAR:
1962 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1966 case MONO_TYPE_FNPTR:
1967 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1968 case MONO_TYPE_CLASS:
1969 case MONO_TYPE_STRING:
1970 case MONO_TYPE_OBJECT:
1971 case MONO_TYPE_SZARRAY:
1972 case MONO_TYPE_ARRAY:
1973 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1976 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1979 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1980 case MONO_TYPE_VALUETYPE:
1981 if (type->data.klass->enumtype) {
/* enum: retry with its underlying basetype */
1982 type = mono_class_enum_basetype (type->data.klass);
1985 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1986 case MONO_TYPE_TYPEDBYREF:
1987 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1988 case MONO_TYPE_GENERICINST:
1989 type = &type->data.generic_class->container_class->byval_arg;
1992 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are returned by reference, treated as vtypes */
1994 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1996 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2002 * target_type_is_incompatible:
2003 * @cfg: MonoCompile context
2005 * Check that the item @arg on the evaluation stack can be stored
2006 * in the target type (can be a local, or field, etc).
2007 * The cfg arg can be used to check if we need verification or just
2010 * Returns: non-0 value if arg can't be stored on a target.
2013 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2015 MonoType *simple_type;
2018 if (target->byref) {
2019 /* FIXME: check that the pointed to types match */
2020 if (arg->type == STACK_MP)
2021 return arg->klass != mono_class_from_mono_type (target);
2022 if (arg->type == STACK_PTR)
2027 simple_type = mono_type_get_underlying_type (target);
2028 switch (simple_type->type) {
2029 case MONO_TYPE_VOID:
2033 case MONO_TYPE_BOOLEAN:
2036 case MONO_TYPE_CHAR:
2039 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2043 /* STACK_MP is needed when setting pinned locals */
2044 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2049 case MONO_TYPE_FNPTR:
2051 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2052 * in native int. (#688008).
2054 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2057 case MONO_TYPE_CLASS:
2058 case MONO_TYPE_STRING:
2059 case MONO_TYPE_OBJECT:
2060 case MONO_TYPE_SZARRAY:
2061 case MONO_TYPE_ARRAY:
2062 if (arg->type != STACK_OBJ)
2064 /* FIXME: check type compatibility */
2068 if (arg->type != STACK_I8)
2073 if (arg->type != STACK_R8)
2076 case MONO_TYPE_VALUETYPE:
/* value types must match the exact class carried on the stack */
2077 if (arg->type != STACK_VTYPE)
2079 klass = mono_class_from_mono_type (simple_type);
2080 if (klass != arg->klass)
2083 case MONO_TYPE_TYPEDBYREF:
2084 if (arg->type != STACK_VTYPE)
2086 klass = mono_class_from_mono_type (simple_type);
2087 if (klass != arg->klass)
2090 case MONO_TYPE_GENERICINST:
2091 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2092 if (arg->type != STACK_VTYPE)
2094 klass = mono_class_from_mono_type (simple_type);
2095 if (klass != arg->klass)
2099 if (arg->type != STACK_OBJ)
2101 /* FIXME: check type compatibility */
2105 case MONO_TYPE_MVAR:
/* only reachable under generic sharing; vtype vars need STACK_VTYPE */
2106 g_assert (cfg->generic_sharing_context);
2107 if (mini_type_var_is_vt (cfg, simple_type)) {
2108 if (arg->type != STACK_VTYPE)
2111 if (arg->type != STACK_OBJ)
2116 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2122 * Prepare arguments for passing to a function call.
2123 * Return a non-zero value if the arguments can't be passed to the given
2125 * The type checks are not yet complete and some conversions may need
2126 * casts on 32 or 64 bit architectures.
2128 * FIXME: implement this using target_type_is_incompatible ()
2131 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2133 MonoType *simple_type;
/* check the implicit 'this' argument first (elided hasthis guard above) */
2137 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2141 for (i = 0; i < sig->param_count; ++i) {
2142 if (sig->params [i]->byref) {
2143 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2147 simple_type = sig->params [i];
2148 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2150 switch (simple_type->type) {
2151 case MONO_TYPE_VOID:
2156 case MONO_TYPE_BOOLEAN:
2159 case MONO_TYPE_CHAR:
2162 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2168 case MONO_TYPE_FNPTR:
2169 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2172 case MONO_TYPE_CLASS:
2173 case MONO_TYPE_STRING:
2174 case MONO_TYPE_OBJECT:
2175 case MONO_TYPE_SZARRAY:
2176 case MONO_TYPE_ARRAY:
2177 if (args [i]->type != STACK_OBJ)
2182 if (args [i]->type != STACK_I8)
2187 if (args [i]->type != STACK_R8)
2190 case MONO_TYPE_VALUETYPE:
2191 if (simple_type->data.klass->enumtype) {
/* enum: recheck against its underlying basetype */
2192 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2195 if (args [i]->type != STACK_VTYPE)
2198 case MONO_TYPE_TYPEDBYREF:
2199 if (args [i]->type != STACK_VTYPE)
2202 case MONO_TYPE_GENERICINST:
2203 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2206 case MONO_TYPE_MVAR:
2208 if (args [i]->type != STACK_VTYPE)
2212 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart.
 * NOTE(review): most case labels are elided in this view. */
2220 callvirt_to_call (int opcode)
2225 case OP_VOIDCALLVIRT:
2234 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the OP_*CALL_MEMBASE form used when the
 * target address is loaded from the vtable. */
2241 callvirt_to_call_membase (int opcode)
2245 return OP_CALL_MEMBASE;
2246 case OP_VOIDCALLVIRT:
2247 return OP_VOIDCALL_MEMBASE;
2249 return OP_FCALL_MEMBASE;
2251 return OP_LCALL_MEMBASE;
2253 return OP_VCALL_MEMBASE;
2255 g_assert_not_reached ();
2261 #ifdef MONO_ARCH_HAVE_IMT
2262 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Materialize the IMT argument (the target MonoMethod, or IMT_ARG when
 * given) into a register and attach it to CALL. The LLVM path records the
 * register in call->imt_arg_reg; the JIT path passes it in
 * MONO_ARCH_IMT_REG, or defers to mono_arch_emit_imt_argument ().
 */
2264 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2268 if (COMPILE_LLVM (cfg)) {
2269 method_reg = alloc_preg (cfg);
2272 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2273 } else if (cfg->compile_aot) {
2274 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2277 MONO_INST_NEW (cfg, ins, OP_PCONST);
2278 ins->inst_p0 = method;
2279 ins->dreg = method_reg;
2280 MONO_ADD_INS (cfg->cbb, ins);
2284 call->imt_arg_reg = method_reg;
2286 #ifdef MONO_ARCH_IMT_REG
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2289 /* Need this to keep the IMT arg alive */
2290 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2295 #ifdef MONO_ARCH_IMT_REG
/* non-LLVM path: same constant-materialization sequence, IMT reg required */
2296 method_reg = alloc_preg (cfg);
2299 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2300 } else if (cfg->compile_aot) {
2301 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2304 MONO_INST_NEW (cfg, ins, OP_PCONST);
2305 ins->inst_p0 = method;
2306 ins->dreg = method_reg;
2307 MONO_ADD_INS (cfg->cbb, ins);
2310 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2312 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch entry from MP describing a patch of TYPE at
 * IP targeting TARGET. NOTE(review): the ip/type field assignments are
 * elided in this view. */
2317 static MonoJumpInfo *
2318 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2320 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2324 ji->data.target = target;
/* Return the generic context usage of KLASS, but only when compiling with
 * generic sharing; otherwise the elided fallback applies. */
2330 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2332 if (cfg->generic_sharing_context)
2333 return mono_class_check_context_used (klass);
/* Return the generic context usage of METHOD, but only when compiling with
 * generic sharing; otherwise the elided fallback applies. */
2339 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2341 if (cfg->generic_sharing_context)
2342 return mono_method_check_context_used (method);
2348 * check_method_sharing:
2350 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2353 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2355 gboolean pass_vtable = FALSE;
2356 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods on generic classes may need the vtable */
2358 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2359 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2360 gboolean sharable = FALSE;
2362 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2365 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2366 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2367 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2369 sharable = sharing_enabled && context_sharable;
2373 * Pass vtable iff target method might
2374 * be shared, which means that sharing
2375 * is enabled for its class and its
2376 * context is sharable (and it's not a
2379 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst set) take an mrgctx instead of the vtable */
2383 if (mini_method_get_context (cmethod) &&
2384 mini_method_get_context (cmethod)->method_inst) {
2385 g_assert (!pass_vtable);
2387 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2390 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2391 MonoGenericContext *context = mini_method_get_context (cmethod);
2392 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2394 if (sharing_enabled && context_sharable)
2396 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2401 if (out_pass_vtable)
2402 *out_pass_vtable = pass_vtable;
2403 if (out_pass_mrgctx)
2404 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS: pick the call opcode (tailcall,
 * calli, virtual), set up the valuetype return (vret_var or an
 * OP_OUTARG_VTRETADDR temp), apply the soft-float r8->r4 argument
 * conversion when needed, and let the backend (LLVM or arch) lower the
 * outgoing arguments. Returns the call instruction (not yet added to a bb).
 */
2407 inline static MonoCallInst *
2408 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2409 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2412 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2417 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2419 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2422 call->signature = sig;
2423 call->rgctx_reg = rgctx;
2425 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* tailcall path: reuse the caller's vret_addr for the vtype return */
2428 if (mini_type_is_vtype (cfg, sig->ret)) {
2429 call->vret_var = cfg->vret_addr;
2430 //g_assert_not_reached ();
2432 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2433 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2436 temp->backend.is_pinvoke = sig->pinvoke;
2439 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2440 * address of return value to increase optimization opportunities.
2441 * Before vtype decomposition, the dreg of the call ins itself represents the
2442 * fact the call modifies the return value. After decomposition, the call will
2443 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2444 * will be transformed into an LDADDR.
2446 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2447 loada->dreg = alloc_preg (cfg);
2448 loada->inst_p0 = temp;
2449 /* We reference the call too since call->dreg could change during optimization */
2450 loada->inst_p1 = call;
2451 MONO_ADD_INS (cfg->cbb, loada);
2453 call->inst.dreg = temp->dreg;
2455 call->vret_var = loada;
2456 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2457 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2460 if (COMPILE_SOFT_FLOAT (cfg)) {
2462 * If the call has a float argument, we would need to do an r8->r4 conversion using
2463 * an icall, but that cannot be done during the call sequence since it would clobber
2464 * the call registers + the stack. So we do it before emitting the call.
2466 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2468 MonoInst *in = call->args [i];
2470 if (i >= sig->hasthis)
2471 t = sig->params [i - sig->hasthis];
2473 t = &mono_defaults.int_class->byval_arg;
2474 t = mono_type_get_underlying_type (t);
2476 if (!t->byref && t->type == MONO_TYPE_R4) {
2477 MonoInst *iargs [1];
2481 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2483 /* The result will be in an int vreg */
2484 call->args [i] = conv;
2490 call->need_unbox_trampoline = unbox_trampoline;
2493 if (COMPILE_LLVM (cfg))
2494 mono_llvm_emit_call (cfg, call);
2496 mono_arch_emit_call (cfg, call);
2498 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing stack area required by any call */
2501 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2502 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Mark CALL as receiving the runtime-generic-context argument in vreg
 * RGCTX_REG. On targets that define MONO_ARCH_RGCTX_REG the vreg is pinned
 * to that hard register and the cfg is flagged as using it.
 * NOTE(review): the #else branch and closing #endif are elided from this
 * listing — confirm against the full source before editing.
 */
2508 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2510 #ifdef MONO_ARCH_RGCTX_REG
2511 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2512 cfg->uses_rgctx_reg = TRUE;
2513 call->rgctx_reg = TRUE;
/* Also record the vreg on the call — presumably for later lowering passes; verify */
2515 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the address in ADDR->dreg with signature
 * SIG. IMT_ARG and RGCTX_ARG, when non-NULL, are forwarded in their
 * dedicated argument registers. Returns the emitted call instruction.
 */
2522 inline static MonoInst*
2523 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value to a scratch preg so argument setup cannot clobber it
 * (the guarding 'if (rgctx_arg)' line is elided in this listing — confirm) */
2529 rgctx_reg = mono_alloc_preg (cfg);
2530 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* calli == TRUE; not virtual, not tail, no unbox trampoline */
2533 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The call target address lives in sreg1 for indirect calls */
2535 call->inst.sreg1 = addr->dreg;
2538 emit_imt_argument (cfg, call, NULL, imt_arg);
2540 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2543 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2545 return (MonoInst*)call;
2549 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2552 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2554 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.
 * THIS is the receiver (NULL for non-virtual calls), TAIL requests a tail
 * call, and IMT_ARG/RGCTX_ARG are optional extra arguments passed in
 * dedicated registers. Handles remoting wrappers, string ctor signature
 * fixup, delegate Invoke fast paths, devirtualization of final methods,
 * and vtable/IMT-based virtual dispatch. Returns the call instruction.
 */
2557 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2558 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2560 #ifndef DISABLE_REMOTING
2561 gboolean might_be_remote = FALSE;
2563 gboolean virtual = this != NULL;
2564 gboolean enable_for_aot = TRUE;
2568 gboolean need_unbox_trampoline;
2571 sig = mono_method_signature (method);
/* Preserve the rgctx value across argument setup (guard elided in listing) */
2574 rgctx_reg = mono_alloc_preg (cfg);
2575 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared as returning void; give them a signature that
 * actually returns the string */
2578 if (method->string_ctor) {
2579 /* Create the real signature */
2580 /* FIXME: Cache these */
2581 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2582 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2587 context_used = mini_method_check_context_used (cfg, method);
2589 #ifndef DISABLE_REMOTING
/* Non-virtual instance calls on MarshalByRef (or object) receivers may hit a
 * transparent proxy, unless 'this' is statically known to be local */
2590 might_be_remote = this && sig->hasthis &&
2591 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2592 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2594 if (might_be_remote && context_used) {
2597 g_assert (cfg->generic_sharing_context);
/* Load the remoting-invoke-with-check wrapper address from the rgctx and
 * call it indirectly */
2599 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2601 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Calls dispatched through object/interface slots may land on a valuetype
 * method and need the unbox trampoline */
2605 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2607 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2609 #ifndef DISABLE_REMOTING
2610 if (might_be_remote)
2611 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2614 call->method = method;
2615 call->inst.flags |= MONO_INST_HAS_METHOD;
2616 call->inst.inst_left = this;
2617 call->tail_call = tail;
/* Virtual dispatch path (enclosing 'if (virtual)' elided in this listing) */
2620 int vtable_reg, slot_reg, this_reg;
2623 this_reg = this->dreg;
/* Fast path: delegate Invoke goes straight through delegate->invoke_impl */
2625 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2626 MonoInst *dummy_use;
2628 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2630 /* Make a call to delegate->invoke_impl */
2631 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2632 call->inst.inst_basereg = this_reg;
2633 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2634 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2636 /* We must emit a dummy use here because the delegate trampoline will
2637 replace the 'this' argument with the delegate target making this activation
2638 no longer a root for the delegate.
2639 This is an issue for delegates that target collectible code such as dynamic
2640 methods of GC'able assemblies.
2642 For a test case look into #667921.
2644 FIXME: a dummy use is not the best way to do it as the local register allocator
2645 will put it on a caller save register and spil it around the call.
2646 Ideally, we would either put it on a callee save register or only do the store part.
2648 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2650 return (MonoInst*)call;
/* Devirtualize when the method is non-virtual, or virtual but final/sealed */
2653 if ((!cfg->compile_aot || enable_for_aot) &&
2654 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2655 (MONO_METHOD_IS_FINAL (method) &&
2656 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2657 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2659 * the method is not virtual, we just need to ensure this is not null
2660 * and then we can call the method directly.
2662 #ifndef DISABLE_REMOTING
2663 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2665 * The check above ensures method is not gshared, this is needed since
2666 * gshared methods can't have wrappers.
2668 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2672 if (!method->string_ctor)
2673 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2675 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2676 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2678 * the method is virtual, but we can statically dispatch since either
2679 * it's class or the method itself are sealed.
2680 * But first we need to ensure it's not a null reference.
2682 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2684 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'), then
 * pick the slot from the IMT (interfaces) or the vtable (classes) */
2686 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2688 vtable_reg = alloc_preg (cfg);
2689 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2690 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2692 #ifdef MONO_ARCH_HAVE_IMT
2694 guint32 imt_slot = mono_method_get_imt_slot (method);
2695 emit_imt_argument (cfg, call, call->method, imt_arg);
2696 slot_reg = vtable_reg;
/* IMT entries live at negative offsets before the vtable */
2697 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2700 if (slot_reg == -1) {
/* No IMT: locate the interface's slot area through the vtable */
2701 slot_reg = alloc_preg (cfg);
2702 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2703 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2706 slot_reg = vtable_reg;
2707 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2708 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2709 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also pass an IMT argument */
2711 g_assert (mono_method_signature (method)->generic_param_count);
2712 emit_imt_argument (cfg, call, call->method, imt_arg);
2717 call->inst.sreg1 = slot_reg;
2718 call->inst.inst_offset = offset;
2719 call->virtual = TRUE;
2723 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2726 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2728 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a non-tail call to METHOD using its own
 * signature, with no IMT or rgctx argument.
 */
2732 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2734 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * Returns the emitted call instruction.
 */
2738 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* Not calli, not virtual, not tail, no rgctx, no unbox trampoline */
2745 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2748 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2750 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall previously registered for the native
 * address FUNC, dispatching through the icall's wrapper.
 * NOTE(review): the NULL check / assert on 'info' is elided from this
 * listing — confirm against the full source.
 */
2754 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2756 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2760 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2764 * mono_emit_abs_call:
2766 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2768 inline static MonoInst*
2769 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2770 MonoMethodSignature *sig, MonoInst **args)
2772 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2776 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info 'addresses' back to their info */
2779 if (cfg->abs_patches == NULL)
2780 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2781 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The MonoJumpInfo pointer itself is used as the (fake) call target */
2782 ins = mono_emit_native_call (cfg, ji, sig, args);
2783 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend the result INS of a call with signature FSIG when the
 * return type is a small integer and the callee is native (pinvoke) or
 * compiled by LLVM, since such code might leave the upper bits of the
 * result register uninitialized.
 * NOTE(review): the 'break;' / default lines of the switch are elided from
 * this listing — confirm against the full source before editing.
 */
2788 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2790 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2791 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2795 * Native code might return non register sized integers
2796 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load width */
2798 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2799 case OP_LOADI1_MEMBASE:
2800 widen_op = OP_ICONV_TO_I1;
2802 case OP_LOADU1_MEMBASE:
2803 widen_op = OP_ICONV_TO_U1;
2805 case OP_LOADI2_MEMBASE:
2806 widen_op = OP_ICONV_TO_I2;
2808 case OP_LOADU2_MEMBASE:
2809 widen_op = OP_ICONV_TO_U2;
2815 if (widen_op != -1) {
2816 int dreg = alloc_preg (cfg);
2819 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2820 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the corlib String.memcpy (3 args) helper, caching it in a
 * static on first use; aborts if the corlib is too old to provide it.
 */
2830 get_memcpy_method (void)
2832 static MonoMethod *memcpy_method = NULL;
2833 if (!memcpy_method) {
2834 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2836 g_error ("Old corlib found. Install a new one");
2838 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set a bit in *WB_BITMAP for every pointer-sized slot of KLASS (placed
 * at byte OFFSET inside the copied value) that holds a managed reference,
 * recursing into embedded valuetypes with references. The bitmap tells the
 * GC which words need write barriers during a value copy.
 */
2842 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2844 MonoClassField *field;
2845 gpointer iter = NULL;
2847 while ((field = mono_class_get_fields (klass, &iter))) {
/* Only instance fields occupy slots in the copied value */
2850 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it */
2852 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2853 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned */
2854 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2855 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2857 MonoClass *field_class = mono_class_from_mono_type (field->type);
2858 if (field_class->has_references)
2859 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Prefers, in
 * order: the arch-specific card-table wbarrier opcode, inline card-table
 * marking, and finally a call to the generic write-barrier method. No-op
 * when the cfg doesn't generate write barriers.
 */
2865 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2867 int card_table_shift_bits;
2868 gpointer card_table_mask;
2870 MonoInst *dummy_use;
2871 int nursery_shift_bits;
2872 size_t nursery_size;
2873 gboolean has_card_table_wb = FALSE;
2875 if (!cfg->gen_write_barriers)
2878 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2880 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2882 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2883 has_card_table_wb = TRUE;
/* Fast path: a single arch-lowered card-table barrier instruction */
2886 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2889 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2890 wbarrier->sreg1 = ptr->dreg;
2891 wbarrier->sreg2 = value->dreg;
2892 MONO_ADD_INS (cfg->cbb, wbarrier);
2893 } else if (card_table) {
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1 */
2894 int offset_reg = alloc_preg (cfg);
2895 int card_reg = alloc_preg (cfg);
2898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2899 if (card_table_mask)
2900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2902 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2903 * IMM's larger than 32bits.
2905 if (cfg->compile_aot) {
2906 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2908 MONO_INST_NEW (cfg, ins, OP_PCONST);
2909 ins->inst_p0 = card_table;
2910 ins->dreg = card_reg;
2911 MONO_ADD_INS (cfg->cbb, ins);
2914 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2915 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the runtime's generic write-barrier method */
2917 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2918 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier */
2921 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Copy a valuetype of KLASS (SIZE bytes, alignment ALIGN) from
 * iargs[1] to iargs[0], emitting write barriers for reference slots.
 * Small copies are unrolled inline; larger ones call the
 * mono_gc_wbarrier_value_copy_bitmap icall with a reference bitmap.
 * Used as a predicate by mini_emit_stobj — the return statements are
 * elided from this listing; confirm the success/failure values in the
 * full source.
 */
2925 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2927 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2928 unsigned need_wb = 0;
2933 /*types with references can't have alignment smaller than sizeof(void*) */
2934 if (align < SIZEOF_VOID_P)
2937 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2938 if (size > 32 * SIZEOF_VOID_P)
2941 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2943 /* We don't unroll more than 5 stores to avoid code bloat. */
2944 if (size > 5 * SIZEOF_VOID_P) {
2945 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2946 size += (SIZEOF_VOID_P - 1);
2947 size &= ~(SIZEOF_VOID_P - 1);
2949 EMIT_NEW_ICONST (cfg, iargs [2], size);
2950 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2951 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2955 destreg = iargs [0]->dreg;
2956 srcreg = iargs [1]->dreg;
2959 dest_ptr_reg = alloc_preg (cfg);
2960 tmp_reg = alloc_preg (cfg);
/* Walk the destination with a separate cursor so write barriers see the
 * current slot address */
2963 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2965 while (size >= SIZEOF_VOID_P) {
2966 MonoInst *load_inst;
2967 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2968 load_inst->dreg = tmp_reg;
2969 load_inst->inst_basereg = srcreg;
2970 load_inst->inst_offset = offset;
2971 MONO_ADD_INS (cfg->cbb, load_inst);
2973 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marks as references (guard elided here) */
2976 emit_write_barrier (cfg, iargs [0], load_inst);
2978 offset += SIZEOF_VOID_P;
2979 size -= SIZEOF_VOID_P;
2982 /*tmp += sizeof (void*)*/
2983 if (size >= SIZEOF_VOID_P) {
2984 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2985 MONO_ADD_INS (cfg->cbb, iargs [0]);
2989 /* Those cannot be references since size < sizeof (void*) */
2991 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2998 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2999 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3005 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3006 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3015 * Emit code to copy a valuetype of type @klass whose address is stored in
3016 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects the native (marshalled) size; gsharedvt classes get their
 * size and memcpy helper from the rgctx at run time. Stores into reference-
 * bearing types go through write-barrier-aware copy paths.
 */
3019 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3021 MonoInst *iargs [4];
3022 int context_used, n;
3024 MonoMethod *memcpy_method;
3025 MonoInst *size_ins = NULL;
3026 MonoInst *memcpy_ins = NULL;
3030 * This check breaks with spilled vars... need to handle it during verification anyway.
3031 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the concrete size/copy routine is only known at run time */
3034 if (mini_is_gsharedvt_klass (cfg, klass)) {
3036 context_used = mini_class_check_context_used (cfg, klass);
3037 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3038 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3042 n = mono_class_native_size (klass, &align);
3044 n = mono_class_value_size (klass, &align);
3046 /* if native is true there should be no references in the struct */
3047 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3048 /* Avoid barriers when storing to the stack */
3049 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3050 (dest->opcode == OP_LDADDR))) {
3056 context_used = mini_class_check_context_used (cfg, klass);
3058 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3059 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3061 } else if (context_used) {
3062 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3064 if (cfg->compile_aot) {
3065 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3067 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Make sure the GC descriptor exists before the copy icall needs it */
3068 mono_class_compute_gc_descriptor (klass);
3073 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3075 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: small copies are inlined, others call String.memcpy */
3080 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3081 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3082 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3087 iargs [2] = size_ins;
3089 EMIT_NEW_ICONST (cfg, iargs [2], n);
3091 memcpy_method = get_memcpy_method ();
3093 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3095 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the corlib String.memset (3 args) helper, caching it in a
 * static on first use; aborts if the corlib is too old to provide it.
 */
3100 get_memset_method (void)
3102 static MonoMethod *memset_method = NULL;
3103 if (!memset_method) {
3104 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3106 g_error ("Old corlib found. Install a new one");
3108 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at the address in
 * DEST->dreg (the initobj CIL opcode). gsharedvt classes use a runtime
 * bzero helper; small fixed sizes are inlined; the rest call
 * String.memset (dest, 0, size).
 */
3112 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3114 MonoInst *iargs [3];
3115 int n, context_used;
3117 MonoMethod *memset_method;
3118 MonoInst *size_ins = NULL;
3119 MonoInst *bzero_ins = NULL;
3120 static MonoMethod *bzero_method;
3122 /* FIXME: Optimize this for the case when dest is an LDADDR */
3124 mono_class_init (klass);
/* gsharedvt: size and bzero routine are fetched from the rgctx at run time */
3125 if (mini_is_gsharedvt_klass (cfg, klass)) {
3126 context_used = mini_class_check_context_used (cfg, klass);
3127 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3128 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3130 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3131 g_assert (bzero_method);
3133 iargs [1] = size_ins;
3134 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3138 n = mono_class_value_size (klass, &align);
/* Small values: inline the memset */
3140 if (n <= sizeof (gpointer) * 5) {
3141 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3144 memset_method = get_memset_method ();
3146 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3147 EMIT_NEW_ICONST (cfg, iargs [2], n);
3148 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for METHOD, depending on
 * what CONTEXT_USED requires: the method's mrgctx (method-inst sharing),
 * the class vtable (static/valuetype methods), or the vtable loaded from
 * 'this'. Only valid when generic sharing is in effect.
 */
3153 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3155 MonoInst *this = NULL;
3157 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the context through 'this' */
3159 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3160 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3161 !method->klass->valuetype)
3162 EMIT_NEW_ARGLOAD (cfg, this, 0);
3164 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3165 MonoInst *mrgctx_loc, *mrgctx_var;
3168 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The mrgctx was stashed in the vtable var by the prolog */
3170 mrgctx_loc = mono_get_vtable_var (cfg);
3171 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3174 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3175 MonoInst *vtable_loc, *vtable_var;
3179 vtable_loc = mono_get_vtable_var (cfg);
3180 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3182 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an mrgctx; load the vtable out of it */
3183 MonoInst *mrgctx_var = vtable_var;
3186 vtable_reg = alloc_preg (cfg);
3187 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3188 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable straight from the receiver object */
3196 vtable_reg = alloc_preg (cfg);
3197 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-entry patch descriptor: the slot
 * is looked up for METHOD, in the mrgctx if IN_MRGCTX, and resolves
 * PATCH_DATA of kind PATCH_TYPE to an INFO_TYPE value.
 */
3202 static MonoJumpInfoRgctxEntry *
3203 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3205 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3206 res->method = method;
3207 res->in_mrgctx = in_mrgctx;
3208 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3209 res->data->type = patch_type;
3210 res->data->data.target = patch_data;
3211 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * using the context value RGCTX.
 */
3216 static inline MonoInst*
3217 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3219 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE info (vtable, klass, etc.) of KLASS
 * from the runtime generic context.
 */
3223 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3224 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3226 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3227 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3229 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the RGCTX_TYPE info associated with signature SIG
 * from the runtime generic context.
 */
3233 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3234 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3236 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3237 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3239 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the RGCTX_TYPE info of a gsharedvt call to CMETHOD
 * with signature SIG from the runtime generic context.
 */
3243 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3244 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3246 MonoJumpInfoGSharedVtCall *call_info;
3247 MonoJumpInfoRgctxEntry *entry;
3250 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3251 call_info->sig = sig;
3252 call_info->method = cmethod;
3254 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3255 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3257 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt info descriptor of CMETHOD (described by
 * INFO) from the runtime generic context.
 */
3262 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3263 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3265 MonoJumpInfoRgctxEntry *entry;
3268 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3269 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3271 return emit_rgctx_fetch (cfg, rgctx, entry);
3275 * emit_get_rgctx_method:
3277 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3278 * normal constants, else emit a load from the rgctx.
3281 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3282 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Not shared: the method (or its rgctx) is a compile-time constant */
3284 if (!context_used) {
3287 switch (rgctx_type) {
3288 case MONO_RGCTX_INFO_METHOD:
3289 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3291 case MONO_RGCTX_INFO_METHOD_RGCTX:
3292 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3295 g_assert_not_reached ();
/* Shared: fetch the value from the runtime generic context */
3298 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3299 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3301 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE info of FIELD from the runtime generic
 * context.
 */
3306 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3307 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3309 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3310 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3312 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot holding (RGCTX_TYPE, DATA) in the method's
 * gsharedvt info entry table, reusing an existing matching slot when
 * possible (LOCAL_OFFSET entries are never shared) and appending a new
 * template otherwise.
 */
3316 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3318 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3319 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry with the same type and data */
3324 for (i = 0; i < info->entries->len; ++i) {
3325 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3327 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Not found: append a fresh template and return its index */
3331 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3332 template->info_type = rgctx_type;
3333 template->data = data;
3335 idx = info->entries->len;
3337 g_ptr_array_add (info->entries, template);
3343 * emit_get_gsharedvt_info:
3345 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3348 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3353 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3354 /* Load info->entries [idx] */
3355 dreg = alloc_preg (cfg);
3356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: load the RGCTX_TYPE info for KLASS (via its
 * byval type) from the gsharedvt info var.
 */
3362 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3364 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3368 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant.
 */
3371 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3373 MonoInst *vtable_arg;
3377 context_used = mini_class_check_context_used (cfg, klass);
3380 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3381 klass, MONO_RGCTX_INFO_VTABLE);
3383 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3387 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature */
3390 if (COMPILE_LLVM (cfg))
3391 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3393 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3394 #ifdef MONO_ARCH_VTABLE_REG
/* Pin the vtable argument to the dedicated hard register */
3395 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3396 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset (ip - header->code) when
 * sequence points are enabled and METHOD is the method being compiled
 * (i.e. not an inlined callee). INTR_LOC marks an interruptible location.
 */
3403 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3407 if (cfg->gen_seq_points && cfg->method == method) {
3408 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3409 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (read from the
 * object in OBJ_REG) and the target KLASS into the thread's MonoJitTlsData,
 * so a failing cast can produce a detailed error message.
 */
3414 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3416 if (mini_get_debug_options ()->better_cast_details) {
3417 int to_klass_reg = alloc_preg (cfg);
3418 int vtable_reg = alloc_preg (cfg);
3419 int klass_reg = alloc_preg (cfg);
3420 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this target: the feature cannot work */
3423 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3427 MONO_ADD_INS (cfg->cbb, tls_get);
/* klass = obj->vtable->klass */
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3431 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3432 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3433 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details state recorded by save_cast_details,
 * so a later unrelated failure doesn't report stale classes.
 */
3438 reset_cast_details (MonoCompile *cfg)
3440 /* Reset the variables holding the cast details */
3441 if (mini_get_debug_options ()->better_cast_details) {
3442 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3444 MONO_ADD_INS (cfg->cbb, tls_get);
3445 /* It is enough to reset the from field */
3446 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3451 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS
 * (used for covariant array stores), comparing either the klass or the
 * vtable depending on shared/AOT mode, and throwing
 * ArrayTypeMismatchException on mismatch.
 */
3454 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3456 int vtable_reg = alloc_preg (cfg);
3459 context_used = mini_class_check_context_used (cfg, array_class);
3461 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load doubles as the null check */
3463 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared (appdomain-independent) code compares MonoClass pointers */
3465 if (cfg->opt & MONO_OPT_SHARED) {
3466 int class_reg = alloc_preg (cfg);
3467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3468 if (cfg->compile_aot) {
3469 int klass_reg = alloc_preg (cfg);
3470 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3471 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3473 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3475 } else if (context_used) {
/* gshared: the expected vtable comes from the rgctx */
3476 MonoInst *vtable_ins;
3478 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3479 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3481 if (cfg->compile_aot) {
3485 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3487 vt_reg = alloc_preg (cfg);
3488 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3489 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3492 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3498 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3500 reset_cast_details (cfg);
3504 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3505 * generic code is generated.
3508 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing goes through the managed Nullable<T>.Unbox helper */
3510 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3513 MonoInst *rgctx, *addr;
3515 /* FIXME: What if the class is shared? We might not
3516 have to get the address of the method from the
/* Shared code: resolve the helper's code address through the rgctx and
 * call it indirectly */
3518 addr = emit_get_rgctx_method (cfg, context_used, method,
3519 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3521 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3523 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3525 gboolean pass_vtable, pass_mrgctx;
3526 MonoInst *rgctx_arg = NULL;
3528 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3529 g_assert (!pass_mrgctx);
/* The helper may need its class vtable as an extra argument */
3532 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3535 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3538 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the unbox CIL opcode for KLASS on the boxed object at sp[0]:
 * verify the object's element class matches, then compute the address of
 * the payload (object + sizeof (MonoObject)). Returns the address
 * instruction (STACK_MP).
 */
3543 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3547 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3548 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3549 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3550 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3552 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3553 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3554 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3556 /* FIXME: generics */
3557 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype */
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3561 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx */
3567 MonoInst *element_class;
3569 /* This assertion is from the unboxcast insn */
3570 g_assert (klass->rank == 0);
3572 element_class = emit_get_rgctx_klass (cfg, context_used,
3573 klass->element_class, MONO_RGCTX_INFO_KLASS);
3575 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3576 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3578 save_cast_details (cfg, klass->element_class, obj_reg);
3579 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3580 reset_cast_details (cfg);
/* Result: pointer just past the object header */
3583 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3584 MONO_ADD_INS (cfg->cbb, add);
3585 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type, i.e. its concrete
 * layout is only known at runtime.  The code branches on the runtime
 * CLASS_BOX_TYPE info: vtype (payload used in place), reference (ref saved to
 * a temporary and its address taken), or nullable (the Nullable unbox method
 * is called indirectly).  All three paths leave an address in addr_reg; the
 * result is a typed load from that address.  *OUT_CBB is set to the basic
 * block that is current after the emitted control flow.
 */
3592 handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3594 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3595 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3599 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime type check: castclass icall throws if obj is not of KLASS. */
3605 args [1] = klass_inst;
3608 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
3610 NEW_BBLOCK (cfg, is_ref_bb);
3611 NEW_BBLOCK (cfg, is_nullable_bb);
3612 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the box type computed at runtime: 1 == reference, 2 == nullable,
 * otherwise fall through to the plain vtype case. */
3613 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3620 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3621 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload sits right after the MonoObject header. */
3625 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3626 MONO_ADD_INS (cfg->cbb, addr);
3628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3631 MONO_START_BB (cfg, is_ref_bb);
3633 /* Save the ref to a temporary */
3634 dreg = alloc_ireg (cfg);
3635 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3636 addr->dreg = addr_reg;
3637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3641 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through an address obtained from the
 * gsharedvt info; the signature is built by hand since the concrete method
 * cannot be constructed at JIT time. */
3644 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3645 MonoInst *unbox_call;
3646 MonoMethodSignature *unbox_sig;
3649 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3651 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3652 unbox_sig->ret = &klass->byval_arg;
3653 unbox_sig->param_count = 1;
3654 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3655 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3657 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3658 addr->dreg = addr_reg;
3661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3664 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value through the common addr_reg. */
3667 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3669 *out_cbb = cfg->cbb;
3675 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an object of KLASS.  FOR_BOX selects the allocator
 * variant used for box operations.  Picks between the GC's managed allocator,
 * domain-aware mono_object_new (when MONO_OPT_SHARED), a corlib-specialized
 * AOT helper, and the class-specific allocation function.
 */
3678 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3680 MonoInst *iargs [2];
/* Shared generic code: the klass/vtable argument is looked up via RGCTX. */
3686 MonoInst *iargs [2];
3688 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3690 if (cfg->opt & MONO_OPT_SHARED)
3691 rgctx_info = MONO_RGCTX_INFO_KLASS;
3693 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3694 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3696 if (cfg->opt & MONO_OPT_SHARED) {
/* mono_object_new () takes (domain, klass). */
3697 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3699 alloc_ftn = mono_object_new;
3702 alloc_ftn = mono_object_new_specific;
3705 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3706 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3708 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
3711 if (cfg->opt & MONO_OPT_SHARED) {
3712 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3713 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3715 alloc_ftn = mono_object_new;
3716 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3717 /* This happens often in argument checking code, eg. throw new FooException... */
3718 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3719 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3720 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3722 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3723 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg. */
3727 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3728 cfg->exception_ptr = klass;
3732 #ifndef MONO_CROSS_COMPILE
3733 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3736 if (managed_alloc) {
3737 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3738 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3740 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw allocators take the instance size in pointer-sized words as the
 * first argument; round the size up to a whole number of words. */
3742 guint32 lw = vtable->klass->instance_size;
3743 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3744 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3745 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3748 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3752 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3756 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL as an instance of KLASS.  Handles three cases:
 * Nullable<T> (delegates to the Nullable Box method), gsharedvt classes
 * (runtime dispatch on the box type), and plain valuetypes (allocate then
 * store the payload after the object header).  *OUT_CBB is updated to the
 * basic block current after any emitted control flow.
 */
3759 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3761 MonoInst *alloc, *ins;
3763 *out_cbb = cfg->cbb;
3765 if (mono_class_is_nullable (klass)) {
3766 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3769 /* FIXME: What if the class is shared? We might not
3770 have to get the method address from the RGCTX. */
3771 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3772 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3773 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3775 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: call Box directly, passing the vtable if sharing
 * analysis requires it. */
3777 gboolean pass_vtable, pass_mrgctx;
3778 MonoInst *rgctx_arg = NULL;
3780 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3781 g_assert (!pass_mrgctx);
3784 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3787 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3790 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3794 if (mini_is_gsharedvt_klass (cfg, klass)) {
3795 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3796 MonoInst *res, *is_ref, *src_var, *addr;
3799 dreg = alloc_ireg (cfg);
3801 NEW_BBLOCK (cfg, is_ref_bb);
3802 NEW_BBLOCK (cfg, is_nullable_bb);
3803 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type: 1 == reference, 2 == nullable, else vtype. */
3804 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3805 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3806 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3809 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the MonoObject header. */
3812 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3815 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3816 ins->opcode = OP_STOREV_MEMBASE;
3818 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3819 res->type = STACK_OBJ;
3821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3824 MONO_START_BB (cfg, is_ref_bb);
3825 addr_reg = alloc_ireg (cfg);
3827 /* val is a vtype, so has to load the value manually */
3828 src_var = get_vreg_to_inst (cfg, val->dreg);
3830 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3831 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3836 MONO_START_BB (cfg, is_nullable_bb);
3839 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3840 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3842 MonoMethodSignature *box_sig;
3845 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3846 * construct that method at JIT time, so have to do things by hand.
3848 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3849 box_sig->ret = &mono_defaults.object_class->byval_arg;
3850 box_sig->param_count = 1;
3851 box_sig->params [0] = &klass->byval_arg;
3852 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3853 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3854 res->type = STACK_OBJ;
3858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3860 MONO_START_BB (cfg, end_bb);
3862 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt) valuetype: allocate, then store the payload. */
3866 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3870 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has at least one variant (covariant/contravariant)
 * generic parameter which is instantiated with a reference type.  Such classes
 * need the slower, cache-based cast helpers, since variance makes the simple
 * class-hierarchy checks insufficient.
 */
3877 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3880 MonoGenericContainer *container;
3881 MonoGenericInst *ginst;
/* Inflated generic instance: inspect its actual type arguments; an open
 * generic container is only relevant when compiling shared code. */
3883 if (klass->generic_class) {
3884 container = klass->generic_class->container_class->generic_container;
3885 ginst = klass->generic_class->context.class_inst;
3886 } else if (klass->generic_container && context_used) {
3887 container = klass->generic_container;
3888 ginst = container->context.class_inst;
3893 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters; only variant ones can break simple casts. */
3895 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3897 type = ginst->type_argv [i];
3898 if (mini_type_is_reference (cfg, type))
/* is_complex_isinst: classes whose isinst/castclass cannot be emitted as a
 * simple inline hierarchy check (interfaces, arrays, nullables, MBR, sealed,
 * type variables).  The leading TRUE currently forces ALL classes onto the
 * cache-based helper path — see the FIXME below. */
3904 // FIXME: This doesn't work yet (class libs tests fail?)
3905 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3908 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL castclass: check that SRC is an instance of
 * KLASS, throwing InvalidCastException otherwise.  NULL passes the cast.
 * Complex classes (see is_complex_isinst) go through the cached castclass
 * wrapper; simple classes get an inline vtable/class check.
 */
3911 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3913 MonoBasicBlock *is_null_bb;
3914 int obj_reg = src->dreg;
3915 int vtable_reg = alloc_preg (cfg);
3916 MonoInst *klass_inst = NULL;
/* Slow path: call the marshal-generated castclass-with-cache wrapper. */
3921 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3922 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3923 MonoInst *cache_ins;
3925 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3930 /* klass - it's the second element of the cache entry*/
3931 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3934 args [2] = cache_ins;
3936 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3939 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* NULL references always pass castclass. */
3942 NEW_BBLOCK (cfg, is_null_bb);
3944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3947 save_cast_details (cfg, klass, obj_reg);
3949 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3951 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3953 int klass_reg = alloc_preg (cfg);
3955 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow an exact vtable/class pointer compare. */
3957 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3958 /* the remoting code is broken, access the class for now */
3959 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3960 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3962 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3963 cfg->exception_ptr = klass;
3966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3968 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3971 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3974 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3978 MONO_START_BB (cfg, is_null_bb);
3980 reset_cast_details (cfg);
3986 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL isinst: the result register holds SRC if it
 * is an instance of KLASS (or NULL input), and NULL otherwise.  Complex
 * classes use the cached isinst wrapper; otherwise specialized inline checks
 * are emitted per class kind (interface, array, nullable, sealed, general).
 */
3989 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3992 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3993 int obj_reg = src->dreg;
3994 int vtable_reg = alloc_preg (cfg);
3995 int res_reg = alloc_ireg_ref (cfg);
3996 MonoInst *klass_inst = NULL;
/* Slow path: call the marshal-generated isinst-with-cache wrapper. */
4001 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4002 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4003 MonoInst *cache_ins;
4005 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4010 /* klass - it's the second element of the cache entry*/
4011 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4014 args [2] = cache_ins;
4016 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4019 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4022 NEW_BBLOCK (cfg, is_null_bb);
4023 NEW_BBLOCK (cfg, false_bb);
4024 NEW_BBLOCK (cfg, end_bb);
4026 /* Do the assignment at the beginning, so the other assignment can be if converted */
4027 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4028 ins->type = STACK_OBJ;
/* NULL input: result is the input (NULL). */
4031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4036 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4037 g_assert (!context_used);
4038 /* the is_null_bb target simply copies the input register to the output */
4039 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4041 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then dispatch on the element (cast) class. */
4044 int rank_reg = alloc_preg (cfg);
4045 int eclass_reg = alloc_preg (cfg);
4047 g_assert (!context_used);
4048 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case object[], Enum-base[] and Enum[] element types, which have
 * their own assignability rules for enums and their underlying types. */
4053 if (klass->cast_class == mono_defaults.object_class) {
4054 int parent_reg = alloc_preg (cfg);
4055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4056 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4057 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4058 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4059 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4060 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4061 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4063 } else if (klass->cast_class == mono_defaults.enum_class) {
4064 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4066 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4067 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4069 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4070 /* Check that the object is a vector too */
4071 int bounds_reg = alloc_preg (cfg);
4072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4077 /* the is_null_bb target simply copies the input register to the output */
4078 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4080 } else if (mono_class_is_nullable (klass)) {
4081 g_assert (!context_used);
4082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4083 /* the is_null_bb target simply copies the input register to the output */
4084 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes allow an exact class pointer compare. */
4086 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4087 g_assert (!context_used);
4088 /* the remoting code is broken, access the class for now */
4089 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4090 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4092 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4093 cfg->exception_ptr = klass;
4096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4102 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: walk the class hierarchy. */
4104 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4105 /* the is_null_bb target simply copies the input register to the output */
4106 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result is NULL. */
4111 MONO_START_BB (cfg, false_bb);
4113 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4116 MONO_START_BB (cfg, is_null_bb);
4118 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant used with transparent
 * proxies; see the comment below for the 0/1/2 result encoding.  When
 * remoting is disabled, only the 0/1 results are reachable and hitting the
 * proxy path aborts compilation.
 */
4124 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4126 /* This opcode takes as input an object reference and a class, and returns:
4127 0) if the object is an instance of the class,
4128 1) if the object is not instance of the class,
4129 2) if the object is a proxy whose type cannot be determined */
4132 #ifndef DISABLE_REMOTING
4133 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4135 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4137 int obj_reg = src->dreg;
4138 int dreg = alloc_ireg (cfg);
4140 #ifndef DISABLE_REMOTING
4141 int klass_reg = alloc_preg (cfg);
4144 NEW_BBLOCK (cfg, true_bb);
4145 NEW_BBLOCK (cfg, false_bb);
4146 NEW_BBLOCK (cfg, end_bb);
4147 #ifndef DISABLE_REMOTING
4148 NEW_BBLOCK (cfg, false2_bb);
4149 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
4152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4155 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4156 #ifndef DISABLE_REMOTING
4157 NEW_BBLOCK (cfg, interface_fail_bb);
4160 tmp_reg = alloc_preg (cfg);
4161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4162 #ifndef DISABLE_REMOTING
/* Interface miss may still be a transparent proxy: check for one with no
 * custom type info (result 2). */
4163 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4164 MONO_START_BB (cfg, interface_fail_bb);
4165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4167 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4169 tmp_reg = alloc_preg (cfg);
4170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4174 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4177 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish proxies from ordinary objects, then check
 * against the proxy's remote class where applicable. */
4178 tmp_reg = alloc_preg (cfg);
4179 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4180 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4182 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4183 tmp_reg = alloc_preg (cfg);
4184 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4185 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4187 tmp_reg = alloc_preg (cfg);
4188 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4189 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4192 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4193 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4195 MONO_START_BB (cfg, no_proxy_bb);
4197 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4199 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the 0/1/2 result in dreg. */
4203 MONO_START_BB (cfg, false_bb);
4205 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4206 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4208 #ifndef DISABLE_REMOTING
4209 MONO_START_BB (cfg, false2_bb);
4211 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4212 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4215 MONO_START_BB (cfg, true_bb);
4217 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4219 MONO_START_BB (cfg, end_bb);
4222 MONO_INST_NEW (cfg, ins, OP_ICONST);
4224 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant; see the comment below
 * for the 0/1 result encoding (non-matching non-proxy objects throw
 * InvalidCastException instead of producing a value).  When remoting is
 * disabled, reaching the proxy path aborts compilation.
 */
4230 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4232 /* This opcode takes as input an object reference and a class, and returns:
4233 0) if the object is an instance of the class,
4234 1) if the object is a proxy whose type cannot be determined
4235 an InvalidCastException exception is thrown otherwhise*/
4238 #ifndef DISABLE_REMOTING
4239 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4241 MonoBasicBlock *ok_result_bb;
4243 int obj_reg = src->dreg;
4244 int dreg = alloc_ireg (cfg);
4245 int tmp_reg = alloc_preg (cfg);
4247 #ifndef DISABLE_REMOTING
4248 int klass_reg = alloc_preg (cfg);
4249 NEW_BBLOCK (cfg, end_bb);
4252 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL always passes the cast (result 0). */
4254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4255 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4257 save_cast_details (cfg, klass, obj_reg);
4259 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4260 #ifndef DISABLE_REMOTING
4261 NEW_BBLOCK (cfg, interface_fail_bb);
4263 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface miss: only a transparent proxy without custom type info
 * survives (result 1); anything else throws. */
4264 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4265 MONO_START_BB (cfg, interface_fail_bb);
4266 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4268 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4270 tmp_reg = alloc_preg (cfg);
4271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4272 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4273 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4275 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4278 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4279 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4280 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4283 #ifndef DISABLE_REMOTING
/* Non-interface case: separate proxies from plain objects before checking. */
4284 NEW_BBLOCK (cfg, no_proxy_bb);
4286 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4288 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4290 tmp_reg = alloc_preg (cfg);
4291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4292 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4294 tmp_reg = alloc_preg (cfg);
4295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4299 NEW_BBLOCK (cfg, fail_1_bb);
4301 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4303 MONO_START_BB (cfg, fail_1_bb);
4305 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4306 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4308 MONO_START_BB (cfg, no_proxy_bb);
4310 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4312 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4316 MONO_START_BB (cfg, ok_result_bb);
4318 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4320 #ifndef DISABLE_REMOTING
4321 MONO_START_BB (cfg, end_bb);
4325 MONO_INST_NEW (cfg, ins, OP_ICONST);
4327 ins->type = STACK_I4;
4333 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of KLASS and initializes it inline
 * (instead of calling mono_delegate_ctor): fills the target, method,
 * method_code and invoke_impl fields, inserting write barriers for the
 * reference stores when required.  The remaining runtime checks are left to
 * the delegate trampoline stored in invoke_impl.
 */
4335 static G_GNUC_UNUSED MonoInst*
4336 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4340 gpointer *trampoline;
4341 MonoInst *obj, *method_ins, *tramp_ins;
4345 obj = handle_alloc (cfg, klass, FALSE, 0);
4349 /* Inline the contents of mono_delegate_ctor */
4351 /* Set target field */
4352 /* Optimize away setting of NULL target */
4353 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4354 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4355 if (cfg->gen_write_barriers) {
4356 dreg = alloc_preg (cfg);
4357 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4358 emit_write_barrier (cfg, ptr, target);
4362 /* Set method field */
4363 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4365 if (cfg->gen_write_barriers) {
4366 dreg = alloc_preg (cfg);
4367 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4368 emit_write_barrier (cfg, ptr, method_ins);
4371 * To avoid looking up the compiled code belonging to the target method
4372 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4373 * store it, and we fill it after the method has been compiled.
4375 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4376 MonoInst *code_slot_ins;
4379 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash and the slot itself. */
4381 domain = mono_domain_get ();
4382 mono_domain_lock (domain);
4383 if (!domain_jit_info (domain)->method_code_hash)
4384 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4385 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4387 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4388 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4390 mono_domain_unlock (domain);
4392 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4397 /* Set invoke_impl field */
4398 if (cfg->compile_aot) {
4399 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4401 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4402 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4406 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR for multi-dimensional array creation (CEE_NEWOBJ on an array
 * ctor) by calling the rank-specific mono_array_new_va icall wrapper with the
 * dimension arguments in SP.  Disables LLVM since the icall needs a vararg
 * calling convention.
 */
4412 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4414 MonoJitICallInfo *info;
4416 /* Need to register the icall so it gets an icall wrapper */
4417 info = mono_get_array_new_va_icall (rank);
4419 cfg->flags |= MONO_CFG_HAS_VARARGS;
4421 /* mono_array_new_va () needs a vararg calling convention */
4422 cfg->disable_llvm = TRUE;
4424 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4425 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable, emit an OP_LOAD_GOTADDR at the very
 * start of the entry basic block to initialize it, and add a dummy use in the
 * exit block so the variable stays live for the whole method.  No-op if there
 * is no got_var or it was already allocated.
 */
4429 mono_emit_load_got_addr (MonoCompile *cfg)
4431 MonoInst *getaddr, *dummy_use;
4433 if (!cfg->got_var || cfg->got_var_allocated)
4436 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4437 getaddr->cil_code = cfg->header->code;
4438 getaddr->dreg = cfg->got_var->dreg;
4440 /* Add it to the start of the first bblock */
4441 if (cfg->bb_entry->code) {
4442 getaddr->next = cfg->bb_entry->code;
4443 cfg->bb_entry->code = getaddr;
4446 MONO_ADD_INS (cfg->bb_entry, getaddr);
4448 cfg->got_var_allocated = TRUE;
4451 * Add a dummy use to keep the got_var alive, since real uses might
4452 * only be generated by the back ends.
4453 * Add it to end_bblock, so the variable's lifetime covers the whole
4455 * It would be better to make the usage of the got var explicit in all
4456 * cases when the backend needs it (i.e. calls, throw etc.), so this
4457 * wouldn't be needed.
4459 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4460 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit (from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT). */
4463 static int inline_limit;
4464 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects, among others: shared generic compilation, deep inline nesting,
 * noinline/synchronized/MBR methods, bodies over the size limit (unless
 * marked AggressiveInlining), classes whose cctor would have to run, methods
 * with declarative security, and (on soft-float targets) R4 signatures.
 */
4467 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4469 MonoMethodHeaderSummary header;
4471 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4472 MonoMethodSignature *sig = mono_method_signature (method);
4476 if (cfg->generic_sharing_context)
4479 if (cfg->inline_depth > 10)
4482 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, some icalls/pinvokes can be inlined directly. */
4483 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4484 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4485 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4490 if (!mono_method_get_header_summary (method, &header))
4493 /*runtime, icall and pinvoke are checked by summary call*/
4494 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4495 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4496 (mono_class_is_marshalbyref (method->klass)) ||
4500 /* also consider num_locals? */
4501 /* Do the size check early to avoid creating vtables */
4502 if (!inline_limit_inited) {
4503 if (getenv ("MONO_INLINELIMIT"))
4504 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
4506 inline_limit = INLINE_LENGTH_LIMIT;
4507 inline_limit_inited = TRUE;
4509 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4513 * if we can initialize the class of the method right away, we do,
4514 * otherwise we don't allow inlining if the class needs initialization,
4515 * since it would mean inserting a call to mono_runtime_class_init()
4516 * inside the inlined code
4518 if (!(cfg->opt & MONO_OPT_SHARED)) {
4519 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4520 if (cfg->run_cctors && method->klass->has_cctor) {
4521 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4522 if (!method->klass->runtime_info)
4523 /* No vtable created yet */
4525 vtable = mono_class_vtable (cfg->domain, method->klass);
4528 /* This makes so that inline cannot trigger */
4529 /* .cctors: too many apps depend on them */
4530 /* running with a specific order... */
4531 if (! vtable->initialized)
4533 mono_runtime_class_init (vtable);
4535 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4536 if (!method->klass->runtime_info)
4537 /* No vtable created yet */
4539 vtable = mono_class_vtable (cfg->domain, method->klass);
4542 if (!vtable->initialized)
4547 * If we're compiling for shared code
4548 * the cctor will need to be run at aot method load time, for example,
4549 * or at the end of the compilation of the inlining method.
4551 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4556 * CAS - do not inline methods with declarative security
4557 * Note: this has to be before any possible return TRUE;
4559 if (mono_security_method_has_declsec (method))
4562 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: R4 returns/arguments would need fallback handling. */
4563 if (mono_arch_is_soft_float ()) {
4565 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4567 for (i = 0; i < sig->param_count; ++i)
4568 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Return whether a static-field access in METHOD requires emitting a class
 * initialization check for VTABLE's class.  Already-initialized vtables (in
 * JIT mode), BeforeFieldInit classes, and classes whose cctor does not need
 * to run are exempt; so is a cctor accessing its own class's fields, since
 * initialization has already started by the time it executes.
 */
4577 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* AOT code cannot rely on the compile-time "initialized" state. */
4579 if (vtable->initialized && !cfg->compile_aot)
4582 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4585 if (!mono_class_needs_cctor_run (vtable->klass, method))
4588 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4589 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX in the rank-1 array ARR of
 * element class KLASS; returns the address instruction (type STACK_MP).
 * When BCHECK is set a bounds check against MonoArray.max_length is emitted.
 * On x86/amd64, power-of-two element sizes use a single LEA; otherwise the
 * offset is computed with an explicit multiply+add.  For gsharedvt classes
 * the element size is fetched at runtime through the RGCTX.
 */
4596 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4600 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4603 if (mini_is_gsharedvt_klass (cfg, klass)) {
4606 mono_class_init (klass);
4607 size = mono_class_array_element_size (klass);
4610 mult_reg = alloc_preg (cfg);
4611 array_reg = arr->dreg;
4612 index_reg = index->dreg;
4614 #if SIZEOF_REGISTER == 8
4615 /* The array reg is 64 bits but the index reg is only 32 */
4616 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself, so the index can be used as-is. */
4618 index2_reg = index_reg;
4620 index2_reg = alloc_preg (cfg);
4621 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4624 if (index->type == STACK_I8) {
4625 index2_reg = alloc_preg (cfg);
4626 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4628 index2_reg = index_reg;
4633 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4635 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold shift+add into one LEA for 1/2/4/8-byte elements. */
4636 if (size == 1 || size == 2 || size == 4 || size == 8) {
4637 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4639 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4640 ins->klass = mono_class_get_element_class (klass);
4641 ins->type = STACK_MP;
4647 add_reg = alloc_ireg_mp (cfg);
4650 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, load it from the RGCTX. */
4653 g_assert (cfg->generic_sharing_context);
4654 context_used = mini_class_check_context_used (cfg, klass);
4655 g_assert (context_used);
4656 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4657 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4661 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4662 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4663 ins->klass = mono_class_get_element_class (klass);
4664 ins->type = STACK_MP;
4665 MONO_ADD_INS (cfg->cbb, ins);
4670 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the element address for a rank-2 (two-dimensional) array.
 * Both indexes are adjusted by the per-dimension lower bound (from the
 * MonoArrayBounds records), range-checked against the dimension length with
 * an unsigned compare, then combined as (idx1 * len2 + idx2) * element_size.
 * Only compiled on targets with native mul/div (depends on OP_PMUL).
 */
4672 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4674 int bounds_reg = alloc_preg (cfg);
4675 int add_reg = alloc_ireg_mp (cfg);
4676 int mult_reg = alloc_preg (cfg);
4677 int mult2_reg = alloc_preg (cfg);
4678 int low1_reg = alloc_preg (cfg);
4679 int low2_reg = alloc_preg (cfg);
4680 int high1_reg = alloc_preg (cfg);
4681 int high2_reg = alloc_preg (cfg);
4682 int realidx1_reg = alloc_preg (cfg);
4683 int realidx2_reg = alloc_preg (cfg);
4684 int sum_reg = alloc_preg (cfg);
4685 int index1, index2, tmpreg;
4689 mono_class_init (klass);
4690 size = mono_class_array_element_size (klass);
4692 index1 = index_ins1->dreg;
4693 index2 = index_ins2->dreg;
4695 #if SIZEOF_REGISTER == 8
4696 /* The array reg is 64 bits but the index reg is only 32 */
4697 if (COMPILE_LLVM (cfg)) {
4700 tmpreg = alloc_preg (cfg);
4701 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4703 tmpreg = alloc_preg (cfg);
4704 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4708 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4712 /* range checking */
4713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4714 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: subtract lower bound, then unsigned-compare against length. */
4716 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4717 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4718 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4720 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4722 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: its bounds record follows the first one in memory. */
4724 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4725 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4726 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4728 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4730 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector) */
4732 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4733 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4734 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4735 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4736 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4738 ins->type = STACK_MP;
4740 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the address computation for an array element access made through the
 * array's Address/Get/Set accessor CMETHOD.  Rank 1 uses the inline rank-1
 * path; rank 2 uses the inline rank-2 path (when mul/div is native and
 * intrinsics are enabled); any other rank falls back to a call to the
 * marshal-generated array-address helper.
 */
4747 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4751 MonoMethod *addr_method;
/* A setter's signature carries the value as the last parameter; exclude it from the rank. */
4754 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4757 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4759 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4760 /* emit_ldelema_2 depends on OP_LMUL */
4761 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4762 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated Address helper for this rank/element size. */
4766 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4767 addr_method = mono_marshal_get_array_address (rank, element_size);
4768 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy below). */
4773 static MonoBreakPolicy
4774 always_insert_breakpoint (MonoMethod *method)
4776 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy; consulted by should_insert_brekpoint (). */
4779 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4782 * mono_set_break_policy:
4783 * policy_callback: the new callback function
4785 * Allow embedders to decide whether to actually obey breakpoint instructions
4786 * (both break IL instructions and Debugger.Break () method calls), for example
4787 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4788 * untrusted or semi-trusted code.
4790 * @policy_callback will be called every time a break point instruction needs to
4791 * be inserted with the method argument being the method that calls Debugger.Break()
4792 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4793 * if it wants the breakpoint to not be effective in the given method.
4794 * #MONO_BREAK_POLICY_ALWAYS is the default.
4797 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4799 if (policy_callback)
4800 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
4802 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — name typo is kept; callers use this spelling)
 * Consult the installed break policy to decide whether a breakpoint for METHOD
 * should actually be emitted.  Unknown policy values trigger a warning (and,
 * per the elided fallthrough, are not treated as ALWAYS).
 */
4806 should_insert_brekpoint (MonoMethod *method) {
4807 switch (break_policy_func (method)) {
4808 case MONO_BREAK_POLICY_ALWAYS:
4810 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG: only break when running under the Mono debugger. */
4812 case MONO_BREAK_POLICY_ON_DBG:
4813 return mono_debug_using_mono_debugger ();
4815 g_warning ("Incorrect value returned from break policy callback");
4820 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store.  args[0] = array, args[1] = index, args[2] = value address.
 * IS_SET selects store (copy *args[2] into the element, with a write barrier
 * for reference elements) vs. load (copy the element out to *args[2]).
 * The callers have already performed the bounds check.
 */
4822 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4824 MonoInst *addr, *store, *load;
4825 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4827 /* the bounds check is already done by the callers */
4828 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4830 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4831 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array requires a GC write barrier. */
4832 if (mini_type_is_reference (cfg, fsig->params [2]))
4833 emit_write_barrier (cfg, addr, load)
4835 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4836 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is treated as a reference type in this compilation
 * (delegates to mini_type_is_reference on the class's by-value type). */
4843 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4845 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for a stelem-style store: sp[0] = array, sp[1] = index, sp[2] = value.
 * Reference-type stores with SAFETY_CHECKS (and a possibly non-null value) go
 * through the virtual stelemref marshal helper, which performs the array
 * covariance check.  Otherwise: gsharedvt classes use a variable-size store,
 * a constant index folds the element offset at compile time, and the general
 * case computes the address via mini_emit_ldelema_1_ins.
 */
4849 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* A provably-null value (OP_PCONST 0) never needs the covariance check. */
4851 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4852 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4853 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4854 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4855 MonoInst *iargs [3];
4858 mono_class_setup_vtable (obj_array);
4859 g_assert (helper->slot);
4861 if (sp [0]->type != STACK_OBJ)
4863 if (sp [2]->type != STACK_OBJ)
4870 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4874 if (mini_is_gsharedvt_klass (cfg, klass)) {
4877 // FIXME-VT: OP_ICONST optimization
4878 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4879 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4880 ins->opcode = OP_STOREV_MEMBASE;
4881 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold element offset into the store's displacement. */
4882 int array_reg = sp [0]->dreg;
4883 int index_reg = sp [1]->dreg;
4884 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4887 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4888 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4890 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4891 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
/* Reference stores need a GC write barrier on the element slot. */
4892 if (generic_class_is_reference_type (cfg, klass))
4893 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline Array.UnsafeStore/UnsafeLoad: an element store (without safety
 * checks) when IS_SET, otherwise an element load without a bounds check.
 * The element class comes from the signature (param 2 for stores, the
 * return type for loads).
 */
4900 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4905 eklass = mono_class_from_mono_type (fsig->params [2]);
4907 eklass = mono_class_from_mono_type (fsig->ret);
/* safety_checks = FALSE: "unsafe" accessors skip the covariance check. */
4911 return emit_array_store (cfg, eklass, args, FALSE);
4913 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4914 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with intrinsic IR.  Currently only SIMD
 * ctor intrinsics are attempted (when MONO_OPT_SIMD is enabled); returns the
 * emitted instruction or NULL when no intrinsic applies.
 */
4920 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4922 MonoInst *ins = NULL;
4923 #ifdef MONO_ARCH_SIMD_INTRINSICS
4924 if (cfg->opt & MONO_OPT_SIMD) {
4925 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 * Append an OP_MEMORY_BARRIER instruction of the given KIND (e.g. FullBarrier)
 * to the current basic block.
 */
4935 emit_memory_barrier (MonoCompile *cfg, int kind)
4937 MonoInst *ins = NULL;
4938 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4939 MONO_ADD_INS (cfg->cbb, ins);
4940 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * Intrinsic lowering used when compiling with the LLVM backend.  Recognizes
 * System.Math Sin/Cos/Sqrt/Abs(double) as single R8 instructions, and
 * Min/Max on I4/U4/I8/U8 as CMOV-style min/max opcodes (under MONO_OPT_CMOV).
 * Returns the emitted instruction or NULL when no intrinsic applies.
 */
4946 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4948 MonoInst *ins = NULL;
4951 /* The LLVM backend supports these intrinsics */
4952 if (cmethod->klass == mono_defaults.math_class) {
4953 if (strcmp (cmethod->name, "Sin") == 0) {
4955 } else if (strcmp (cmethod->name, "Cos") == 0) {
4957 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload. */
4959 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4964 MONO_INST_NEW (cfg, ins, opcode);
4965 ins->type = STACK_R8;
4966 ins->dreg = mono_alloc_freg (cfg);
4967 ins->sreg1 = args [0]->dreg;
4968 MONO_ADD_INS (cfg->cbb, ins);
/* Integer Min/Max map to conditional-move opcodes, signed/unsigned variants. */
4972 if (cfg->opt & MONO_OPT_CMOV) {
4973 if (strcmp (cmethod->name, "Min") == 0) {
4974 if (fsig->params [0]->type == MONO_TYPE_I4)
4976 if (fsig->params [0]->type == MONO_TYPE_U4)
4977 opcode = OP_IMIN_UN;
4978 else if (fsig->params [0]->type == MONO_TYPE_I8)
4980 else if (fsig->params [0]->type == MONO_TYPE_U8)
4981 opcode = OP_LMIN_UN;
4982 } else if (strcmp (cmethod->name, "Max") == 0) {
4983 if (fsig->params [0]->type == MONO_TYPE_I4)
4985 if (fsig->params [0]->type == MONO_TYPE_U4)
4986 opcode = OP_IMAX_UN;
4987 else if (fsig->params [0]->type == MONO_TYPE_I8)
4989 else if (fsig->params [0]->type == MONO_TYPE_U8)
4990 opcode = OP_LMAX_UN;
4995 MONO_INST_NEW (cfg, ins, opcode);
4996 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4997 ins->dreg = mono_alloc_ireg (cfg);
4998 ins->sreg1 = args [0]->dreg;
4999 ins->sreg2 = args [1]->dreg;
5000 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe under generic sharing.  Currently handles
 * Array.UnsafeStore/UnsafeLoad by emitting direct (check-free) element
 * accesses; returns NULL otherwise.
 */
5008 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5010 if (cmethod->klass == mono_defaults.array_class) {
5011 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5012 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5013 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5014 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 * Main intrinsic dispatcher: try to replace a call to CMETHOD with inline IR.
 * Dispatches on the declaring class — String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger/Environment (corlib), Math — then
 * falls through to SIMD, LLVM and arch-specific intrinsic emitters.
 * Returns the emitted instruction, or (per the elided tail) defers to
 * mono_arch_emit_inst_for_method.
 * NOTE(review): this view is elided; many returns/#else/#endif lines are not
 * visible here.
 */
5021 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5023 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
5025 static MonoClass *runtime_helpers_class = NULL;
5026 if (! runtime_helpers_class)
5027 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5028 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5030 if (cmethod->klass == mono_defaults.string_class) {
5031 if (strcmp (cmethod->name, "get_Chars") == 0) {
5032 int dreg = alloc_ireg (cfg);
5033 int index_reg = alloc_preg (cfg);
5034 int mult_reg = alloc_preg (cfg);
5035 int add_reg = alloc_preg (cfg);
5037 #if SIZEOF_REGISTER == 8
5038 /* The array reg is 64 bits but the index reg is only 32 */
5039 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5041 index_reg = args [1]->dreg;
5043 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5045 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: one LEA covers the *2 scaling plus the chars offset. */
5046 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5047 add_reg = ins->dreg;
5048 /* Avoid a warning */
5050 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5054 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5055 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5056 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5058 type_from_op (ins, NULL, NULL);
5060 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5061 int dreg = alloc_ireg (cfg);
5062 /* Decompose later to allow more optimizations */
5063 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5064 ins->type = STACK_I4;
5065 ins->flags |= MONO_INST_FAULT;
5066 cfg->cbb->has_array_access = TRUE;
5067 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5070 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5071 int mult_reg = alloc_preg (cfg);
5072 int add_reg = alloc_preg (cfg);
5074 /* The corlib functions check for oob already. */
5075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5076 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5078 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5081 } else if (cmethod->klass == mono_defaults.object_class) {
5083 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj->vtable->type, with a fault on a null receiver. */
5084 int dreg = alloc_ireg_ref (cfg);
5085 int vt_reg = alloc_preg (cfg);
5086 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5087 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5088 type_from_op (ins, NULL, NULL);
5091 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid with a non-moving GC. */
5092 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5093 int dreg = alloc_ireg (cfg);
5094 int t1 = alloc_ireg (cfg);
5096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5097 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5098 ins->type = STACK_I4;
/* Object..ctor is a no-op. */
5102 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5103 MONO_INST_NEW (cfg, ins, OP_NOP);
5104 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5108 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by their tail. */
5109 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5110 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5112 #ifndef MONO_BIG_ARRAYS
5114 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5117 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5118 int dreg = alloc_ireg (cfg);
5119 int bounds_reg = alloc_ireg_mp (cfg);
5120 MonoBasicBlock *end_bb, *szarray_bb;
5121 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5123 NEW_BBLOCK (cfg, end_bb);
5124 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means szarray (single-dimension, zero-based). */
5126 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5127 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5130 /* Non-szarray case */
5132 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5133 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5135 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5136 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5138 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length; lower bound is always 0. */
5141 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5142 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5144 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5145 MONO_START_BB (cfg, end_bb);
5147 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5148 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters ("get_..."). */
5154 if (cmethod->name [0] != 'g')
5157 if (strcmp (cmethod->name, "get_Rank") == 0) {
5158 int dreg = alloc_ireg (cfg);
5159 int vtable_reg = alloc_preg (cfg);
5160 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5161 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5162 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5163 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5164 type_from_op (ins, NULL, NULL);
5167 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5168 int dreg = alloc_ireg (cfg);
5170 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5171 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5172 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5177 } else if (cmethod->klass == runtime_helpers_class) {
5179 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5180 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5184 } else if (cmethod->klass == mono_defaults.thread_class) {
5185 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5186 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5187 MONO_ADD_INS (cfg->cbb, ins);
5189 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5190 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5192 } else if (cmethod->klass == mono_defaults.monitor_class) {
5194 /* FIXME this should be integrated to the check below once we support the trampoline version */
5195 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5196 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5197 MonoMethod *fast_method = NULL;
5199 /* Avoid infinite recursion */
5200 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5203 fast_method = mono_monitor_get_fast_path (cmethod);
5207 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5211 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5212 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5215 if (COMPILE_LLVM (cfg)) {
5217 * Pass the argument normally, the LLVM backend will handle the
5218 * calling convention problems.
5220 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the object is passed in a dedicated register expected by the trampoline. */
5222 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5223 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5224 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5225 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5228 return (MonoInst*)call;
5229 } else if (strcmp (cmethod->name, "Exit") == 0) {
5232 if (COMPILE_LLVM (cfg)) {
5233 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5235 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5236 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5237 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5238 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5241 return (MonoInst*)call;
5243 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5245 MonoMethod *fast_method = NULL;
5247 /* Avoid infinite recursion */
5248 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5249 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5250 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5253 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5254 strcmp (cmethod->name, "Exit") == 0)
5255 fast_method = mono_monitor_get_fast_path (cmethod);
5259 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
5262 } else if (cmethod->klass->image == mono_defaults.corlib &&
5263 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5264 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5267 #if SIZEOF_REGISTER == 8
5268 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5269 /* 64 bit reads are already atomic */
5270 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5271 ins->dreg = mono_alloc_preg (cfg);
5272 ins->inst_basereg = args [0]->dreg;
5273 ins->inst_offset = 0;
5274 MONO_ADD_INS (cfg->cbb, ins);
5278 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all lower to atomic-add with the right addend. */
5279 if (strcmp (cmethod->name, "Increment") == 0) {
5280 MonoInst *ins_iconst;
5283 if (fsig->params [0]->type == MONO_TYPE_I4)
5284 opcode = OP_ATOMIC_ADD_NEW_I4;
5285 #if SIZEOF_REGISTER == 8
5286 else if (fsig->params [0]->type == MONO_TYPE_I8)
5287 opcode = OP_ATOMIC_ADD_NEW_I8;
5290 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5291 ins_iconst->inst_c0 = 1;
5292 ins_iconst->dreg = mono_alloc_ireg (cfg);
5293 MONO_ADD_INS (cfg->cbb, ins_iconst);
5295 MONO_INST_NEW (cfg, ins, opcode);
5296 ins->dreg = mono_alloc_ireg (cfg);
5297 ins->inst_basereg = args [0]->dreg;
5298 ins->inst_offset = 0;
5299 ins->sreg2 = ins_iconst->dreg;
5300 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5301 MONO_ADD_INS (cfg->cbb, ins);
5303 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5304 MonoInst *ins_iconst;
5307 if (fsig->params [0]->type == MONO_TYPE_I4)
5308 opcode = OP_ATOMIC_ADD_NEW_I4;
5309 #if SIZEOF_REGISTER == 8
5310 else if (fsig->params [0]->type == MONO_TYPE_I8)
5311 opcode = OP_ATOMIC_ADD_NEW_I8;
5314 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5315 ins_iconst->inst_c0 = -1;
5316 ins_iconst->dreg = mono_alloc_ireg (cfg);
5317 MONO_ADD_INS (cfg->cbb, ins_iconst);
5319 MONO_INST_NEW (cfg, ins, opcode);
5320 ins->dreg = mono_alloc_ireg (cfg);
5321 ins->inst_basereg = args [0]->dreg;
5322 ins->inst_offset = 0;
5323 ins->sreg2 = ins_iconst->dreg;
5324 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5325 MONO_ADD_INS (cfg->cbb, ins);
5327 } else if (strcmp (cmethod->name, "Add") == 0) {
5330 if (fsig->params [0]->type == MONO_TYPE_I4)
5331 opcode = OP_ATOMIC_ADD_NEW_I4;
5332 #if SIZEOF_REGISTER == 8
5333 else if (fsig->params [0]->type == MONO_TYPE_I8)
5334 opcode = OP_ATOMIC_ADD_NEW_I8;
5338 MONO_INST_NEW (cfg, ins, opcode);
5339 ins->dreg = mono_alloc_ireg (cfg);
5340 ins->inst_basereg = args [0]->dreg;
5341 ins->inst_offset = 0;
5342 ins->sreg2 = args [1]->dreg;
5343 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5344 MONO_ADD_INS (cfg->cbb, ins);
5347 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5349 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5350 if (strcmp (cmethod->name, "Exchange") == 0) {
5352 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5354 if (fsig->params [0]->type == MONO_TYPE_I4)
5355 opcode = OP_ATOMIC_EXCHANGE_I4;
5356 #if SIZEOF_REGISTER == 8
/* On 64-bit, references and native ints use the 64-bit exchange. */
5357 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5358 (fsig->params [0]->type == MONO_TYPE_I))
5359 opcode = OP_ATOMIC_EXCHANGE_I8;
5361 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5362 opcode = OP_ATOMIC_EXCHANGE_I4;
5367 MONO_INST_NEW (cfg, ins, opcode);
5368 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5369 ins->inst_basereg = args [0]->dreg;
5370 ins->inst_offset = 0;
5371 ins->sreg2 = args [1]->dreg;
5372 MONO_ADD_INS (cfg->cbb, ins);
5374 switch (fsig->params [0]->type) {
5376 ins->type = STACK_I4;
5380 ins->type = STACK_I8;
5382 case MONO_TYPE_OBJECT:
5383 ins->type = STACK_OBJ;
5386 g_assert_not_reached ();
/* Exchanging a reference into the location needs a write barrier. */
5389 if (cfg->gen_write_barriers && is_ref)
5390 emit_write_barrier (cfg, args [0], args [1]);
5392 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5394 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5395 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5397 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5398 if (fsig->params [1]->type == MONO_TYPE_I4)
5400 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5401 size = sizeof (gpointer);
5402 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5405 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5406 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5407 ins->sreg1 = args [0]->dreg;
5408 ins->sreg2 = args [1]->dreg;
5409 ins->sreg3 = args [2]->dreg;
5410 ins->type = STACK_I4;
5411 MONO_ADD_INS (cfg->cbb, ins);
5412 } else if (size == 8) {
5413 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5414 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5415 ins->sreg1 = args [0]->dreg;
5416 ins->sreg2 = args [1]->dreg;
5417 ins->sreg3 = args [2]->dreg;
5418 ins->type = STACK_I8;
5419 MONO_ADD_INS (cfg->cbb, ins);
5421 /* g_assert_not_reached (); */
5423 if (cfg->gen_write_barriers && is_ref)
5424 emit_write_barrier (cfg, args [0], args [1]);
5426 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5428 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5429 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
5433 } else if (cmethod->klass->image == mono_defaults.corlib) {
5434 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5435 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5436 if (should_insert_brekpoint (cfg->method)) {
5437 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5439 MONO_INST_NEW (cfg, ins, OP_NOP);
5440 MONO_ADD_INS (cfg->cbb, ins);
/* Folded to a compile-time constant depending on the target platform. */
5444 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5445 && strcmp (cmethod->klass->name, "Environment") == 0) {
5447 EMIT_NEW_ICONST (cfg, ins, 1);
5449 EMIT_NEW_ICONST (cfg, ins, 0);
5453 } else if (cmethod->klass == mono_defaults.math_class) {
5455 * There is general branches code for Min/Max, but it does not work for
5457 * http://everything2.com/?node_id=1051618
/* Fallbacks: SIMD intrinsics, LLVM intrinsics, then arch-specific ones. */
5461 #ifdef MONO_ARCH_SIMD_INTRINSICS
5462 if (cfg->opt & MONO_OPT_SIMD) {
5463 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5469 if (COMPILE_LLVM (cfg)) {
5470 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5475 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5479 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect selected internal calls to managed implementations.  Currently
 * only String.InternalAllocateStr: when allocation profiling is off and the
 * code is not compiled shared, replace it with a call to the GC's managed
 * string allocator, passing the String vtable plus the original length arg.
 */
5482 inline static MonoInst*
5483 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5484 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5486 if (method->klass == mono_defaults.string_class) {
5487 /* managed string allocation support */
5488 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5489 MonoInst *iargs [2];
5490 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5491 MonoMethod *managed_alloc = NULL;
5493 g_assert (vtable); /*Should not fail since it System.String*/
/* Cross compilers cannot call mono_gc_get_managed_allocator at build time. */
5494 #ifndef MONO_CROSS_COMPILE
5495 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5499 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5500 iargs [1] = args [0];
5501 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * When inlining, copy the caller's stack values SP into freshly created
 * local variables that become the inlined method's arguments (cfg->args).
 * The `this` argument (i == 0 with hasthis) takes its type from the stack
 * value rather than the signature.
 */
5508 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5510 MonoInst *store, *temp;
5513 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5514 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5517 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5518 * would be different than the MonoInst's used to represent arguments, and
5519 * the ldelema implementation can't deal with that.
5520 * Solution: When ldelema is used on an inline argument, create a var for
5521 * it, emit ldelema on that var, and emit the saving code below in
5522 * inline_method () if needed.
5524 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5525 cfg->args [i] = temp;
5526 /* This uses cfg->args [i] which is set by the preceeding line */
5527 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5528 store->cil_code = sp [0]->cil_code;
/* Debug switches: restrict inlining by callee/caller name prefix via env vars. */
5533 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5534 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5536 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Return whether CALLED_METHOD's full name starts with the prefix given in
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (read once, cached).
 * With no/empty limit every method passes (per the elided tail).
 */
5538 check_inline_called_method_name_limit (MonoMethod *called_method)
5541 static char *limit = NULL;
5543 if (limit == NULL) {
5544 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5546 if (limit_string != NULL)
5547 limit = limit_string;
/* Empty string marks "no limit configured" so getenv is not retried. */
5549 limit = (char *) "";
5552 if (limit [0] != '\0') {
5553 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen(limit) characters. */
5555 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5556 g_free (called_method_name);
5558 //return (strncmp_result <= 0);
5559 return (strncmp_result == 0);
5566 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Mirror of check_inline_called_method_name_limit for the CALLER: match the
 * caller's full name against the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env
 * var prefix (read once, cached).
 */
5568 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5571 static char *limit = NULL;
5573 if (limit == NULL) {
5574 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5575 if (limit_string != NULL) {
5576 limit = limit_string;
/* Empty string marks "no limit configured" so getenv is not retried. */
5578 limit = (char *) "";
5582 if (limit [0] != '\0') {
5583 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5585 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5586 g_free (caller_method_name);
5588 //return (strncmp_result <= 0);
5589 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR into cfg->cbb which initializes the dummy return variable RVAR to
 * a zero value appropriate for its stack type: 0 for int, 0L for long,
 * NULL for pointers, 0.0 for R8 (loaded from a static so OP_R8CONST has an
 * address to reference), and VZERO for value types.  Any other stack type
 * is a logic error (g_assert_not_reached).
 */
5597 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
5599 static double r8_0 = 0.0;
5602 switch (rvar->type) {
5604 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5607 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5612 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
/* R8 constants are loaded through a pointer, so point at the static 0.0 */
5615 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5616 ins->type = STACK_R8;
5617 ins->inst_p0 = (void*)&r8_0;
5618 ins->dreg = rvar->dreg;
5619 MONO_ADD_INS (cfg->cbb, ins);
5622 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5625 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD (with signature FSIG and arguments SP) into the
 * current compile at IP.  The per-compile state that mono_method_to_ir ()
 * clobbers (locals, args, cbb, cil offsets, generic context, ...) is saved
 * into prev_* locals, the callee is compiled into a fresh sbblock/ebblock
 * pair, and the state is restored afterwards.  If the reported cost is
 * acceptable (or INLINE_ALWAYS is set) the callee's bblocks are spliced into
 * the caller and merged where possible; otherwise the newly created bblocks
 * are abandoned by resetting cfg->cbb.
 */
5630 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5631 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5633 MonoInst *ins, *rvar = NULL;
5634 MonoMethodHeader *cheader;
5635 MonoBasicBlock *ebblock, *sbblock;
5637 MonoMethod *prev_inlined_method;
5638 MonoInst **prev_locals, **prev_args;
5639 MonoType **prev_arg_types;
5640 guint prev_real_offset;
5641 GHashTable *prev_cbb_hash;
5642 MonoBasicBlock **prev_cil_offset_to_bb;
5643 MonoBasicBlock *prev_cbb;
5644 unsigned char* prev_cil_start;
5645 guint32 prev_cil_offset_to_bb_len;
5646 MonoMethod *prev_current_method;
5647 MonoGenericContext *prev_generic_context;
5648 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5650 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional name-prefix filters for debugging inliner problems */
5652 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5653 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5656 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5657 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5661 if (cfg->verbose_level > 2)
5662 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* count each distinct inlineable method only once */
5664 if (!cmethod->inline_info) {
5665 cfg->stat_inlineable_methods++;
5666 cmethod->inline_info = 1;
5669 /* allocate local variables */
5670 cheader = mono_method_get_header (cmethod);
5672 if (cheader == NULL || mono_loader_get_last_error ()) {
5673 MonoLoaderError *error = mono_loader_get_last_error ();
5676 mono_metadata_free_mh (cheader);
/* only propagate a loader failure when the caller demanded inlining */
5677 if (inline_always && error)
5678 mono_cfg_set_exception (cfg, error->exception_type);
5680 mono_loader_clear_error ();
5684 /*Must verify before creating locals as it can cause the JIT to assert.*/
5685 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5686 mono_metadata_free_mh (cheader);
5690 /* allocate space to store the return value */
5691 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5692 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in the callee's locals; the caller's set is restored below */
5695 prev_locals = cfg->locals;
5696 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5697 for (i = 0; i < cheader->num_locals; ++i)
5698 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5700 /* allocate start and end blocks */
5701 /* This is needed so if the inline is aborted, we can clean up */
5702 NEW_BBLOCK (cfg, sbblock);
5703 sbblock->real_offset = real_offset;
5705 NEW_BBLOCK (cfg, ebblock);
5706 ebblock->block_num = cfg->num_bblocks++;
5707 ebblock->real_offset = real_offset;
/* save the remaining per-compile state clobbered by mono_method_to_ir () */
5709 prev_args = cfg->args;
5710 prev_arg_types = cfg->arg_types;
5711 prev_inlined_method = cfg->inlined_method;
5712 cfg->inlined_method = cmethod;
5713 cfg->ret_var_set = FALSE;
5714 cfg->inline_depth ++;
5715 prev_real_offset = cfg->real_offset;
5716 prev_cbb_hash = cfg->cbb_hash;
5717 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5718 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5719 prev_cil_start = cfg->cil_start;
5720 prev_cbb = cfg->cbb;
5721 prev_current_method = cfg->current_method;
5722 prev_generic_context = cfg->generic_context;
5723 prev_ret_var_set = cfg->ret_var_set;
/* a callvirt on an instance method keeps its virtual-call semantics inline */
5725 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5728 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5730 ret_var_set = cfg->ret_var_set;
/* restore the caller's state in reverse */
5732 cfg->inlined_method = prev_inlined_method;
5733 cfg->real_offset = prev_real_offset;
5734 cfg->cbb_hash = prev_cbb_hash;
5735 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5736 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5737 cfg->cil_start = prev_cil_start;
5738 cfg->locals = prev_locals;
5739 cfg->args = prev_args;
5740 cfg->arg_types = prev_arg_types;
5741 cfg->current_method = prev_current_method;
5742 cfg->generic_context = prev_generic_context;
5743 cfg->ret_var_set = prev_ret_var_set;
5744 cfg->inline_depth --;
/* cost threshold 60: accept cheap bodies, or anything when forced */
5746 if ((costs >= 0 && costs < 60) || inline_always) {
5747 if (cfg->verbose_level > 2)
5748 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5750 cfg->stat_inlined_methods++;
5752 /* always add some code to avoid block split failures */
5753 MONO_INST_NEW (cfg, ins, OP_NOP);
5754 MONO_ADD_INS (prev_cbb, ins);
5756 prev_cbb->next_bb = sbblock;
5757 link_bblock (cfg, prev_cbb, sbblock);
5760 * Get rid of the begin and end bblocks if possible to aid local
5763 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5765 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5766 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5768 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5769 MonoBasicBlock *prev = ebblock->in_bb [0];
5770 mono_merge_basic_blocks (cfg, prev, ebblock);
5772 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5773 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5774 cfg->cbb = prev_cbb;
5778 * It's possible that the rvar is set in some prev bblock, but not in others.
/* predecessors ending in OP_NOT_REACHED never set rvar, so initialize it */
5784 for (i = 0; i < ebblock->in_count; ++i) {
5785 bb = ebblock->in_bb [i];
5787 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5790 emit_init_rvar (cfg, rvar, fsig->ret);
5800 * If the inlined method contains only a throw, then the ret var is not
5801 * set, so set it to a dummy value.
5804 emit_init_rvar (cfg, rvar, fsig->ret);
5806 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* header ownership moves to the cfg; freed when the compile is destroyed */
5809 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5812 if (cfg->verbose_level > 2)
5813 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5814 cfg->exception_type = MONO_EXCEPTION_NONE;
5815 mono_loader_clear_error ();
5817 /* This gets rid of the newly added bblocks */
5818 cfg->cbb = prev_cbb;
5820 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5825 * Some of these comments may well be out-of-date.
5826 * Design decisions: we do a single pass over the IL code (and we do bblock
5827 * splitting/merging in the few cases when it's required: a back jump to an IL
5828 * address that was not already seen as bblock starting point).
5829 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5830 * Complex operations are decomposed in simpler ones right away. We need to let the
5831 * arch-specific code peek and poke inside this process somehow (except when the
5832 * optimizations can take advantage of the full semantic info of coarse opcodes).
5833 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5834 * MonoInst->opcode initially is the IL opcode or some simplification of that
5835 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5836 * opcode with value bigger than OP_LAST.
5837 * At this point the IR can be handed over to an interpreter, a dumb code generator
5838 * or to the optimizing code generator that will translate it to SSA form.
5840 * Profiling directed optimizations.
5841 * We may compile by default with few or no optimizations and instrument the code
5842 * or the user may indicate what methods to optimize the most either in a config file
5843 * or through repeated runs where the compiler applies offline the optimizations to
5844 * each method and then decides if it was worth it.
/*
 * Verification / bounds-check helper macros used while parsing IL in
 * mono_method_to_ir ().  Each bails out through UNVERIFIED (defined
 * elsewhere in this file) when its condition fails; CHECK_TYPELOAD
 * records the failing class and jumps via LOAD_ERROR instead.
 */
5847 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5848 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5849 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5850 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5851 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5852 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5853 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5854 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5856 /* offset from br.s -> br like opcodes */
5857 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. the
 * cil_offset_to_bb table either maps it to BB or records no bblock start
 * at that offset.
 */
5860 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5862 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5864 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL stream [START, END): decode each opcode and create a
 * basic block (GET_BBLOCK) at every branch target and at the instruction
 * following each branch/switch, so the main conversion loop finds all block
 * boundaries already registered.  Additionally, the bblock containing a
 * CEE_THROW is flagged out_of_line so block layout can move it off the hot
 * path.
 */
5868 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5870 unsigned char *ip = start;
5871 unsigned char *target;
5874 MonoBasicBlock *bblock;
5875 const MonoOpcode *opcode;
5878 cli_addr = ip - start;
5879 i = mono_opcode_value ((const guint8 **)&ip, end);
5882 opcode = &mono_opcodes [i];
/* advance over the operand and record targets, by operand kind */
5883 switch (opcode->argument) {
5884 case MonoInlineNone:
5887 case MonoInlineString:
5888 case MonoInlineType:
5889 case MonoInlineField:
5890 case MonoInlineMethod:
5893 case MonoShortInlineR:
5900 case MonoShortInlineVar:
5901 case MonoShortInlineI:
5904 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode */
5905 target = start + cli_addr + 2 + (signed char)ip [1];
5906 GET_BBLOCK (cfg, bblock, target);
5909 GET_BBLOCK (cfg, bblock, ip);
5911 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte opcode */
5912 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5913 GET_BBLOCK (cfg, bblock, target);
5916 GET_BBLOCK (cfg, bblock, ip);
5918 case MonoInlineSwitch: {
5919 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole jump table */
5922 cli_addr += 5 + 4 * n;
5923 target = start + cli_addr;
5924 GET_BBLOCK (cfg, bblock, target);
5926 for (j = 0; j < n; ++j) {
5927 target = start + cli_addr + (gint32)read32 (ip);
5928 GET_BBLOCK (cfg, bblock, target);
5938 g_assert_not_reached ();
5941 if (i == CEE_THROW) {
5942 unsigned char *bb_start = ip - 1;
5944 /* Find the start of the bblock containing the throw */
5946 while ((bb_start >= start) && !bblock) {
5947 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5951 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's data table (inflating with CONTEXT when
 * given); otherwise it is looked up in M's image.  "allow_open" means the
 * result may still be an open constructed method (no sharing check here).
 */
5961 static inline MonoMethod *
5962 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5966 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5967 method = mono_method_get_wrapper_data (m, token);
5969 method = mono_class_inflate_generic_method (method, context);
5971 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when generic sharing is not in
 * effect a method on an open constructed type is rejected (its check applies
 * below the visible line).
 */
5977 static inline MonoMethod *
5978 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5980 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5982 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 * read it from their wrapper data (inflating with CONTEXT when given),
 * regular methods look it up in their image.  The class is initialized
 * before being returned.
 */
5988 static inline MonoClass*
5989 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5993 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5994 klass = mono_method_get_wrapper_data (method, token);
5996 klass = mono_class_inflate_generic_class (klass, context);
5998 klass = mono_class_get_full (method->klass->image, token, context);
6001 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: wrapper methods fetch it from
 * their wrapper data and inflate it with CONTEXT (asserting success);
 * regular methods parse the signature blob from their image.
 */
6005 static inline MonoMethodSignature*
6006 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6008 MonoMethodSignature *fsig;
6010 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6013 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6015 fsig = mono_inflate_generic_signature (fsig, context, &error);
6017 g_assert (mono_error_ok (&error));
6020 fsig = mono_metadata_parse_signature (method->klass->image, token);
6026 * Returns TRUE if the JIT should abort inlining because "callee"
6027 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate declarative-security link demands for a CALLER -> CALLEE call.
 * When the demand fails with the ECMA "public key" case, code throwing a
 * SecurityException is emitted before the call site; other failures record
 * MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg (without clobbering an
 * exception already set).
 */
6030 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only re-check when inlining (cfg->method != caller) and the callee
 * actually carries declarative security */
6034 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6038 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6039 if (result == MONO_JIT_SECURITY_OK)
6042 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6043 /* Generate code to throw a SecurityException before the actual call/link */
6044 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6047 NEW_ICONST (cfg, args [0], 4);
6048 NEW_METHODCONST (cfg, args [1], caller);
6049 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6050 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6051 /* don't hide previous results */
6052 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6053 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException (1 arg) helper method used to raise
 * security exceptions from JITted code.
 */
6061 throw_exception (void)
6063 static MonoMethod *method = NULL;
6066 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6067 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * position, passing the pre-built exception object as a pointer constant.
 */
6074 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6076 MonoMethod *thrower = throw_exception ();
6079 EMIT_NEW_PCONST (cfg, args [0], ex);
6080 mono_emit_method_call (cfg, thrower, args, NULL);
6084 * Return the original method is a wrapper is specified. We can only access
6085 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the managed method it wraps so its custom
 * attributes (e.g. CoreCLR security levels) can be inspected.
 * Non-wrappers are returned unchanged; NATIVE_TO_MANAGED wrappers are
 * treated specially (see the FIXME below).
 */
6088 get_original_method (MonoMethod *method)
6090 if (method->wrapper_type == MONO_WRAPPER_NONE)
6093 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6094 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6097 /* in other cases we need to find the original method */
6098 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not access FIELD, emit code that throws the returned exception at IP.
 */
6102 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6103 MonoBasicBlock *bblock, unsigned char *ip)
6105 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6106 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6108 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not call CALLEE, emit code that throws the returned exception at IP.
 */
6112 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6113 MonoBasicBlock *bblock, unsigned char *ip)
6115 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6116 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6118 emit_throw_exception (cfg, ex);
6122 * Check that the IL instructions at ip are the array initialization
6123 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence following a newarr, and when it
 * matches return a pointer to the field's RVA data (or, under AOT, the RVA
 * itself as a GUINT_TO_POINTER) so the array can be initialized with a
 * memcpy instead of the helper call.  *OUT_SIZE/*OUT_FIELD_TOKEN receive
 * the data size and field token.  Returns NULL whenever the pattern,
 * element type, or data cannot be validated.
 */
6126 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6129 * newarr[System.Int32]
6131 * ldtoken field valuetype ...
6132 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* opcode bytes: dup (1) + ldtoken (1+4) with a field token (table 0x4) + call (1+4) */
6134 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6135 guint32 token = read32 (ip + 7);
6136 guint32 field_token = read32 (ip + 2);
6137 guint32 field_index = field_token & 0xffffff;
6139 const char *data_ptr;
6141 MonoMethod *cmethod;
6142 MonoClass *dummy_class;
6143 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6149 *out_field_token = field_token;
6151 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must really be mscorlib's RuntimeHelpers.InitializeArray */
6154 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6156 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6157 case MONO_TYPE_BOOLEAN:
6161 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6162 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6163 case MONO_TYPE_CHAR:
6173 return NULL; /* stupid ARM FP swapped format */
/* the blob must be large enough for the whole array */
6183 if (size > mono_type_size (field->type, &dummy_align))
6186 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6187 if (!method->klass->image->dynamic) {
6188 field_index = read32 (ip + 2) & 0xffffff;
6189 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6190 data_ptr = mono_image_rva_map (method->klass->image, rva);
6191 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6192 /* for aot code we do the lookup on load */
6193 if (aot && data_ptr)
6194 return GUINT_TO_POINTER (rva);
6196 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6198 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg with a message naming the
 * offending method and disassembling the instruction at IP (or noting an
 * empty body).  The method header is queued on headers_to_free.
 */
6206 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6208 char *method_fname = mono_method_full_name (method, TRUE);
6210 MonoMethodHeader *header = mono_method_get_header (method);
6212 if (header->code_size == 0)
6213 method_code = g_strdup ("method body is empty.");
6215 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6216 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6217 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6218 g_free (method_fname);
6219 g_free (method_code);
6220 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the cfg
 * (MONO_EXCEPTION_OBJECT_SUPPLIED), registering exception_ptr as a GC root
 * so the object survives until the compile is aborted.
 */
6224 set_exception_object (MonoCompile *cfg, MonoException *exception)
6226 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6227 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6228 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N.  When the store would be a plain
 * reg-reg move and the value on top of the stack is the constant just
 * emitted at the end of the current bblock, simply retarget that
 * instruction's dreg to the local instead of emitting a move.
 */
6232 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6235 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6236 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6237 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6238 /* Optimize reg-reg moves away */
6240 * Can't optimize other opcodes, since sp[0] might point to
6241 * the last ins of a decomposed opcode.
6243 sp [0]->dreg = (cfg)->locals [n]->dreg;
6245 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6250 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>" within one bblock: instead of
 * taking the local's address, directly zero the local (PCONST NULL for
 * reference types, VZERO for structs), so the address-taken flag is avoided.
 * Returns the new IP past the consumed sequence, or indicates no match
 * (return paths are outside this sampled listing).
 */
6253 static inline unsigned char *
6254 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6263 local = read16 (ip + 2);
/* initobj is a 2-byte 0xFE-prefixed opcode followed by a 4-byte type token */
6267 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6268 gboolean skip = FALSE;
6270 /* From the INITOBJ case */
6271 token = read32 (ip + 2);
6272 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6273 CHECK_TYPELOAD (klass);
6274 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6275 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6276 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6277 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS checking whether it derives from (or is)
 * System.Exception.
 */
6290 is_exception_class (MonoClass *class)
6293 if (class == mono_defaults.exception_class)
6295 class = class->parent;
6301 * is_jit_optimizer_disabled:
/*
 * Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The answer is cached per assembly
 * (jit_optimizer_disabled / _inited, published with a memory barrier so
 * concurrent readers see a fully written value).  Decodes the two-boolean
 * DebuggableAttribute ctor blob by hand; named parameters are not handled.
 */
6307 is_jit_optimizer_disabled (MonoMethod *m)
6309 MonoAssembly *ass = m->klass->image->assembly;
6310 MonoCustomAttrInfo* attrs;
6311 static MonoClass *klass;
6313 gboolean val = FALSE;
6316 if (ass->jit_optimizer_disabled_inited)
6317 return ass->jit_optimizer_disabled;
/* resolve System.Diagnostics.DebuggableAttribute once into the static */
6320 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6323 ass->jit_optimizer_disabled = FALSE;
6324 mono_memory_barrier ();
6325 ass->jit_optimizer_disabled_inited = TRUE;
6329 attrs = mono_custom_attrs_from_assembly (ass);
6331 for (i = 0; i < attrs->num_attrs; ++i) {
6332 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6335 MonoMethodSignature *sig;
6337 if (!attr->ctor || attr->ctor->klass != klass)
6339 /* Decode the attribute. See reflection.c */
6340 len = attr->data_size;
6341 p = (const char*)attr->data;
/* custom attribute blobs start with the prolog 0x0001 */
6342 g_assert (read16 (p) == 0x0001);
6345 // FIXME: Support named parameters
6346 sig = mono_method_signature (attr->ctor);
6347 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6349 /* Two boolean arguments */
6353 mono_custom_attrs_free (attrs);
6356 ass->jit_optimizer_disabled = val;
6357 mono_memory_barrier ();
6358 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD (call signature FSIG)
 * can actually be compiled as a tail call.  Starts from an arch-specific or
 * signature-equality baseline, then vetoes any case where the callee could
 * observe the caller's (about to be destroyed) stack frame, plus pinvokes,
 * save_lmf methods and most wrappers.
 */
6364 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6366 gboolean supported_tail_call;
6369 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6370 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* fallback: identical signatures and a non-struct return */
6372 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6375 for (i = 0; i < fsig->param_count; ++i) {
6376 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6377 /* These can point to the current method's stack */
6378 supported_tail_call = FALSE;
6380 if (fsig->hasthis && cmethod->klass->valuetype)
6381 /* this might point to the current method's stack */
6382 supported_tail_call = FALSE;
6383 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6384 supported_tail_call = FALSE;
6385 if (cfg->method->save_lmf)
6386 supported_tail_call = FALSE;
6387 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6388 supported_tail_call = FALSE;
6390 /* Debugging support */
6392 if (supported_tail_call) {
6393 if (!mono_debug_count ())
6394 supported_tail_call = FALSE;
6398 return supported_tail_call;
6401 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6402 * it to the thread local value based on the tls_offset field. Every other kind of access to
6403 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return TRUE only for corlib's ThreadLocal`1.tlsdata field — the field
 * whose ldflda is rewritten to a direct thread-local access.
 */
6406 is_magic_tls_access (MonoClassField *field)
6408 if (strcmp (field->name, "tlsdata"))
6410 if (strcmp (field->parent->name, "ThreadLocal`1"))
6412 return field->parent->image == mono_defaults.corlib;
6415 /* emits the code needed to access a managed tls var (like ThreadStatic)
6416 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6417 * pointer for the current thread.
6418 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Open-coded equivalent of the runtime's thread-static lookup:
 * the packed offset encodes a static_data chunk index in its top byte
 * (1-based) and a byte offset within the chunk in the low 24 bits.
 */
6421 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6424 int static_data_reg, array_reg, dreg;
6425 int offset2_reg, idx_reg;
6426 // inlined access to the tls data
6427 // idx = (offset >> 24) - 1;
6428 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6429 static_data_reg = alloc_ireg (cfg);
6430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6431 idx_reg = alloc_ireg (cfg);
6432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by sizeof (gpointer) to address static_data [idx] */
6434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6435 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6436 array_reg = alloc_ireg (cfg);
6437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6438 offset2_reg = alloc_ireg (cfg);
6439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6440 dreg = alloc_ireg (cfg);
6441 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6446 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6447 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Build (and memoize in *CACHED_TLS_ADDR, a temp local) the address of the
 * thread-local slot backing ThreadLocal`1.tlsdata: load tls_offset from the
 * ThreadLocal object (THREAD_LOCAL), obtain the current MonoInternalThread
 * (intrinsic when available, otherwise a call to
 * CurrentInternalThread_internal), and index into its static_data via
 * emit_managed_static_data_access ().
 */
6450 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6452 MonoInst *load, *addr, *temp, *store, *thread_ins;
6453 MonoClassField *offset_field;
6455 if (*cached_tls_addr) {
/* fast path: address was already computed for this method */
6456 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6459 thread_ins = mono_get_thread_intrinsic (cfg);
6460 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6462 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6464 MONO_ADD_INS (cfg->cbb, thread_ins);
6466 MonoMethod *thread_method;
6467 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6468 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6470 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6471 addr->klass = mono_class_from_mono_type (tls_field->type);
6472 addr->type = STACK_MP;
/* cache the computed address in a temp for later uses in this method */
6473 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6474 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6476 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6481 * mono_method_to_ir:
6483 * Translate the .net IL into linear IR.
6486 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6487 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6488 guint inline_offset, gboolean is_virtual_call)
6491 MonoInst *ins, **sp, **stack_start;
6492 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6493 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6494 MonoMethod *cmethod, *method_definition;
6495 MonoInst **arg_array;
6496 MonoMethodHeader *header;
6498 guint32 token, ins_flag;
6500 MonoClass *constrained_call = NULL;
6501 unsigned char *ip, *end, *target, *err_pos;
6502 static double r8_0 = 0.0;
6503 MonoMethodSignature *sig;
6504 MonoGenericContext *generic_context = NULL;
6505 MonoGenericContainer *generic_container = NULL;
6506 MonoType **param_types;
6507 int i, n, start_new_bblock, dreg;
6508 int num_calls = 0, inline_costs = 0;
6509 int breakpoint_id = 0;
6511 MonoBoolean security, pinvoke;
6512 MonoSecurityManager* secman = NULL;
6513 MonoDeclSecurityActions actions;
6514 GSList *class_inits = NULL;
6515 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6517 gboolean init_locals, seq_points, skip_dead_blocks;
6518 gboolean disable_inline, sym_seq_points = FALSE;
6519 MonoInst *cached_tls_addr = NULL;
6520 MonoDebugMethodInfo *minfo;
6521 MonoBitSet *seq_point_locs = NULL;
6522 MonoBitSet *seq_point_set_locs = NULL;
6524 disable_inline = is_jit_optimizer_disabled (method);
6526 /* serialization and xdomain stuff may need access to private fields and methods */
6527 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6528 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6529 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6530 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6531 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6532 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6534 dont_verify |= mono_security_smcs_hack_enabled ();
6536 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6537 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6538 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6539 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6540 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6542 image = method->klass->image;
6543 header = mono_method_get_header (method);
6545 MonoLoaderError *error;
6547 if ((error = mono_loader_get_last_error ())) {
6548 mono_cfg_set_exception (cfg, error->exception_type);
6550 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6551 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6553 goto exception_exit;
6555 generic_container = mono_method_get_generic_container (method);
6556 sig = mono_method_signature (method);
6557 num_args = sig->hasthis + sig->param_count;
6558 ip = (unsigned char*)header->code;
6559 cfg->cil_start = ip;
6560 end = ip + header->code_size;
6561 cfg->stat_cil_code_size += header->code_size;
6562 init_locals = header->init_locals;
6564 seq_points = cfg->gen_seq_points && cfg->method == method;
6565 #ifdef PLATFORM_ANDROID
6566 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6569 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6570 /* We could hit a seq point before attaching to the JIT (#8338) */
6574 if (cfg->gen_seq_points && cfg->method == method) {
6575 minfo = mono_debug_lookup_method (method);
6577 int i, n_il_offsets;
6581 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6582 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6583 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6584 sym_seq_points = TRUE;
6585 for (i = 0; i < n_il_offsets; ++i) {
6586 if (il_offsets [i] < header->code_size)
6587 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6593 * Methods without init_locals set could cause asserts in various passes
6598 method_definition = method;
6599 while (method_definition->is_inflated) {
6600 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6601 method_definition = imethod->declaring;
6604 /* SkipVerification is not allowed if core-clr is enabled */
6605 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6607 dont_verify_stloc = TRUE;
6610 if (mono_debug_using_mono_debugger ())
6611 cfg->keep_cil_nops = TRUE;
6613 if (sig->is_inflated)
6614 generic_context = mono_method_get_context (method);
6615 else if (generic_container)
6616 generic_context = &generic_container->context;
6617 cfg->generic_context = generic_context;
6619 if (!cfg->generic_sharing_context)
6620 g_assert (!sig->has_type_parameters);
6622 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6623 g_assert (method->is_inflated);
6624 g_assert (mono_method_get_context (method)->method_inst);
6626 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6627 g_assert (sig->generic_param_count);
6629 if (cfg->method == method) {
6630 cfg->real_offset = 0;
6632 cfg->real_offset = inline_offset;
6635 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6636 cfg->cil_offset_to_bb_len = header->code_size;
6638 cfg->current_method = method;
6640 if (cfg->verbose_level > 2)
6641 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6643 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6645 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6646 for (n = 0; n < sig->param_count; ++n)
6647 param_types [n + sig->hasthis] = sig->params [n];
6648 cfg->arg_types = param_types;
6650 dont_inline = g_list_prepend (dont_inline, method);
6651 if (cfg->method == method) {
6653 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6654 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6657 NEW_BBLOCK (cfg, start_bblock);
6658 cfg->bb_entry = start_bblock;
6659 start_bblock->cil_code = NULL;
6660 start_bblock->cil_length = 0;
6661 #if defined(__native_client_codegen__)
6662 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6663 ins->dreg = alloc_dreg (cfg, STACK_I4);
6664 MONO_ADD_INS (start_bblock, ins);
6668 NEW_BBLOCK (cfg, end_bblock);
6669 cfg->bb_exit = end_bblock;
6670 end_bblock->cil_code = NULL;
6671 end_bblock->cil_length = 0;
6672 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6673 g_assert (cfg->num_bblocks == 2);
6675 arg_array = cfg->args;
6677 if (header->num_clauses) {
6678 cfg->spvars = g_hash_table_new (NULL, NULL);
6679 cfg->exvars = g_hash_table_new (NULL, NULL);
6681 /* handle exception clauses */
6682 for (i = 0; i < header->num_clauses; ++i) {
6683 MonoBasicBlock *try_bb;
6684 MonoExceptionClause *clause = &header->clauses [i];
6685 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6686 try_bb->real_offset = clause->try_offset;
6687 try_bb->try_start = TRUE;
6688 try_bb->region = ((i + 1) << 8) | clause->flags;
6689 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6690 tblock->real_offset = clause->handler_offset;
6691 tblock->flags |= BB_EXCEPTION_HANDLER;
6693 link_bblock (cfg, try_bb, tblock);
6695 if (*(ip + clause->handler_offset) == CEE_POP)
6696 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6698 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6699 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6700 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6701 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6702 MONO_ADD_INS (tblock, ins);
6704 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6705 /* finally clauses already have a seq point */
6706 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6707 MONO_ADD_INS (tblock, ins);
6710 /* todo: is a fault block unsafe to optimize? */
6711 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6712 tblock->flags |= BB_EXCEPTION_UNSAFE;
6716 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6718 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6720 /* catch and filter blocks get the exception object on the stack */
6721 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6722 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6723 MonoInst *dummy_use;
6725 /* mostly like handle_stack_args (), but just sets the input args */
6726 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6727 tblock->in_scount = 1;
6728 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6729 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6732 * Add a dummy use for the exvar so its liveness info will be
6736 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6738 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6739 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6740 tblock->flags |= BB_EXCEPTION_HANDLER;
6741 tblock->real_offset = clause->data.filter_offset;
6742 tblock->in_scount = 1;
6743 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6744 /* The filter block shares the exvar with the handler block */
6745 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6746 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6747 MONO_ADD_INS (tblock, ins);
6751 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6752 clause->data.catch_class &&
6753 cfg->generic_sharing_context &&
6754 mono_class_check_context_used (clause->data.catch_class)) {
6756 * In shared generic code with catch
6757 * clauses containing type variables
6758 * the exception handling code has to
6759 * be able to get to the rgctx.
6760 * Therefore we have to make sure that
6761 * the vtable/mrgctx argument (for
6762 * static or generic methods) or the
6763 * "this" argument (for non-static
6764 * methods) are live.
6766 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6767 mini_method_get_context (method)->method_inst ||
6768 method->klass->valuetype) {
6769 mono_get_vtable_var (cfg);
6771 MonoInst *dummy_use;
6773 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6778 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6779 cfg->cbb = start_bblock;
6780 cfg->args = arg_array;
6781 mono_save_args (cfg, sig, inline_args);
6784 /* FIRST CODE BLOCK */
6785 NEW_BBLOCK (cfg, bblock);
6786 bblock->cil_code = ip;
6790 ADD_BBLOCK (cfg, bblock);
6792 if (cfg->method == method) {
6793 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6794 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6795 MONO_INST_NEW (cfg, ins, OP_BREAK);
6796 MONO_ADD_INS (bblock, ins);
6800 if (mono_security_cas_enabled ())
6801 secman = mono_security_manager_get_methods ();
6803 security = (secman && mono_security_method_has_declsec (method));
6804 /* at this point having security doesn't mean we have any code to generate */
6805 if (security && (cfg->method == method)) {
6806 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6807 * And we do not want to enter the next section (with allocation) if we
6808 * have nothing to generate */
6809 security = mono_declsec_get_demands (method, &actions);
6812 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6813 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6815 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6816 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6817 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6819 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6820 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6824 mono_custom_attrs_free (custom);
6827 custom = mono_custom_attrs_from_class (wrapped->klass);
6828 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6832 mono_custom_attrs_free (custom);
6835 /* not a P/Invoke after all */
6840 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6841 /* we use a separate basic block for the initialization code */
6842 NEW_BBLOCK (cfg, init_localsbb);
6843 cfg->bb_init = init_localsbb;
6844 init_localsbb->real_offset = cfg->real_offset;
6845 start_bblock->next_bb = init_localsbb;
6846 init_localsbb->next_bb = bblock;
6847 link_bblock (cfg, start_bblock, init_localsbb);
6848 link_bblock (cfg, init_localsbb, bblock);
6850 cfg->cbb = init_localsbb;
6852 start_bblock->next_bb = bblock;
6853 link_bblock (cfg, start_bblock, bblock);
6856 if (cfg->gsharedvt && cfg->method == method) {
6857 MonoGSharedVtMethodInfo *info;
6858 MonoInst *var, *locals_var;
6861 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6862 info->method = cfg->method;
6864 info->entries = g_ptr_array_new ();
6865 cfg->gsharedvt_info = info;
6867 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6868 /* prevent it from being register allocated */
6869 //var->flags |= MONO_INST_INDIRECT;
6870 cfg->gsharedvt_info_var = var;
6872 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6873 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6875 /* Allocate locals */
6876 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6877 /* prevent it from being register allocated */
6878 //locals_var->flags |= MONO_INST_INDIRECT;
6879 cfg->gsharedvt_locals_var = locals_var;
6881 dreg = alloc_ireg (cfg);
6882 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6884 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6885 ins->dreg = locals_var->dreg;
6887 MONO_ADD_INS (cfg->cbb, ins);
6888 cfg->gsharedvt_locals_var_ins = ins;
6890 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6893 ins->flags |= MONO_INST_INIT;
6897 /* at this point we know, if security is TRUE, that some code needs to be generated */
6898 if (security && (cfg->method == method)) {
6901 cfg->stat_cas_demand_generation++;
6903 if (actions.demand.blob) {
6904 /* Add code for SecurityAction.Demand */
6905 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6906 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6907 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6908 mono_emit_method_call (cfg, secman->demand, args, NULL);
6910 if (actions.noncasdemand.blob) {
6911 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6912 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6913 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6914 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6915 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6916 mono_emit_method_call (cfg, secman->demand, args, NULL);
6918 if (actions.demandchoice.blob) {
6919 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6920 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6921 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6922 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6923 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6927 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6929 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6932 if (mono_security_core_clr_enabled ()) {
6933 /* check if this is native code, e.g. an icall or a p/invoke */
6934 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6935 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6937 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6938 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6940 /* if this ia a native call then it can only be JITted from platform code */
6941 if ((icall || pinvk) && method->klass && method->klass->image) {
6942 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6943 MonoException *ex = icall ? mono_get_exception_security () :
6944 mono_get_exception_method_access ();
6945 emit_throw_exception (cfg, ex);
6952 if (header->code_size == 0)
6955 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6960 if (cfg->method == method)
6961 mono_debug_init_method (cfg, bblock, breakpoint_id);
6963 for (n = 0; n < header->num_locals; ++n) {
6964 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6969 /* We force the vtable variable here for all shared methods
6970 for the possibility that they might show up in a stack
6971 trace where their exact instantiation is needed. */
6972 if (cfg->generic_sharing_context && method == cfg->method) {
6973 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6974 mini_method_get_context (method)->method_inst ||
6975 method->klass->valuetype) {
6976 mono_get_vtable_var (cfg);
6978 /* FIXME: Is there a better way to do this?
6979 We need the variable live for the duration
6980 of the whole method. */
6981 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6985 /* add a check for this != NULL to inlined methods */
6986 if (is_virtual_call) {
6989 NEW_ARGLOAD (cfg, arg_ins, 0);
6990 MONO_ADD_INS (cfg->cbb, arg_ins);
6991 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6994 skip_dead_blocks = !dont_verify;
6995 if (skip_dead_blocks) {
6996 original_bb = bb = mono_basic_block_split (method, &error);
6997 if (!mono_error_ok (&error)) {
6998 mono_error_cleanup (&error);
7004 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7005 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7008 start_new_bblock = 0;
7011 if (cfg->method == method)
7012 cfg->real_offset = ip - header->code;
7014 cfg->real_offset = inline_offset;
7019 if (start_new_bblock) {
7020 bblock->cil_length = ip - bblock->cil_code;
7021 if (start_new_bblock == 2) {
7022 g_assert (ip == tblock->cil_code);
7024 GET_BBLOCK (cfg, tblock, ip);
7026 bblock->next_bb = tblock;
7029 start_new_bblock = 0;
7030 for (i = 0; i < bblock->in_scount; ++i) {
7031 if (cfg->verbose_level > 3)
7032 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7033 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7037 g_slist_free (class_inits);
7040 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7041 link_bblock (cfg, bblock, tblock);
7042 if (sp != stack_start) {
7043 handle_stack_args (cfg, stack_start, sp - stack_start);
7045 CHECK_UNVERIFIABLE (cfg);
7047 bblock->next_bb = tblock;
7050 for (i = 0; i < bblock->in_scount; ++i) {
7051 if (cfg->verbose_level > 3)
7052 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7053 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7056 g_slist_free (class_inits);
7061 if (skip_dead_blocks) {
7062 int ip_offset = ip - header->code;
7064 if (ip_offset == bb->end)
7068 int op_size = mono_opcode_size (ip, end);
7069 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7071 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7073 if (ip_offset + op_size == bb->end) {
7074 MONO_INST_NEW (cfg, ins, OP_NOP);
7075 MONO_ADD_INS (bblock, ins);
7076 start_new_bblock = 1;
7084 * Sequence points are points where the debugger can place a breakpoint.
7085 * Currently, we generate these automatically at points where the IL
7088 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7090 * Make methods interruptable at the beginning, and at the targets of
7091 * backward branches.
7092 * Also, do this at the start of every bblock in methods with clauses too,
7093 * to be able to handle instructions with inprecise control flow like
7095 * Backward branches are handled at the end of method-to-ir ().
7097 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7099 /* Avoid sequence points on empty IL like .volatile */
7100 // FIXME: Enable this
7101 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7102 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7103 MONO_ADD_INS (cfg->cbb, ins);
7106 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7109 bblock->real_offset = cfg->real_offset;
7111 if ((cfg->method == method) && cfg->coverage_info) {
7112 guint32 cil_offset = ip - header->code;
7113 cfg->coverage_info->data [cil_offset].cil_code = ip;
7115 /* TODO: Use an increment here */
7116 #if defined(TARGET_X86)
7117 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7118 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7120 MONO_ADD_INS (cfg->cbb, ins);
7122 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7123 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7127 if (cfg->verbose_level > 3)
7128 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7132 if (seq_points && !sym_seq_points && sp != stack_start) {
7134 * The C# compiler uses these nops to notify the JIT that it should
7135 * insert seq points.
7137 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7138 MONO_ADD_INS (cfg->cbb, ins);
7140 if (cfg->keep_cil_nops)
7141 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7143 MONO_INST_NEW (cfg, ins, OP_NOP);
7145 MONO_ADD_INS (bblock, ins);
7148 if (should_insert_brekpoint (cfg->method)) {
7149 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7151 MONO_INST_NEW (cfg, ins, OP_NOP);
7154 MONO_ADD_INS (bblock, ins);
7160 CHECK_STACK_OVF (1);
7161 n = (*ip)-CEE_LDARG_0;
7163 EMIT_NEW_ARGLOAD (cfg, ins, n);
7171 CHECK_STACK_OVF (1);
7172 n = (*ip)-CEE_LDLOC_0;
7174 EMIT_NEW_LOCLOAD (cfg, ins, n);
7183 n = (*ip)-CEE_STLOC_0;
7186 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7188 emit_stloc_ir (cfg, sp, header, n);
7195 CHECK_STACK_OVF (1);
7198 EMIT_NEW_ARGLOAD (cfg, ins, n);
7204 CHECK_STACK_OVF (1);
7207 NEW_ARGLOADA (cfg, ins, n);
7208 MONO_ADD_INS (cfg->cbb, ins);
7218 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7220 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7225 CHECK_STACK_OVF (1);
7228 EMIT_NEW_LOCLOAD (cfg, ins, n);
7232 case CEE_LDLOCA_S: {
7233 unsigned char *tmp_ip;
7235 CHECK_STACK_OVF (1);
7236 CHECK_LOCAL (ip [1]);
7238 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7244 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7253 CHECK_LOCAL (ip [1]);
7254 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7256 emit_stloc_ir (cfg, sp, header, ip [1]);
7261 CHECK_STACK_OVF (1);
7262 EMIT_NEW_PCONST (cfg, ins, NULL);
7263 ins->type = STACK_OBJ;
7268 CHECK_STACK_OVF (1);
7269 EMIT_NEW_ICONST (cfg, ins, -1);
7282 CHECK_STACK_OVF (1);
7283 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7289 CHECK_STACK_OVF (1);
7291 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7297 CHECK_STACK_OVF (1);
7298 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7304 CHECK_STACK_OVF (1);
7305 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7306 ins->type = STACK_I8;
7307 ins->dreg = alloc_dreg (cfg, STACK_I8);
7309 ins->inst_l = (gint64)read64 (ip);
7310 MONO_ADD_INS (bblock, ins);
7316 gboolean use_aotconst = FALSE;
7318 #ifdef TARGET_POWERPC
7319 /* FIXME: Clean this up */
7320 if (cfg->compile_aot)
7321 use_aotconst = TRUE;
7324 /* FIXME: we should really allocate this only late in the compilation process */
7325 f = mono_domain_alloc (cfg->domain, sizeof (float));
7327 CHECK_STACK_OVF (1);
7333 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7335 dreg = alloc_freg (cfg);
7336 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7337 ins->type = STACK_R8;
7339 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7340 ins->type = STACK_R8;
7341 ins->dreg = alloc_dreg (cfg, STACK_R8);
7343 MONO_ADD_INS (bblock, ins);
7353 gboolean use_aotconst = FALSE;
7355 #ifdef TARGET_POWERPC
7356 /* FIXME: Clean this up */
7357 if (cfg->compile_aot)
7358 use_aotconst = TRUE;
7361 /* FIXME: we should really allocate this only late in the compilation process */
7362 d = mono_domain_alloc (cfg->domain, sizeof (double));
7364 CHECK_STACK_OVF (1);
7370 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7372 dreg = alloc_freg (cfg);
7373 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7374 ins->type = STACK_R8;
7376 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7377 ins->type = STACK_R8;
7378 ins->dreg = alloc_dreg (cfg, STACK_R8);
7380 MONO_ADD_INS (bblock, ins);
7389 MonoInst *temp, *store;
7391 CHECK_STACK_OVF (1);
7395 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7396 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7398 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7401 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7414 if (sp [0]->type == STACK_R8)
7415 /* we need to pop the value from the x86 FP stack */
7416 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7422 INLINE_FAILURE ("jmp");
7423 GSHAREDVT_FAILURE (*ip);
7426 if (stack_start != sp)
7428 token = read32 (ip + 1);
7429 /* FIXME: check the signature matches */
7430 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7432 if (!cmethod || mono_loader_get_last_error ())
7435 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7436 GENERIC_SHARING_FAILURE (CEE_JMP);
7438 if (mono_security_cas_enabled ())
7439 CHECK_CFG_EXCEPTION;
7441 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7443 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7446 /* Handle tail calls similarly to calls */
7447 n = fsig->param_count + fsig->hasthis;
7449 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7450 call->method = cmethod;
7451 call->tail_call = TRUE;
7452 call->signature = mono_method_signature (cmethod);
7453 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7454 call->inst.inst_p0 = cmethod;
7455 for (i = 0; i < n; ++i)
7456 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7458 mono_arch_emit_call (cfg, call);
7459 MONO_ADD_INS (bblock, (MonoInst*)call);
7462 for (i = 0; i < num_args; ++i)
7463 /* Prevent arguments from being optimized away */
7464 arg_array [i]->flags |= MONO_INST_VOLATILE;
7466 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7467 ins = (MonoInst*)call;
7468 ins->inst_p0 = cmethod;
7469 MONO_ADD_INS (bblock, ins);
7473 start_new_bblock = 1;
7478 case CEE_CALLVIRT: {
7479 MonoInst *addr = NULL;
7480 MonoMethodSignature *fsig = NULL;
7482 int virtual = *ip == CEE_CALLVIRT;
7483 int calli = *ip == CEE_CALLI;
7484 gboolean pass_imt_from_rgctx = FALSE;
7485 MonoInst *imt_arg = NULL;
7486 MonoInst *keep_this_alive = NULL;
7487 gboolean pass_vtable = FALSE;
7488 gboolean pass_mrgctx = FALSE;
7489 MonoInst *vtable_arg = NULL;
7490 gboolean check_this = FALSE;
7491 gboolean supported_tail_call = FALSE;
7492 gboolean tail_call = FALSE;
7493 gboolean need_seq_point = FALSE;
7494 guint32 call_opcode = *ip;
7495 gboolean emit_widen = TRUE;
7496 gboolean push_res = TRUE;
7497 gboolean skip_ret = FALSE;
7498 gboolean delegate_invoke = FALSE;
7501 token = read32 (ip + 1);
7506 //GSHAREDVT_FAILURE (*ip);
7511 fsig = mini_get_signature (method, token, generic_context);
7512 n = fsig->param_count + fsig->hasthis;
7514 if (method->dynamic && fsig->pinvoke) {
7518 * This is a call through a function pointer using a pinvoke
7519 * signature. Have to create a wrapper and call that instead.
7520 * FIXME: This is very slow, need to create a wrapper at JIT time
7521 * instead based on the signature.
7523 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7524 EMIT_NEW_PCONST (cfg, args [1], fsig);
7526 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7529 MonoMethod *cil_method;
7531 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7532 cil_method = cmethod;
7534 if (constrained_call) {
7535 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7536 if (cfg->verbose_level > 2)
7537 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7538 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7539 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7540 cfg->generic_sharing_context)) {
7541 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7544 if (cfg->verbose_level > 2)
7545 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7547 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7549 * This is needed since get_method_constrained can't find
7550 * the method in klass representing a type var.
7551 * The type var is guaranteed to be a reference type in this
7554 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7555 g_assert (!cmethod->klass->valuetype);
7557 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7562 if (!cmethod || mono_loader_get_last_error ())
7564 if (!dont_verify && !cfg->skip_visibility) {
7565 MonoMethod *target_method = cil_method;
7566 if (method->is_inflated) {
7567 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7569 if (!mono_method_can_access_method (method_definition, target_method) &&
7570 !mono_method_can_access_method (method, cil_method))
7571 METHOD_ACCESS_FAILURE;
7574 if (mono_security_core_clr_enabled ())
7575 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7577 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7578 /* MS.NET seems to silently convert this to a callvirt */
7583 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7584 * converts to a callvirt.
7586 * tests/bug-515884.il is an example of this behavior
7588 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7589 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7590 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7594 if (!cmethod->klass->inited)
7595 if (!mono_class_init (cmethod->klass))
7596 TYPE_LOAD_ERROR (cmethod->klass);
7598 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7599 mini_class_is_system_array (cmethod->klass)) {
7600 array_rank = cmethod->klass->rank;
7601 fsig = mono_method_signature (cmethod);
7603 fsig = mono_method_signature (cmethod);
7608 if (fsig->pinvoke) {
7609 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7610 check_for_pending_exc, FALSE);
7611 fsig = mono_method_signature (wrapper);
7612 } else if (constrained_call) {
7613 fsig = mono_method_signature (cmethod);
7615 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7619 mono_save_token_info (cfg, image, token, cil_method);
7621 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7623 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7624 * foo (bar (), baz ())
7625 * works correctly. MS does this also:
7626 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7627 * The problem with this approach is that the debugger will stop after all calls returning a value,
7628 * even for simple cases, like:
7631 /* Special case a few common successor opcodes */
7632 if (!(ip + 5 < end && ip [5] == CEE_POP))
7633 need_seq_point = TRUE;
7636 n = fsig->param_count + fsig->hasthis;
7638 /* Don't support calls made using type arguments for now */
7640 if (cfg->gsharedvt) {
7641 if (mini_is_gsharedvt_signature (cfg, fsig))
7642 GSHAREDVT_FAILURE (*ip);
7646 if (mono_security_cas_enabled ()) {
7647 if (check_linkdemand (cfg, method, cmethod))
7648 INLINE_FAILURE ("linkdemand");
7649 CHECK_CFG_EXCEPTION;
7652 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7653 g_assert_not_reached ();
7656 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7659 if (!cfg->generic_sharing_context && cmethod)
7660 g_assert (!mono_method_check_context_used (cmethod));
7664 //g_assert (!virtual || fsig->hasthis);
7668 if (constrained_call) {
7669 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7671 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7673 /* Special case Object methods as they are easy to implement */
7674 if (cmethod->klass == mono_defaults.object_class) {
7675 MonoInst *args [16];
7678 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7679 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7681 if (!strcmp (cmethod->name, "ToString")) {
7682 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7683 } else if (!strcmp (cmethod->name, "Equals")) {
7685 ins = mono_emit_jit_icall (cfg, mono_object_equals_gsharedvt, args);
7686 } else if (!strcmp (cmethod->name, "GetHashCode")) {
7687 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7689 GSHAREDVT_FAILURE (*ip);
7692 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7693 /* The 'Own method' case below */
7694 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && MONO_TYPE_IS_VOID (fsig->ret) && (fsig->param_count == 0 || (fsig->param_count == 1 && MONO_TYPE_IS_REFERENCE (fsig->params [0])))) {
7695 MonoInst *args [16];
7698 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7699 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7701 if (fsig->param_count) {
7702 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7703 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7704 ins->dreg = alloc_preg (cfg);
7705 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7706 MONO_ADD_INS (cfg->cbb, ins);
7709 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [3]->dreg, 0, sp [1]->dreg);
7711 EMIT_NEW_ICONST (cfg, args [3], 0);
7714 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7717 GSHAREDVT_FAILURE (*ip);
7721 * We have the `constrained.' prefix opcode.
7723 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7725 * The type parameter is instantiated as a valuetype,
7726 * but that type doesn't override the method we're
7727 * calling, so we need to box `this'.
7729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7730 ins->klass = constrained_call;
7731 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7732 CHECK_CFG_EXCEPTION;
7733 } else if (!constrained_call->valuetype) {
7734 int dreg = alloc_ireg_ref (cfg);
7737 * The type parameter is instantiated as a reference
7738 * type. We have a managed pointer on the stack, so
7739 * we need to dereference it here.
7741 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7742 ins->type = STACK_OBJ;
7745 if (cmethod->klass->valuetype) {
7748 /* Interface method */
7751 mono_class_setup_vtable (constrained_call);
7752 CHECK_TYPELOAD (constrained_call);
7753 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7755 TYPE_LOAD_ERROR (constrained_call);
7756 slot = mono_method_get_vtable_slot (cmethod);
7758 TYPE_LOAD_ERROR (cmethod->klass);
7759 cmethod = constrained_call->vtable [ioffset + slot];
7761 if (cmethod->klass == mono_defaults.enum_class) {
7762 /* Enum implements some interfaces, so treat this as the first case */
7763 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7764 ins->klass = constrained_call;
7765 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7766 CHECK_CFG_EXCEPTION;
7771 constrained_call = NULL;
7774 if (!calli && check_call_signature (cfg, fsig, sp))
7777 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7778 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7779 delegate_invoke = TRUE;
7782 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7784 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7785 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7793 * If the callee is a shared method, then its static cctor
7794 * might not get called after the call was patched.
7796 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7797 emit_generic_class_init (cfg, cmethod->klass);
7798 CHECK_TYPELOAD (cmethod->klass);
7802 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7804 if (cfg->generic_sharing_context && cmethod) {
7805 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7807 context_used = mini_method_check_context_used (cfg, cmethod);
7809 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7810 /* Generic method interface
7811 calls are resolved via a
7812 helper function and don't
7814 if (!cmethod_context || !cmethod_context->method_inst)
7815 pass_imt_from_rgctx = TRUE;
7819 * If a shared method calls another
7820 * shared method then the caller must
7821 * have a generic sharing context
7822 * because the magic trampoline
7823 * requires it. FIXME: We shouldn't
7824 * have to force the vtable/mrgctx
7825 * variable here. Instead there
7826 * should be a flag in the cfg to
7827 * request a generic sharing context.
7830 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7831 mono_get_vtable_var (cfg);
7836 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7838 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7840 CHECK_TYPELOAD (cmethod->klass);
7841 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7846 g_assert (!vtable_arg);
7848 if (!cfg->compile_aot) {
7850 * emit_get_rgctx_method () calls mono_class_vtable () so check
7851 * for type load errors before.
7853 mono_class_setup_vtable (cmethod->klass);
7854 CHECK_TYPELOAD (cmethod->klass);
7857 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7859 /* !marshalbyref is needed to properly handle generic methods + remoting */
7860 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7861 MONO_METHOD_IS_FINAL (cmethod)) &&
7862 !mono_class_is_marshalbyref (cmethod->klass)) {
7869 if (pass_imt_from_rgctx) {
7870 g_assert (!pass_vtable);
7873 imt_arg = emit_get_rgctx_method (cfg, context_used,
7874 cmethod, MONO_RGCTX_INFO_METHOD);
7878 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7880 /* Calling virtual generic methods */
7881 if (cmethod && virtual &&
7882 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7883 !(MONO_METHOD_IS_FINAL (cmethod) &&
7884 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7885 fsig->generic_param_count &&
7886 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7887 MonoInst *this_temp, *this_arg_temp, *store;
7888 MonoInst *iargs [4];
7889 gboolean use_imt = FALSE;
7891 g_assert (fsig->is_inflated);
7893 /* Prevent inlining of methods that contain indirect calls */
7894 INLINE_FAILURE ("virtual generic call");
7896 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7897 GSHAREDVT_FAILURE (*ip);
7899 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7900 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7905 g_assert (!imt_arg);
7907 g_assert (cmethod->is_inflated);
7908 imt_arg = emit_get_rgctx_method (cfg, context_used,
7909 cmethod, MONO_RGCTX_INFO_METHOD);
7910 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7912 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7913 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7914 MONO_ADD_INS (bblock, store);
7916 /* FIXME: This should be a managed pointer */
7917 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7919 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7920 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7921 cmethod, MONO_RGCTX_INFO_METHOD);
7922 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7923 addr = mono_emit_jit_icall (cfg,
7924 mono_helper_compile_generic_method, iargs);
7926 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7928 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7935 * Implement a workaround for the inherent races involved in locking:
7941 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7942 * try block, the Exit () won't be executed, see:
7943 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7944 * To work around this, we extend such try blocks to include the last x bytes
7945 * of the Monitor.Enter () call.
7947 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7948 MonoBasicBlock *tbb;
7950 GET_BBLOCK (cfg, tbb, ip + 5);
7952 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7953 * from Monitor.Enter like ArgumentNullException.
7955 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7956 /* Mark this bblock as needing to be extended */
7957 tbb->extend_try_block = TRUE;
7961 /* Conversion to a JIT intrinsic */
7962 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7964 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7965 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7972 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7973 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7974 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7975 !g_list_find (dont_inline, cmethod)) {
7977 gboolean always = FALSE;
7979 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7980 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7981 /* Prevent inlining of methods that call wrappers */
7982 INLINE_FAILURE ("wrapper call");
7983 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7987 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7989 cfg->real_offset += 5;
7992 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7993 /* *sp is already set by inline_method */
7998 inline_costs += costs;
8004 /* Tail recursion elimination */
8005 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8006 gboolean has_vtargs = FALSE;
8009 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8010 INLINE_FAILURE ("tail call");
8012 /* keep it simple */
8013 for (i = fsig->param_count - 1; i >= 0; i--) {
8014 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8019 for (i = 0; i < n; ++i)
8020 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8021 MONO_INST_NEW (cfg, ins, OP_BR);
8022 MONO_ADD_INS (bblock, ins);
8023 tblock = start_bblock->out_bb [0];
8024 link_bblock (cfg, bblock, tblock);
8025 ins->inst_target_bb = tblock;
8026 start_new_bblock = 1;
8028 /* skip the CEE_RET, too */
8029 if (ip_in_bb (cfg, bblock, ip + 5))
8036 inline_costs += 10 * num_calls++;
8039 * Making generic calls out of gsharedvt methods.
8041 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8042 MonoRgctxInfoType info_type;
8045 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8046 //GSHAREDVT_FAILURE (*ip);
8047 // disable for possible remoting calls
8048 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8049 GSHAREDVT_FAILURE (*ip);
8050 if (fsig->generic_param_count) {
8051 /* virtual generic call */
8052 g_assert (mono_use_imt);
8053 g_assert (!imt_arg);
8054 /* Same as the virtual generic case above */
8055 imt_arg = emit_get_rgctx_method (cfg, context_used,
8056 cmethod, MONO_RGCTX_INFO_METHOD);
8057 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8062 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8063 /* test_0_multi_dim_arrays () in gshared.cs */
8064 GSHAREDVT_FAILURE (*ip);
8066 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8067 keep_this_alive = sp [0];
8069 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8070 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8072 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8073 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8075 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8077 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8079 * We pass the address to the gsharedvt trampoline in the rgctx reg
8081 MonoInst *callee = addr;
8083 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8085 GSHAREDVT_FAILURE (*ip);
8087 addr = emit_get_rgctx_sig (cfg, context_used,
8088 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8089 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8093 /* Generic sharing */
8094 /* FIXME: only do this for generic methods if
8095 they are not shared! */
8096 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8097 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8098 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8099 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8100 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8101 INLINE_FAILURE ("gshared");
8103 g_assert (cfg->generic_sharing_context && cmethod);
8107 * We are compiling a call to a
8108 * generic method from shared code,
8109 * which means that we have to look up
8110 * the method in the rgctx and do an
8114 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8116 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8117 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8121 /* Indirect calls */
8123 if (call_opcode == CEE_CALL)
8124 g_assert (context_used);
8125 else if (call_opcode == CEE_CALLI)
8126 g_assert (!vtable_arg);
8128 /* FIXME: what the hell is this??? */
8129 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8130 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8132 /* Prevent inlining of methods with indirect calls */
8133 INLINE_FAILURE ("indirect call");
8135 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8140 * Instead of emitting an indirect call, emit a direct call
8141 * with the contents of the aotconst as the patch info.
8143 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8144 info_type = addr->inst_c1;
8145 info_data = addr->inst_p0;
8147 info_type = addr->inst_right->inst_c1;
8148 info_data = addr->inst_right->inst_left;
8151 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8152 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8157 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8165 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8166 MonoInst *val = sp [fsig->param_count];
8168 if (val->type == STACK_OBJ) {
8169 MonoInst *iargs [2];
8174 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8177 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8178 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8179 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8180 emit_write_barrier (cfg, addr, val);
8181 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8182 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8184 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8185 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8186 if (!cmethod->klass->element_class->valuetype && !readonly)
8187 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8188 CHECK_TYPELOAD (cmethod->klass);
8191 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8194 g_assert_not_reached ();
8201 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8205 /* Tail prefix / tail call optimization */
8207 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8208 /* FIXME: runtime generic context pointer for jumps? */
8209 /* FIXME: handle this for generic sharing eventually */
8210 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8211 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8212 supported_tail_call = TRUE;
8213 if (supported_tail_call) {
8214 if (call_opcode != CEE_CALL)
8215 supported_tail_call = FALSE;
8218 if (supported_tail_call) {
8221 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8222 INLINE_FAILURE ("tail call");
8224 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8226 if (ARCH_USE_OP_TAIL_CALL) {
8227 /* Handle tail calls similarly to normal calls */
8230 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8231 call->tail_call = TRUE;
8232 call->method = cmethod;
8233 call->signature = mono_method_signature (cmethod);
8236 * We implement tail calls by storing the actual arguments into the
8237 * argument variables, then emitting a CEE_JMP.
8239 for (i = 0; i < n; ++i) {
8240 /* Prevent argument from being register allocated */
8241 arg_array [i]->flags |= MONO_INST_VOLATILE;
8242 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8244 ins = (MonoInst*)call;
8245 ins->inst_p0 = cmethod;
8246 ins->inst_p1 = arg_array [0];
8247 MONO_ADD_INS (bblock, ins);
8248 link_bblock (cfg, bblock, end_bblock);
8249 start_new_bblock = 1;
8251 // FIXME: Eliminate unreachable epilogs
8254 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8255 * only reachable from this call.
8257 GET_BBLOCK (cfg, tblock, ip + 5);
8258 if (tblock == bblock || tblock->in_count == 0)
8267 * Synchronized wrappers.
8268			 * It's hard to determine where to replace a method with its synchronized
8269 * wrapper without causing an infinite recursion. The current solution is
8270 * to add the synchronized wrapper in the trampolines, and to
8271 * change the called method to a dummy wrapper, and resolve that wrapper
8272 * to the real method in mono_jit_compile_method ().
8274 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8275 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8276 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8277 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8281 INLINE_FAILURE ("call");
8282 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8283 imt_arg, vtable_arg);
8286 link_bblock (cfg, bblock, end_bblock);
8287 start_new_bblock = 1;
8289 // FIXME: Eliminate unreachable epilogs
8292 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8293 * only reachable from this call.
8295 GET_BBLOCK (cfg, tblock, ip + 5);
8296 if (tblock == bblock || tblock->in_count == 0)
8303 /* End of call, INS should contain the result of the call, if any */
8305 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8308 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8313 if (keep_this_alive) {
8314 MonoInst *dummy_use;
8316 /* See mono_emit_method_call_full () */
8317 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8320 CHECK_CFG_EXCEPTION;
8324 g_assert (*ip == CEE_RET);
8328 constrained_call = NULL;
8330 emit_seq_point (cfg, method, ip, FALSE);
8334 if (cfg->method != method) {
8335 /* return from inlined method */
8337 * If in_count == 0, that means the ret is unreachable due to
8338				 * being preceded by a throw. In that case, inline_method () will
8339 * handle setting the return value
8340 * (test case: test_0_inline_throw ()).
8342 if (return_var && cfg->cbb->in_count) {
8343 MonoType *ret_type = mono_method_signature (method)->ret;
8349 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8352 //g_assert (returnvar != -1);
8353 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8354 cfg->ret_var_set = TRUE;
8357 if (cfg->lmf_var && cfg->cbb->in_count)
8361 MonoType *ret_type = mono_method_signature (method)->ret;
8363 if (seq_points && !sym_seq_points) {
8365				 * Place a seq point here too even though the IL stack is not
8366 * empty, so a step over on
8369 * will work correctly.
8371 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8372 MONO_ADD_INS (cfg->cbb, ins);
8375 g_assert (!return_var);
8379 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8382 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8385 if (!cfg->vret_addr) {
8388 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8390 EMIT_NEW_RETLOADA (cfg, ret_addr);
8392 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8393 ins->klass = mono_class_from_mono_type (ret_type);
8396 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8397 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8398 MonoInst *iargs [1];
8402 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8403 mono_arch_emit_setret (cfg, method, conv);
8405 mono_arch_emit_setret (cfg, method, *sp);
8408 mono_arch_emit_setret (cfg, method, *sp);
8413 if (sp != stack_start)
8415 MONO_INST_NEW (cfg, ins, OP_BR);
8417 ins->inst_target_bb = end_bblock;
8418 MONO_ADD_INS (bblock, ins);
8419 link_bblock (cfg, bblock, end_bblock);
8420 start_new_bblock = 1;
8424 MONO_INST_NEW (cfg, ins, OP_BR);
8426 target = ip + 1 + (signed char)(*ip);
8428 GET_BBLOCK (cfg, tblock, target);
8429 link_bblock (cfg, bblock, tblock);
8430 ins->inst_target_bb = tblock;
8431 if (sp != stack_start) {
8432 handle_stack_args (cfg, stack_start, sp - stack_start);
8434 CHECK_UNVERIFIABLE (cfg);
8436 MONO_ADD_INS (bblock, ins);
8437 start_new_bblock = 1;
8438 inline_costs += BRANCH_COST;
8452 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8454 target = ip + 1 + *(signed char*)ip;
8460 inline_costs += BRANCH_COST;
8464 MONO_INST_NEW (cfg, ins, OP_BR);
8467 target = ip + 4 + (gint32)read32(ip);
8469 GET_BBLOCK (cfg, tblock, target);
8470 link_bblock (cfg, bblock, tblock);
8471 ins->inst_target_bb = tblock;
8472 if (sp != stack_start) {
8473 handle_stack_args (cfg, stack_start, sp - stack_start);
8475 CHECK_UNVERIFIABLE (cfg);
8478 MONO_ADD_INS (bblock, ins);
8480 start_new_bblock = 1;
8481 inline_costs += BRANCH_COST;
8488 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8489 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8490 guint32 opsize = is_short ? 1 : 4;
8492 CHECK_OPSIZE (opsize);
8494 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8497 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8502 GET_BBLOCK (cfg, tblock, target);
8503 link_bblock (cfg, bblock, tblock);
8504 GET_BBLOCK (cfg, tblock, ip);
8505 link_bblock (cfg, bblock, tblock);
8507 if (sp != stack_start) {
8508 handle_stack_args (cfg, stack_start, sp - stack_start);
8509 CHECK_UNVERIFIABLE (cfg);
8512 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8513 cmp->sreg1 = sp [0]->dreg;
8514 type_from_op (cmp, sp [0], NULL);
8517 #if SIZEOF_REGISTER == 4
8518 if (cmp->opcode == OP_LCOMPARE_IMM) {
8519 /* Convert it to OP_LCOMPARE */
8520 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8521 ins->type = STACK_I8;
8522 ins->dreg = alloc_dreg (cfg, STACK_I8);
8524 MONO_ADD_INS (bblock, ins);
8525 cmp->opcode = OP_LCOMPARE;
8526 cmp->sreg2 = ins->dreg;
8529 MONO_ADD_INS (bblock, cmp);
8531 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8532 type_from_op (ins, sp [0], NULL);
8533 MONO_ADD_INS (bblock, ins);
8534 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8535 GET_BBLOCK (cfg, tblock, target);
8536 ins->inst_true_bb = tblock;
8537 GET_BBLOCK (cfg, tblock, ip);
8538 ins->inst_false_bb = tblock;
8539 start_new_bblock = 2;
8542 inline_costs += BRANCH_COST;
8557 MONO_INST_NEW (cfg, ins, *ip);
8559 target = ip + 4 + (gint32)read32(ip);
8565 inline_costs += BRANCH_COST;
8569 MonoBasicBlock **targets;
8570 MonoBasicBlock *default_bblock;
8571 MonoJumpInfoBBTable *table;
8572 int offset_reg = alloc_preg (cfg);
8573 int target_reg = alloc_preg (cfg);
8574 int table_reg = alloc_preg (cfg);
8575 int sum_reg = alloc_preg (cfg);
8576 gboolean use_op_switch;
8580 n = read32 (ip + 1);
8583 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8587 CHECK_OPSIZE (n * sizeof (guint32));
8588 target = ip + n * sizeof (guint32);
8590 GET_BBLOCK (cfg, default_bblock, target);
8591 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8593 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8594 for (i = 0; i < n; ++i) {
8595 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8596 targets [i] = tblock;
8597 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8601 if (sp != stack_start) {
8603 * Link the current bb with the targets as well, so handle_stack_args
8604 * will set their in_stack correctly.
8606 link_bblock (cfg, bblock, default_bblock);
8607 for (i = 0; i < n; ++i)
8608 link_bblock (cfg, bblock, targets [i]);
8610 handle_stack_args (cfg, stack_start, sp - stack_start);
8612 CHECK_UNVERIFIABLE (cfg);
8615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8616 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8619 for (i = 0; i < n; ++i)
8620 link_bblock (cfg, bblock, targets [i]);
8622 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8623 table->table = targets;
8624 table->table_size = n;
8626 use_op_switch = FALSE;
8628 /* ARM implements SWITCH statements differently */
8629 /* FIXME: Make it use the generic implementation */
8630 if (!cfg->compile_aot)
8631 use_op_switch = TRUE;
8634 if (COMPILE_LLVM (cfg))
8635 use_op_switch = TRUE;
8637 cfg->cbb->has_jump_table = 1;
8639 if (use_op_switch) {
8640 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8641 ins->sreg1 = src1->dreg;
8642 ins->inst_p0 = table;
8643 ins->inst_many_bb = targets;
8644 ins->klass = GUINT_TO_POINTER (n);
8645 MONO_ADD_INS (cfg->cbb, ins);
8647 if (sizeof (gpointer) == 8)
8648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8650 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8652 #if SIZEOF_REGISTER == 8
8653 /* The upper word might not be zero, and we add it to a 64 bit address later */
8654 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8657 if (cfg->compile_aot) {
8658 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8660 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8661 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8662 ins->inst_p0 = table;
8663 ins->dreg = table_reg;
8664 MONO_ADD_INS (cfg->cbb, ins);
8667 /* FIXME: Use load_memindex */
8668 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8670 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8672 start_new_bblock = 1;
8673 inline_costs += (BRANCH_COST * 2);
8693 dreg = alloc_freg (cfg);
8696 dreg = alloc_lreg (cfg);
8699 dreg = alloc_ireg_ref (cfg);
8702 dreg = alloc_preg (cfg);
8705 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8706 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8707 ins->flags |= ins_flag;
8709 MONO_ADD_INS (bblock, ins);
8711 if (ins->flags & MONO_INST_VOLATILE) {
8712 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8713 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8714 emit_memory_barrier (cfg, FullBarrier);
8729 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8730 ins->flags |= ins_flag;
8733 if (ins->flags & MONO_INST_VOLATILE) {
8734 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8735			/* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
8736 emit_memory_barrier (cfg, FullBarrier);
8739 MONO_ADD_INS (bblock, ins);
8741 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8742 emit_write_barrier (cfg, sp [0], sp [1]);
8751 MONO_INST_NEW (cfg, ins, (*ip));
8753 ins->sreg1 = sp [0]->dreg;
8754 ins->sreg2 = sp [1]->dreg;
8755 type_from_op (ins, sp [0], sp [1]);
8757 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8759 /* Use the immediate opcodes if possible */
8760 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8761 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8762 if (imm_opcode != -1) {
8763 ins->opcode = imm_opcode;
8764 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8767 sp [1]->opcode = OP_NOP;
8771 MONO_ADD_INS ((cfg)->cbb, (ins));
8773 *sp++ = mono_decompose_opcode (cfg, ins);
8790 MONO_INST_NEW (cfg, ins, (*ip));
8792 ins->sreg1 = sp [0]->dreg;
8793 ins->sreg2 = sp [1]->dreg;
8794 type_from_op (ins, sp [0], sp [1]);
8796 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8797 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8799 /* FIXME: Pass opcode to is_inst_imm */
8801 /* Use the immediate opcodes if possible */
8802 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8805 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8806 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8807 /* Keep emulated opcodes which are optimized away later */
8808 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8809 imm_opcode = mono_op_to_op_imm (ins->opcode);
8812 if (imm_opcode != -1) {
8813 ins->opcode = imm_opcode;
8814 if (sp [1]->opcode == OP_I8CONST) {
8815 #if SIZEOF_REGISTER == 8
8816 ins->inst_imm = sp [1]->inst_l;
8818 ins->inst_ls_word = sp [1]->inst_ls_word;
8819 ins->inst_ms_word = sp [1]->inst_ms_word;
8823 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8826 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8827 if (sp [1]->next == NULL)
8828 sp [1]->opcode = OP_NOP;
8831 MONO_ADD_INS ((cfg)->cbb, (ins));
8833 *sp++ = mono_decompose_opcode (cfg, ins);
8846 case CEE_CONV_OVF_I8:
8847 case CEE_CONV_OVF_U8:
8851 /* Special case this earlier so we have long constants in the IR */
8852 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8853 int data = sp [-1]->inst_c0;
8854 sp [-1]->opcode = OP_I8CONST;
8855 sp [-1]->type = STACK_I8;
8856 #if SIZEOF_REGISTER == 8
8857 if ((*ip) == CEE_CONV_U8)
8858 sp [-1]->inst_c0 = (guint32)data;
8860 sp [-1]->inst_c0 = data;
8862 sp [-1]->inst_ls_word = data;
8863 if ((*ip) == CEE_CONV_U8)
8864 sp [-1]->inst_ms_word = 0;
8866 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8868 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8875 case CEE_CONV_OVF_I4:
8876 case CEE_CONV_OVF_I1:
8877 case CEE_CONV_OVF_I2:
8878 case CEE_CONV_OVF_I:
8879 case CEE_CONV_OVF_U:
8882 if (sp [-1]->type == STACK_R8) {
8883 ADD_UNOP (CEE_CONV_OVF_I8);
8890 case CEE_CONV_OVF_U1:
8891 case CEE_CONV_OVF_U2:
8892 case CEE_CONV_OVF_U4:
8895 if (sp [-1]->type == STACK_R8) {
8896 ADD_UNOP (CEE_CONV_OVF_U8);
8903 case CEE_CONV_OVF_I1_UN:
8904 case CEE_CONV_OVF_I2_UN:
8905 case CEE_CONV_OVF_I4_UN:
8906 case CEE_CONV_OVF_I8_UN:
8907 case CEE_CONV_OVF_U1_UN:
8908 case CEE_CONV_OVF_U2_UN:
8909 case CEE_CONV_OVF_U4_UN:
8910 case CEE_CONV_OVF_U8_UN:
8911 case CEE_CONV_OVF_I_UN:
8912 case CEE_CONV_OVF_U_UN:
8919 CHECK_CFG_EXCEPTION;
8923 case CEE_ADD_OVF_UN:
8925 case CEE_MUL_OVF_UN:
8927 case CEE_SUB_OVF_UN:
8933 GSHAREDVT_FAILURE (*ip);
8936 token = read32 (ip + 1);
8937 klass = mini_get_class (method, token, generic_context);
8938 CHECK_TYPELOAD (klass);
8940 if (generic_class_is_reference_type (cfg, klass)) {
8941 MonoInst *store, *load;
8942 int dreg = alloc_ireg_ref (cfg);
8944 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8945 load->flags |= ins_flag;
8946 MONO_ADD_INS (cfg->cbb, load);
8948 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8949 store->flags |= ins_flag;
8950 MONO_ADD_INS (cfg->cbb, store);
8952 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8953 emit_write_barrier (cfg, sp [0], sp [1]);
8955 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8967 token = read32 (ip + 1);
8968 klass = mini_get_class (method, token, generic_context);
8969 CHECK_TYPELOAD (klass);
8971 /* Optimize the common ldobj+stloc combination */
8981 loc_index = ip [5] - CEE_STLOC_0;
8988 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8989 CHECK_LOCAL (loc_index);
8991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8992 ins->dreg = cfg->locals [loc_index]->dreg;
8998 /* Optimize the ldobj+stobj combination */
8999 /* The reference case ends up being a load+store anyway */
9000 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9005 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9012 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9021 CHECK_STACK_OVF (1);
9023 n = read32 (ip + 1);
9025 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9026 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9027 ins->type = STACK_OBJ;
9030 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9031 MonoInst *iargs [1];
9033 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9034 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9036 if (cfg->opt & MONO_OPT_SHARED) {
9037 MonoInst *iargs [3];
9039 if (cfg->compile_aot) {
9040 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9042 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9043 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9044 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9045 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9046 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9048 if (bblock->out_of_line) {
9049 MonoInst *iargs [2];
9051 if (image == mono_defaults.corlib) {
9053 * Avoid relocations in AOT and save some space by using a
9054 * version of helper_ldstr specialized to mscorlib.
9056 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9057 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9059 /* Avoid creating the string object */
9060 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9061 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9062 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9066 if (cfg->compile_aot) {
9067 NEW_LDSTRCONST (cfg, ins, image, n);
9069 MONO_ADD_INS (bblock, ins);
9072 NEW_PCONST (cfg, ins, NULL);
9073 ins->type = STACK_OBJ;
9074 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9076 OUT_OF_MEMORY_FAILURE;
9079 MONO_ADD_INS (bblock, ins);
9088 MonoInst *iargs [2];
9089 MonoMethodSignature *fsig;
9092 MonoInst *vtable_arg = NULL;
9095 token = read32 (ip + 1);
9096 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9097 if (!cmethod || mono_loader_get_last_error ())
9099 fsig = mono_method_get_signature (cmethod, image, token);
9103 mono_save_token_info (cfg, image, token, cmethod);
9105 if (!mono_class_init (cmethod->klass))
9106 TYPE_LOAD_ERROR (cmethod->klass);
9108 context_used = mini_method_check_context_used (cfg, cmethod);
9110 if (mono_security_cas_enabled ()) {
9111 if (check_linkdemand (cfg, method, cmethod))
9112 INLINE_FAILURE ("linkdemand");
9113 CHECK_CFG_EXCEPTION;
9114 } else if (mono_security_core_clr_enabled ()) {
9115 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9118 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9119 emit_generic_class_init (cfg, cmethod->klass);
9120 CHECK_TYPELOAD (cmethod->klass);
9124 if (cfg->gsharedvt) {
9125 if (mini_is_gsharedvt_variable_signature (sig))
9126 GSHAREDVT_FAILURE (*ip);
9130 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9131 mono_method_is_generic_sharable (cmethod, TRUE)) {
9132 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9133 mono_class_vtable (cfg->domain, cmethod->klass);
9134 CHECK_TYPELOAD (cmethod->klass);
9136 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9137 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9140 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9141 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9143 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9145 CHECK_TYPELOAD (cmethod->klass);
9146 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9151 n = fsig->param_count;
9155 * Generate smaller code for the common newobj <exception> instruction in
9156 * argument checking code.
9158 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9159 is_exception_class (cmethod->klass) && n <= 2 &&
9160 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9161 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9162 MonoInst *iargs [3];
9164 g_assert (!vtable_arg);
9168 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9171 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9175 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9180 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9183 g_assert_not_reached ();
9191 /* move the args to allow room for 'this' in the first position */
9197 /* check_call_signature () requires sp[0] to be set */
9198 this_ins.type = STACK_OBJ;
9200 if (check_call_signature (cfg, fsig, sp))
9205 if (mini_class_is_system_array (cmethod->klass)) {
9206 g_assert (!vtable_arg);
9208 *sp = emit_get_rgctx_method (cfg, context_used,
9209 cmethod, MONO_RGCTX_INFO_METHOD);
9211 /* Avoid varargs in the common case */
9212 if (fsig->param_count == 1)
9213 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9214 else if (fsig->param_count == 2)
9215 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9216 else if (fsig->param_count == 3)
9217 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9218 else if (fsig->param_count == 4)
9219 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9221 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9222 } else if (cmethod->string_ctor) {
9223 g_assert (!context_used);
9224 g_assert (!vtable_arg);
9225 /* we simply pass a null pointer */
9226 EMIT_NEW_PCONST (cfg, *sp, NULL);
9227 /* now call the string ctor */
9228 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9230 MonoInst* callvirt_this_arg = NULL;
9232 if (cmethod->klass->valuetype) {
9233 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9234 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9235 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9240 * The code generated by mini_emit_virtual_call () expects
9241 * iargs [0] to be a boxed instance, but luckily the vcall
9242 * will be transformed into a normal call there.
9244 } else if (context_used) {
9245 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9248 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9250 CHECK_TYPELOAD (cmethod->klass);
9253 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9254 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9255 * As a workaround, we call class cctors before allocating objects.
9257 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9258 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9259 if (cfg->verbose_level > 2)
9260 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9261 class_inits = g_slist_prepend (class_inits, vtable);
9264 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9267 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9270 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9272 /* Now call the actual ctor */
9273 /* Avoid virtual calls to ctors if possible */
9274 if (mono_class_is_marshalbyref (cmethod->klass))
9275 callvirt_this_arg = sp [0];
9278 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9279 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9280 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9285 CHECK_CFG_EXCEPTION;
9286 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9287 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9288 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9289 !g_list_find (dont_inline, cmethod)) {
9292 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9293 cfg->real_offset += 5;
9296 inline_costs += costs - 5;
9298 INLINE_FAILURE ("inline failure");
9299 // FIXME-VT: Clean this up
9300 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9301 GSHAREDVT_FAILURE(*ip);
9302 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9304 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9307 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9308 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9309 } else if (context_used &&
9310 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9311 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9312 MonoInst *cmethod_addr;
9314 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9315 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9317 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9319 INLINE_FAILURE ("ctor call");
9320 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9321 callvirt_this_arg, NULL, vtable_arg);
9325 if (alloc == NULL) {
9327 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9328 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9342 token = read32 (ip + 1);
9343 klass = mini_get_class (method, token, generic_context);
9344 CHECK_TYPELOAD (klass);
9345 if (sp [0]->type != STACK_OBJ)
9348 context_used = mini_class_check_context_used (cfg, klass);
9350 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9351 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9358 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9361 if (cfg->compile_aot)
9362 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9364 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9366 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9367 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9370 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9371 MonoMethod *mono_castclass;
9372 MonoInst *iargs [1];
9375 mono_castclass = mono_marshal_get_castclass (klass);
9378 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9379 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9380 CHECK_CFG_EXCEPTION;
9381 g_assert (costs > 0);
9384 cfg->real_offset += 5;
9389 inline_costs += costs;
9392 ins = handle_castclass (cfg, klass, *sp, context_used);
9393 CHECK_CFG_EXCEPTION;
9403 token = read32 (ip + 1);
9404 klass = mini_get_class (method, token, generic_context);
9405 CHECK_TYPELOAD (klass);
9406 if (sp [0]->type != STACK_OBJ)
9409 context_used = mini_class_check_context_used (cfg, klass);
9411 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9412 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9419 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9422 if (cfg->compile_aot)
9423 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9425 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9427 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9430 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9431 MonoMethod *mono_isinst;
9432 MonoInst *iargs [1];
9435 mono_isinst = mono_marshal_get_isinst (klass);
9438 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9439 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9440 CHECK_CFG_EXCEPTION;
9441 g_assert (costs > 0);
9444 cfg->real_offset += 5;
9449 inline_costs += costs;
9452 ins = handle_isinst (cfg, klass, *sp, context_used);
9453 CHECK_CFG_EXCEPTION;
9460 case CEE_UNBOX_ANY: {
9464 token = read32 (ip + 1);
9465 klass = mini_get_class (method, token, generic_context);
9466 CHECK_TYPELOAD (klass);
9468 mono_save_token_info (cfg, image, token, klass);
9470 context_used = mini_class_check_context_used (cfg, klass);
9472 if (mini_is_gsharedvt_klass (cfg, klass)) {
9473 *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
9481 if (generic_class_is_reference_type (cfg, klass)) {
9482 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9483 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9484 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9491 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9494 /*FIXME AOT support*/
9495 if (cfg->compile_aot)
9496 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9498 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9500 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9501 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9504 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9505 MonoMethod *mono_castclass;
9506 MonoInst *iargs [1];
9509 mono_castclass = mono_marshal_get_castclass (klass);
9512 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9513 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9514 CHECK_CFG_EXCEPTION;
9515 g_assert (costs > 0);
9518 cfg->real_offset += 5;
9522 inline_costs += costs;
9524 ins = handle_castclass (cfg, klass, *sp, context_used);
9525 CHECK_CFG_EXCEPTION;
9533 if (mono_class_is_nullable (klass)) {
9534 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9541 ins = handle_unbox (cfg, klass, sp, context_used);
9547 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9560 token = read32 (ip + 1);
9561 klass = mini_get_class (method, token, generic_context);
9562 CHECK_TYPELOAD (klass);
9564 mono_save_token_info (cfg, image, token, klass);
9566 context_used = mini_class_check_context_used (cfg, klass);
9568 if (generic_class_is_reference_type (cfg, klass)) {
9574 if (klass == mono_defaults.void_class)
9576 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9578 /* frequent check in generic code: box (struct), brtrue */
9580 // FIXME: LLVM can't handle the inconsistent bb linking
9581 if (!mono_class_is_nullable (klass) &&
9582 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9583 (ip [5] == CEE_BRTRUE ||
9584 ip [5] == CEE_BRTRUE_S ||
9585 ip [5] == CEE_BRFALSE ||
9586 ip [5] == CEE_BRFALSE_S)) {
9587 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9589 MonoBasicBlock *true_bb, *false_bb;
9593 if (cfg->verbose_level > 3) {
9594 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9595 printf ("<box+brtrue opt>\n");
9603 target = ip + 1 + (signed char)(*ip);
9610 target = ip + 4 + (gint)(read32 (ip));
9614 g_assert_not_reached ();
9618 * We need to link both bblocks, since it is needed for handling stack
9619 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9620 * Branching to only one of them would lead to inconsistencies, so
9621 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9623 GET_BBLOCK (cfg, true_bb, target);
9624 GET_BBLOCK (cfg, false_bb, ip);
9626 mono_link_bblock (cfg, cfg->cbb, true_bb);
9627 mono_link_bblock (cfg, cfg->cbb, false_bb);
9629 if (sp != stack_start) {
9630 handle_stack_args (cfg, stack_start, sp - stack_start);
9632 CHECK_UNVERIFIABLE (cfg);
9635 if (COMPILE_LLVM (cfg)) {
9636 dreg = alloc_ireg (cfg);
9637 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9640 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9642 /* The JIT can't eliminate the iconst+compare */
9643 MONO_INST_NEW (cfg, ins, OP_BR);
9644 ins->inst_target_bb = is_true ? true_bb : false_bb;
9645 MONO_ADD_INS (cfg->cbb, ins);
9648 start_new_bblock = 1;
9652 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9654 CHECK_CFG_EXCEPTION;
9663 token = read32 (ip + 1);
9664 klass = mini_get_class (method, token, generic_context);
9665 CHECK_TYPELOAD (klass);
9667 mono_save_token_info (cfg, image, token, klass);
9669 context_used = mini_class_check_context_used (cfg, klass);
9671 if (mono_class_is_nullable (klass)) {
9674 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9675 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9679 ins = handle_unbox (cfg, klass, sp, context_used);
9692 MonoClassField *field;
9693 #ifndef DISABLE_REMOTING
9697 gboolean is_instance;
9699 gpointer addr = NULL;
9700 gboolean is_special_static;
9702 MonoInst *store_val = NULL;
9705 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9707 if (op == CEE_STFLD) {
9715 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9717 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9720 if (op == CEE_STSFLD) {
9728 token = read32 (ip + 1);
9729 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9730 field = mono_method_get_wrapper_data (method, token);
9731 klass = field->parent;
9734 field = mono_field_from_token (image, token, &klass, generic_context);
9738 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9739 FIELD_ACCESS_FAILURE;
9740 mono_class_init (klass);
9742 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9745 /* if the class is Critical then transparent code cannot access its fields */
9746 if (!is_instance && mono_security_core_clr_enabled ())
9747 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9749 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9750 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9751 if (mono_security_core_clr_enabled ())
9752 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9756 * LDFLD etc. is usable on static fields as well, so convert those cases to
9759 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9771 g_assert_not_reached ();
9773 is_instance = FALSE;
9776 context_used = mini_class_check_context_used (cfg, klass);
9780 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9781 if (op == CEE_STFLD) {
9782 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9784 #ifndef DISABLE_REMOTING
9785 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9786 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9787 MonoInst *iargs [5];
9789 GSHAREDVT_FAILURE (op);
9792 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9793 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9794 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9798 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9799 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9800 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9801 CHECK_CFG_EXCEPTION;
9802 g_assert (costs > 0);
9804 cfg->real_offset += 5;
9807 inline_costs += costs;
9809 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9816 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9818 if (mini_is_gsharedvt_klass (cfg, klass)) {
9819 MonoInst *offset_ins;
9821 context_used = mini_class_check_context_used (cfg, klass);
9823 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9824 dreg = alloc_ireg_mp (cfg);
9825 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9826 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9827 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9829 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9831 if (sp [0]->opcode != OP_LDADDR)
9832 store->flags |= MONO_INST_FAULT;
9834 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9835 /* insert call to write barrier */
9839 dreg = alloc_ireg_mp (cfg);
9840 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9841 emit_write_barrier (cfg, ptr, sp [1]);
9844 store->flags |= ins_flag;
9851 #ifndef DISABLE_REMOTING
9852 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9853 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9854 MonoInst *iargs [4];
9856 GSHAREDVT_FAILURE (op);
9859 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9860 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9861 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9862 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9863 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9864 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9865 CHECK_CFG_EXCEPTION;
9867 g_assert (costs > 0);
9869 cfg->real_offset += 5;
9873 inline_costs += costs;
9875 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9881 if (sp [0]->type == STACK_VTYPE) {
9884 /* Have to compute the address of the variable */
9886 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9888 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9890 g_assert (var->klass == klass);
9892 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9896 if (op == CEE_LDFLDA) {
9897 if (is_magic_tls_access (field)) {
9898 GSHAREDVT_FAILURE (*ip);
9900 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9902 if (sp [0]->type == STACK_OBJ) {
9903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9904 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9907 dreg = alloc_ireg_mp (cfg);
9909 if (mini_is_gsharedvt_klass (cfg, klass)) {
9910 MonoInst *offset_ins;
9912 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9913 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9915 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9917 ins->klass = mono_class_from_mono_type (field->type);
9918 ins->type = STACK_MP;
9924 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9926 if (mini_is_gsharedvt_klass (cfg, klass)) {
9927 MonoInst *offset_ins;
9929 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9930 dreg = alloc_ireg_mp (cfg);
9931 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9932 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9934 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9936 load->flags |= ins_flag;
9937 if (sp [0]->opcode != OP_LDADDR)
9938 load->flags |= MONO_INST_FAULT;
9952 * We can only support shared generic static
9953 * field access on architectures where the
9954 * trampoline code has been extended to handle
9955 * the generic class init.
9957 #ifndef MONO_ARCH_VTABLE_REG
9958 GENERIC_SHARING_FAILURE (op);
9961 context_used = mini_class_check_context_used (cfg, klass);
9963 ftype = mono_field_get_type (field);
9965 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9968 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9969 * to be called here.
9971 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9972 mono_class_vtable (cfg->domain, klass);
9973 CHECK_TYPELOAD (klass);
9975 mono_domain_lock (cfg->domain);
9976 if (cfg->domain->special_static_fields)
9977 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9978 mono_domain_unlock (cfg->domain);
9980 is_special_static = mono_class_field_is_special_static (field);
9982 /* Generate IR to compute the field address */
9983 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9985 * Fast access to TLS data
9986 * Inline version of get_thread_static_data () in
9990 int idx, static_data_reg, array_reg, dreg;
9991 MonoInst *thread_ins;
9993 GSHAREDVT_FAILURE (op);
9995 // offset &= 0x7fffffff;
9996 // idx = (offset >> 24) - 1;
9997 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9999 thread_ins = mono_get_thread_intrinsic (cfg);
10000 MONO_ADD_INS (cfg->cbb, thread_ins);
10001 static_data_reg = alloc_ireg (cfg);
10002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10004 if (cfg->compile_aot) {
10005 int offset_reg, offset2_reg, idx_reg;
10007 /* For TLS variables, this will return the TLS offset */
10008 EMIT_NEW_SFLDACONST (cfg, ins, field);
10009 offset_reg = ins->dreg;
10010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10011 idx_reg = alloc_ireg (cfg);
10012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10015 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10016 array_reg = alloc_ireg (cfg);
10017 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10018 offset2_reg = alloc_ireg (cfg);
10019 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10020 dreg = alloc_ireg (cfg);
10021 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10023 offset = (gsize)addr & 0x7fffffff;
10024 idx = (offset >> 24) - 1;
10026 array_reg = alloc_ireg (cfg);
10027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10028 dreg = alloc_ireg (cfg);
10029 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10031 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10032 (cfg->compile_aot && is_special_static) ||
10033 (context_used && is_special_static)) {
10034 MonoInst *iargs [2];
10036 g_assert (field->parent);
10037 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10038 if (context_used) {
10039 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10040 field, MONO_RGCTX_INFO_CLASS_FIELD);
10042 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10044 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10045 } else if (context_used) {
10046 MonoInst *static_data;
10049 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10050 method->klass->name_space, method->klass->name, method->name,
10051 depth, field->offset);
10054 if (mono_class_needs_cctor_run (klass, method))
10055 emit_generic_class_init (cfg, klass);
10058 * The pointer we're computing here is
10060 * super_info.static_data + field->offset
10062 static_data = emit_get_rgctx_klass (cfg, context_used,
10063 klass, MONO_RGCTX_INFO_STATIC_DATA);
10065 if (mini_is_gsharedvt_klass (cfg, klass)) {
10066 MonoInst *offset_ins;
10068 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10069 dreg = alloc_ireg_mp (cfg);
10070 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10071 } else if (field->offset == 0) {
10074 int addr_reg = mono_alloc_preg (cfg);
10075 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10077 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10078 MonoInst *iargs [2];
10080 g_assert (field->parent);
10081 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10082 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10083 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10085 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
10087 CHECK_TYPELOAD (klass);
10089 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
10090 if (!(g_slist_find (class_inits, vtable))) {
10091 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
10092 if (cfg->verbose_level > 2)
10093 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10094 class_inits = g_slist_prepend (class_inits, vtable);
10097 if (cfg->run_cctors) {
10099 /* This makes so that inline cannot trigger */
10100 /* .cctors: too many apps depend on them */
10101 /* running with a specific order... */
10102 if (! vtable->initialized)
10103 INLINE_FAILURE ("class init");
10104 ex = mono_runtime_class_init_full (vtable, FALSE);
10106 set_exception_object (cfg, ex);
10107 goto exception_exit;
10111 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10113 if (cfg->compile_aot)
10114 EMIT_NEW_SFLDACONST (cfg, ins, field);
10116 EMIT_NEW_PCONST (cfg, ins, addr);
10118 MonoInst *iargs [1];
10119 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10120 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10124 /* Generate IR to do the actual load/store operation */
10126 if (op == CEE_LDSFLDA) {
10127 ins->klass = mono_class_from_mono_type (ftype);
10128 ins->type = STACK_PTR;
10130 } else if (op == CEE_STSFLD) {
10133 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10134 store->flags |= ins_flag;
10136 gboolean is_const = FALSE;
10137 MonoVTable *vtable = NULL;
10138 gpointer addr = NULL;
10140 if (!context_used) {
10141 vtable = mono_class_vtable (cfg->domain, klass);
10142 CHECK_TYPELOAD (klass);
10144 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10145 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10146 int ro_type = ftype->type;
10148 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10149 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10150 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10153 GSHAREDVT_FAILURE (op);
10155 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10158 case MONO_TYPE_BOOLEAN:
10160 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10164 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10167 case MONO_TYPE_CHAR:
10169 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10173 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10178 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10182 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10187 case MONO_TYPE_PTR:
10188 case MONO_TYPE_FNPTR:
10189 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10190 type_to_eval_stack_type ((cfg), field->type, *sp);
10193 case MONO_TYPE_STRING:
10194 case MONO_TYPE_OBJECT:
10195 case MONO_TYPE_CLASS:
10196 case MONO_TYPE_SZARRAY:
10197 case MONO_TYPE_ARRAY:
10198 if (!mono_gc_is_moving ()) {
10199 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10200 type_to_eval_stack_type ((cfg), field->type, *sp);
10208 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10213 case MONO_TYPE_VALUETYPE:
10223 CHECK_STACK_OVF (1);
10225 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10226 load->flags |= ins_flag;
10239 token = read32 (ip + 1);
10240 klass = mini_get_class (method, token, generic_context);
10241 CHECK_TYPELOAD (klass);
10242 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10243 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10244 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10245 generic_class_is_reference_type (cfg, klass)) {
10246 /* insert call to write barrier */
10247 emit_write_barrier (cfg, sp [0], sp [1]);
10259 const char *data_ptr;
10261 guint32 field_token;
10267 token = read32 (ip + 1);
10269 klass = mini_get_class (method, token, generic_context);
10270 CHECK_TYPELOAD (klass);
10272 context_used = mini_class_check_context_used (cfg, klass);
10274 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10275 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
10276 ins->sreg1 = sp [0]->dreg;
10277 ins->type = STACK_I4;
10278 ins->dreg = alloc_ireg (cfg);
10279 MONO_ADD_INS (cfg->cbb, ins);
10280 *sp = mono_decompose_opcode (cfg, ins);
10283 if (context_used) {
10284 MonoInst *args [3];
10285 MonoClass *array_class = mono_array_class_get (klass, 1);
10286 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10288 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10291 args [0] = emit_get_rgctx_klass (cfg, context_used,
10292 array_class, MONO_RGCTX_INFO_VTABLE);
10297 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10299 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10301 if (cfg->opt & MONO_OPT_SHARED) {
10302 /* Decompose now to avoid problems with references to the domainvar */
10303 MonoInst *iargs [3];
10305 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10306 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10307 iargs [2] = sp [0];
10309 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10311 /* Decompose later since it is needed by abcrem */
10312 MonoClass *array_type = mono_array_class_get (klass, 1);
10313 mono_class_vtable (cfg->domain, array_type);
10314 CHECK_TYPELOAD (array_type);
10316 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10317 ins->dreg = alloc_ireg_ref (cfg);
10318 ins->sreg1 = sp [0]->dreg;
10319 ins->inst_newa_class = klass;
10320 ins->type = STACK_OBJ;
10321 ins->klass = array_type;
10322 MONO_ADD_INS (cfg->cbb, ins);
10323 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10324 cfg->cbb->has_array_access = TRUE;
10326 /* Needed so mono_emit_load_get_addr () gets called */
10327 mono_get_got_var (cfg);
10337 * we inline/optimize the initialization sequence if possible.
10338 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10339 * for small sizes open code the memcpy
10340 * ensure the rva field is big enough
10342 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10343 MonoMethod *memcpy_method = get_memcpy_method ();
10344 MonoInst *iargs [3];
10345 int add_reg = alloc_ireg_mp (cfg);
10347 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10348 if (cfg->compile_aot) {
10349 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10351 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10353 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10354 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10363 if (sp [0]->type != STACK_OBJ)
10366 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10367 ins->dreg = alloc_preg (cfg);
10368 ins->sreg1 = sp [0]->dreg;
10369 ins->type = STACK_I4;
10370 /* This flag will be inherited by the decomposition */
10371 ins->flags |= MONO_INST_FAULT;
10372 MONO_ADD_INS (cfg->cbb, ins);
10373 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10374 cfg->cbb->has_array_access = TRUE;
10382 if (sp [0]->type != STACK_OBJ)
10385 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10387 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10388 CHECK_TYPELOAD (klass);
10389 /* we need to make sure that this array is exactly the type it needs
10390 * to be for correctness. the wrappers are lax with their usage
10391 * so we need to ignore them here
10393 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10394 MonoClass *array_class = mono_array_class_get (klass, 1);
10395 mini_emit_check_array_type (cfg, sp [0], array_class);
10396 CHECK_TYPELOAD (array_class);
10400 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10405 case CEE_LDELEM_I1:
10406 case CEE_LDELEM_U1:
10407 case CEE_LDELEM_I2:
10408 case CEE_LDELEM_U2:
10409 case CEE_LDELEM_I4:
10410 case CEE_LDELEM_U4:
10411 case CEE_LDELEM_I8:
10413 case CEE_LDELEM_R4:
10414 case CEE_LDELEM_R8:
10415 case CEE_LDELEM_REF: {
10421 if (*ip == CEE_LDELEM) {
10423 token = read32 (ip + 1);
10424 klass = mini_get_class (method, token, generic_context);
10425 CHECK_TYPELOAD (klass);
10426 mono_class_init (klass);
10429 klass = array_access_to_klass (*ip);
10431 if (sp [0]->type != STACK_OBJ)
10434 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10436 if (mini_is_gsharedvt_klass (cfg, klass)) {
10437 // FIXME-VT: OP_ICONST optimization
10438 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10439 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10440 ins->opcode = OP_LOADV_MEMBASE;
10441 } else if (sp [1]->opcode == OP_ICONST) {
10442 int array_reg = sp [0]->dreg;
10443 int index_reg = sp [1]->dreg;
10444 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10446 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10447 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10449 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10450 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10453 if (*ip == CEE_LDELEM)
10460 case CEE_STELEM_I1:
10461 case CEE_STELEM_I2:
10462 case CEE_STELEM_I4:
10463 case CEE_STELEM_I8:
10464 case CEE_STELEM_R4:
10465 case CEE_STELEM_R8:
10466 case CEE_STELEM_REF:
10471 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10473 if (*ip == CEE_STELEM) {
10475 token = read32 (ip + 1);
10476 klass = mini_get_class (method, token, generic_context);
10477 CHECK_TYPELOAD (klass);
10478 mono_class_init (klass);
10481 klass = array_access_to_klass (*ip);
10483 if (sp [0]->type != STACK_OBJ)
10486 emit_array_store (cfg, klass, sp, TRUE);
10488 if (*ip == CEE_STELEM)
10495 case CEE_CKFINITE: {
10499 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10500 ins->sreg1 = sp [0]->dreg;
10501 ins->dreg = alloc_freg (cfg);
10502 ins->type = STACK_R8;
10503 MONO_ADD_INS (bblock, ins);
10505 *sp++ = mono_decompose_opcode (cfg, ins);
10510 case CEE_REFANYVAL: {
10511 MonoInst *src_var, *src;
10513 int klass_reg = alloc_preg (cfg);
10514 int dreg = alloc_preg (cfg);
10516 GSHAREDVT_FAILURE (*ip);
10519 MONO_INST_NEW (cfg, ins, *ip);
10522 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10523 CHECK_TYPELOAD (klass);
10524 mono_class_init (klass);
10526 context_used = mini_class_check_context_used (cfg, klass);
10529 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10531 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10532 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10535 if (context_used) {
10536 MonoInst *klass_ins;
10538 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10539 klass, MONO_RGCTX_INFO_KLASS);
10542 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10543 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10545 mini_emit_class_check (cfg, klass_reg, klass);
10547 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10548 ins->type = STACK_MP;
10553 case CEE_MKREFANY: {
10554 MonoInst *loc, *addr;
10556 GSHAREDVT_FAILURE (*ip);
10559 MONO_INST_NEW (cfg, ins, *ip);
10562 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10563 CHECK_TYPELOAD (klass);
10564 mono_class_init (klass);
10566 context_used = mini_class_check_context_used (cfg, klass);
10568 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10569 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10571 if (context_used) {
10572 MonoInst *const_ins;
10573 int type_reg = alloc_preg (cfg);
10575 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10576 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10578 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10579 } else if (cfg->compile_aot) {
10580 int const_reg = alloc_preg (cfg);
10581 int type_reg = alloc_preg (cfg);
10583 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10584 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10586 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10588 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10589 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10591 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10593 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10594 ins->type = STACK_VTYPE;
10595 ins->klass = mono_defaults.typed_reference_class;
10600 case CEE_LDTOKEN: {
10602 MonoClass *handle_class;
10604 CHECK_STACK_OVF (1);
10607 n = read32 (ip + 1);
10609 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10610 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10611 handle = mono_method_get_wrapper_data (method, n);
10612 handle_class = mono_method_get_wrapper_data (method, n + 1);
10613 if (handle_class == mono_defaults.typehandle_class)
10614 handle = &((MonoClass*)handle)->byval_arg;
10617 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10621 mono_class_init (handle_class);
10622 if (cfg->generic_sharing_context) {
10623 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10624 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10625 /* This case handles ldtoken
10626 of an open type, like for
10629 } else if (handle_class == mono_defaults.typehandle_class) {
10630 /* If we get a MONO_TYPE_CLASS
10631 then we need to provide the
10633 instantiation of it. */
10634 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10637 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10638 } else if (handle_class == mono_defaults.fieldhandle_class)
10639 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10640 else if (handle_class == mono_defaults.methodhandle_class)
10641 context_used = mini_method_check_context_used (cfg, handle);
10643 g_assert_not_reached ();
10646 if ((cfg->opt & MONO_OPT_SHARED) &&
10647 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10648 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10649 MonoInst *addr, *vtvar, *iargs [3];
10650 int method_context_used;
10652 method_context_used = mini_method_check_context_used (cfg, method);
10654 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10656 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10657 EMIT_NEW_ICONST (cfg, iargs [1], n);
10658 if (method_context_used) {
10659 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10660 method, MONO_RGCTX_INFO_METHOD);
10661 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10663 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10664 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10666 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10670 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10672 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10673 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10674 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10675 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10676 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10677 MonoClass *tclass = mono_class_from_mono_type (handle);
10679 mono_class_init (tclass);
10680 if (context_used) {
10681 ins = emit_get_rgctx_klass (cfg, context_used,
10682 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10683 } else if (cfg->compile_aot) {
10684 if (method->wrapper_type) {
10685 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10686 /* Special case for static synchronized wrappers */
10687 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10689 /* FIXME: n is not a normal token */
10690 cfg->disable_aot = TRUE;
10691 EMIT_NEW_PCONST (cfg, ins, NULL);
10694 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10697 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10699 ins->type = STACK_OBJ;
10700 ins->klass = cmethod->klass;
10703 MonoInst *addr, *vtvar;
10705 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10707 if (context_used) {
10708 if (handle_class == mono_defaults.typehandle_class) {
10709 ins = emit_get_rgctx_klass (cfg, context_used,
10710 mono_class_from_mono_type (handle),
10711 MONO_RGCTX_INFO_TYPE);
10712 } else if (handle_class == mono_defaults.methodhandle_class) {
10713 ins = emit_get_rgctx_method (cfg, context_used,
10714 handle, MONO_RGCTX_INFO_METHOD);
10715 } else if (handle_class == mono_defaults.fieldhandle_class) {
10716 ins = emit_get_rgctx_field (cfg, context_used,
10717 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10719 g_assert_not_reached ();
10721 } else if (cfg->compile_aot) {
10722 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10724 EMIT_NEW_PCONST (cfg, ins, handle);
10726 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10727 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10728 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10738 MONO_INST_NEW (cfg, ins, OP_THROW);
10740 ins->sreg1 = sp [0]->dreg;
10742 bblock->out_of_line = TRUE;
10743 MONO_ADD_INS (bblock, ins);
10744 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10745 MONO_ADD_INS (bblock, ins);
10748 link_bblock (cfg, bblock, end_bblock);
10749 start_new_bblock = 1;
10751 case CEE_ENDFINALLY:
10752 /* mono_save_seq_point_info () depends on this */
10753 if (sp != stack_start)
10754 emit_seq_point (cfg, method, ip, FALSE);
10755 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10756 MONO_ADD_INS (bblock, ins);
10758 start_new_bblock = 1;
10761 * Control will leave the method so empty the stack, otherwise
10762 * the next basic block will start with a nonempty stack.
10764 while (sp != stack_start) {
10769 case CEE_LEAVE_S: {
10772 if (*ip == CEE_LEAVE) {
10774 target = ip + 5 + (gint32)read32(ip + 1);
10777 target = ip + 2 + (signed char)(ip [1]);
10780 /* empty the stack */
10781 while (sp != stack_start) {
10786 * If this leave statement is in a catch block, check for a
10787 * pending exception, and rethrow it if necessary.
10788 * We avoid doing this in runtime invoke wrappers, since those are called
10789 * by native code which expects the wrapper to catch all exceptions. \
10791 for (i = 0; i < header->num_clauses; ++i) {
10792 MonoExceptionClause *clause = &header->clauses [i];
10795 * Use <= in the final comparison to handle clauses with multiple
10796 * leave statements, like in bug #78024.
10797 * The ordering of the exception clauses guarantees that we find the
10798 * innermost clause.
10800 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10802 MonoBasicBlock *dont_throw;
10807 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10810 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10812 NEW_BBLOCK (cfg, dont_throw);
10815 * Currently, we always rethrow the abort exception, despite the
10816 * fact that this is not correct. See thread6.cs for an example.
10817 * But propagating the abort exception is more important than
10818 * getting the semantics right.
10820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10822 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10824 MONO_START_BB (cfg, dont_throw);
10829 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10831 MonoExceptionClause *clause;
10833 for (tmp = handlers; tmp; tmp = tmp->next) {
10834 clause = tmp->data;
10835 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10837 link_bblock (cfg, bblock, tblock);
10838 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10839 ins->inst_target_bb = tblock;
10840 ins->inst_eh_block = clause;
10841 MONO_ADD_INS (bblock, ins);
10842 bblock->has_call_handler = 1;
10843 if (COMPILE_LLVM (cfg)) {
10844 MonoBasicBlock *target_bb;
10847 * Link the finally bblock with the target, since it will
10848 * conceptually branch there.
10849 * FIXME: Have to link the bblock containing the endfinally.
10851 GET_BBLOCK (cfg, target_bb, target);
10852 link_bblock (cfg, tblock, target_bb);
10855 g_list_free (handlers);
10858 MONO_INST_NEW (cfg, ins, OP_BR);
10859 MONO_ADD_INS (bblock, ins);
10860 GET_BBLOCK (cfg, tblock, target);
10861 link_bblock (cfg, bblock, tblock);
10862 ins->inst_target_bb = tblock;
10863 start_new_bblock = 1;
10865 if (*ip == CEE_LEAVE)
10874 * Mono specific opcodes
10876 case MONO_CUSTOM_PREFIX: {
10878 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10882 case CEE_MONO_ICALL: {
10884 MonoJitICallInfo *info;
10886 token = read32 (ip + 2);
10887 func = mono_method_get_wrapper_data (method, token);
10888 info = mono_find_jit_icall_by_addr (func);
10890 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10893 CHECK_STACK (info->sig->param_count);
10894 sp -= info->sig->param_count;
10896 ins = mono_emit_jit_icall (cfg, info->func, sp);
10897 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10901 inline_costs += 10 * num_calls++;
10905 case CEE_MONO_LDPTR: {
10908 CHECK_STACK_OVF (1);
10910 token = read32 (ip + 2);
10912 ptr = mono_method_get_wrapper_data (method, token);
10913 /* FIXME: Generalize this */
10914 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10915 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10920 EMIT_NEW_PCONST (cfg, ins, ptr);
10923 inline_costs += 10 * num_calls++;
10924 /* Can't embed random pointers into AOT code */
10925 cfg->disable_aot = 1;
10928 case CEE_MONO_JIT_ICALL_ADDR: {
10929 MonoJitICallInfo *callinfo;
10932 CHECK_STACK_OVF (1);
10934 token = read32 (ip + 2);
10936 ptr = mono_method_get_wrapper_data (method, token);
10937 callinfo = mono_find_jit_icall_by_addr (ptr);
10938 g_assert (callinfo);
10939 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10942 inline_costs += 10 * num_calls++;
10945 case CEE_MONO_ICALL_ADDR: {
10946 MonoMethod *cmethod;
10949 CHECK_STACK_OVF (1);
10951 token = read32 (ip + 2);
10953 cmethod = mono_method_get_wrapper_data (method, token);
10955 if (cfg->compile_aot) {
10956 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10958 ptr = mono_lookup_internal_call (cmethod);
10960 EMIT_NEW_PCONST (cfg, ins, ptr);
10966 case CEE_MONO_VTADDR: {
10967 MonoInst *src_var, *src;
10973 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10974 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10979 case CEE_MONO_NEWOBJ: {
10980 MonoInst *iargs [2];
10982 CHECK_STACK_OVF (1);
10984 token = read32 (ip + 2);
10985 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10986 mono_class_init (klass);
10987 NEW_DOMAINCONST (cfg, iargs [0]);
10988 MONO_ADD_INS (cfg->cbb, iargs [0]);
10989 NEW_CLASSCONST (cfg, iargs [1], klass);
10990 MONO_ADD_INS (cfg->cbb, iargs [1]);
10991 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10993 inline_costs += 10 * num_calls++;
10996 case CEE_MONO_OBJADDR:
10999 MONO_INST_NEW (cfg, ins, OP_MOVE);
11000 ins->dreg = alloc_ireg_mp (cfg);
11001 ins->sreg1 = sp [0]->dreg;
11002 ins->type = STACK_MP;
11003 MONO_ADD_INS (cfg->cbb, ins);
11007 case CEE_MONO_LDNATIVEOBJ:
11009 * Similar to LDOBJ, but instead load the unmanaged
11010 * representation of the vtype to the stack.
11015 token = read32 (ip + 2);
11016 klass = mono_method_get_wrapper_data (method, token);
11017 g_assert (klass->valuetype);
11018 mono_class_init (klass);
11021 MonoInst *src, *dest, *temp;
11024 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11025 temp->backend.is_pinvoke = 1;
11026 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11027 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11029 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11030 dest->type = STACK_VTYPE;
11031 dest->klass = klass;
11037 case CEE_MONO_RETOBJ: {
11039 * Same as RET, but return the native representation of a vtype
11042 g_assert (cfg->ret);
11043 g_assert (mono_method_signature (method)->pinvoke);
11048 token = read32 (ip + 2);
11049 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11051 if (!cfg->vret_addr) {
11052 g_assert (cfg->ret_var_is_local);
11054 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11056 EMIT_NEW_RETLOADA (cfg, ins);
11058 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11060 if (sp != stack_start)
11063 MONO_INST_NEW (cfg, ins, OP_BR);
11064 ins->inst_target_bb = end_bblock;
11065 MONO_ADD_INS (bblock, ins);
11066 link_bblock (cfg, bblock, end_bblock);
11067 start_new_bblock = 1;
11071 case CEE_MONO_CISINST:
11072 case CEE_MONO_CCASTCLASS: {
11077 token = read32 (ip + 2);
11078 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11079 if (ip [1] == CEE_MONO_CISINST)
11080 ins = handle_cisinst (cfg, klass, sp [0]);
11082 ins = handle_ccastclass (cfg, klass, sp [0]);
11088 case CEE_MONO_SAVE_LMF:
11089 case CEE_MONO_RESTORE_LMF:
11090 #ifdef MONO_ARCH_HAVE_LMF_OPS
11091 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11092 MONO_ADD_INS (bblock, ins);
11093 cfg->need_lmf_area = TRUE;
11097 case CEE_MONO_CLASSCONST:
11098 CHECK_STACK_OVF (1);
11100 token = read32 (ip + 2);
11101 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11104 inline_costs += 10 * num_calls++;
11106 case CEE_MONO_NOT_TAKEN:
11107 bblock->out_of_line = TRUE;
11111 CHECK_STACK_OVF (1);
11113 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11114 ins->dreg = alloc_preg (cfg);
11115 ins->inst_offset = (gint32)read32 (ip + 2);
11116 ins->type = STACK_PTR;
11117 MONO_ADD_INS (bblock, ins);
11121 case CEE_MONO_DYN_CALL: {
11122 MonoCallInst *call;
11124 /* It would be easier to call a trampoline, but that would put an
11125 * extra frame on the stack, confusing exception handling. So
11126 * implement it inline using an opcode for now.
11129 if (!cfg->dyn_call_var) {
11130 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11131 /* prevent it from being register allocated */
11132 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11135 /* Has to use a call inst since the local regalloc expects it */
11136 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11137 ins = (MonoInst*)call;
11139 ins->sreg1 = sp [0]->dreg;
11140 ins->sreg2 = sp [1]->dreg;
11141 MONO_ADD_INS (bblock, ins);
11143 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11144 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11148 inline_costs += 10 * num_calls++;
11152 case CEE_MONO_MEMORY_BARRIER: {
11154 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11158 case CEE_MONO_JIT_ATTACH: {
11159 MonoInst *args [16];
11160 MonoInst *ad_ins, *lmf_ins;
11161 MonoBasicBlock *next_bb = NULL;
11163 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11165 EMIT_NEW_PCONST (cfg, ins, NULL);
11166 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11172 ad_ins = mono_get_domain_intrinsic (cfg);
11173 lmf_ins = mono_get_lmf_intrinsic (cfg);
11176 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11177 NEW_BBLOCK (cfg, next_bb);
11179 MONO_ADD_INS (cfg->cbb, ad_ins);
11180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11183 MONO_ADD_INS (cfg->cbb, lmf_ins);
11184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11188 if (cfg->compile_aot) {
11189 /* AOT code is only used in the root domain */
11190 EMIT_NEW_PCONST (cfg, args [0], NULL);
11192 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11194 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11195 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11198 MONO_START_BB (cfg, next_bb);
11204 case CEE_MONO_JIT_DETACH: {
11205 MonoInst *args [16];
11207 /* Restore the original domain */
11208 dreg = alloc_ireg (cfg);
11209 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11210 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11215 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11221 case CEE_PREFIX1: {
11224 case CEE_ARGLIST: {
11225 /* somewhat similar to LDTOKEN */
11226 MonoInst *addr, *vtvar;
11227 CHECK_STACK_OVF (1);
11228 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11230 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11231 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11233 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11234 ins->type = STACK_VTYPE;
11235 ins->klass = mono_defaults.argumenthandle_class;
11248 * The following transforms:
11249 * CEE_CEQ into OP_CEQ
11250 * CEE_CGT into OP_CGT
11251 * CEE_CGT_UN into OP_CGT_UN
11252 * CEE_CLT into OP_CLT
11253 * CEE_CLT_UN into OP_CLT_UN
11255 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11257 MONO_INST_NEW (cfg, ins, cmp->opcode);
11259 cmp->sreg1 = sp [0]->dreg;
11260 cmp->sreg2 = sp [1]->dreg;
11261 type_from_op (cmp, sp [0], sp [1]);
11263 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11264 cmp->opcode = OP_LCOMPARE;
11265 else if (sp [0]->type == STACK_R8)
11266 cmp->opcode = OP_FCOMPARE;
11268 cmp->opcode = OP_ICOMPARE;
11269 MONO_ADD_INS (bblock, cmp);
11270 ins->type = STACK_I4;
11271 ins->dreg = alloc_dreg (cfg, ins->type);
11272 type_from_op (ins, sp [0], sp [1]);
11274 if (cmp->opcode == OP_FCOMPARE) {
11276 * The backends expect the fceq opcodes to do the
11279 cmp->opcode = OP_NOP;
11280 ins->sreg1 = cmp->sreg1;
11281 ins->sreg2 = cmp->sreg2;
11283 MONO_ADD_INS (bblock, ins);
11289 MonoInst *argconst;
11290 MonoMethod *cil_method;
11292 CHECK_STACK_OVF (1);
11294 n = read32 (ip + 2);
11295 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11296 if (!cmethod || mono_loader_get_last_error ())
11298 mono_class_init (cmethod->klass);
11300 mono_save_token_info (cfg, image, n, cmethod);
11302 context_used = mini_method_check_context_used (cfg, cmethod);
11304 cil_method = cmethod;
11305 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11306 METHOD_ACCESS_FAILURE;
11308 if (mono_security_cas_enabled ()) {
11309 if (check_linkdemand (cfg, method, cmethod))
11310 INLINE_FAILURE ("linkdemand");
11311 CHECK_CFG_EXCEPTION;
11312 } else if (mono_security_core_clr_enabled ()) {
11313 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11317 * Optimize the common case of ldftn+delegate creation
11319 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11320 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11321 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11322 MonoInst *target_ins;
11323 MonoMethod *invoke;
11324 int invoke_context_used;
11326 invoke = mono_get_delegate_invoke (ctor_method->klass);
11327 if (!invoke || !mono_method_signature (invoke))
11330 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11332 target_ins = sp [-1];
11334 if (mono_security_core_clr_enabled ())
11335 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11337 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11338 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11339 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11341 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11345 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11346 /* FIXME: SGEN support */
11347 if (invoke_context_used == 0) {
11349 if (cfg->verbose_level > 3)
11350 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11352 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11353 CHECK_CFG_EXCEPTION;
11362 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11363 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11367 inline_costs += 10 * num_calls++;
11370 case CEE_LDVIRTFTN: {
11371 MonoInst *args [2];
11375 n = read32 (ip + 2);
11376 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11377 if (!cmethod || mono_loader_get_last_error ())
11379 mono_class_init (cmethod->klass);
11381 context_used = mini_method_check_context_used (cfg, cmethod);
11383 if (mono_security_cas_enabled ()) {
11384 if (check_linkdemand (cfg, method, cmethod))
11385 INLINE_FAILURE ("linkdemand");
11386 CHECK_CFG_EXCEPTION;
11387 } else if (mono_security_core_clr_enabled ()) {
11388 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11394 args [1] = emit_get_rgctx_method (cfg, context_used,
11395 cmethod, MONO_RGCTX_INFO_METHOD);
11398 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11400 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11403 inline_costs += 10 * num_calls++;
11407 CHECK_STACK_OVF (1);
11409 n = read16 (ip + 2);
11411 EMIT_NEW_ARGLOAD (cfg, ins, n);
11416 CHECK_STACK_OVF (1);
11418 n = read16 (ip + 2);
11420 NEW_ARGLOADA (cfg, ins, n);
11421 MONO_ADD_INS (cfg->cbb, ins);
11429 n = read16 (ip + 2);
11431 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11433 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11437 CHECK_STACK_OVF (1);
11439 n = read16 (ip + 2);
11441 EMIT_NEW_LOCLOAD (cfg, ins, n);
11446 unsigned char *tmp_ip;
11447 CHECK_STACK_OVF (1);
11449 n = read16 (ip + 2);
11452 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11458 EMIT_NEW_LOCLOADA (cfg, ins, n);
11467 n = read16 (ip + 2);
11469 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11471 emit_stloc_ir (cfg, sp, header, n);
11478 if (sp != stack_start)
11480 if (cfg->method != method)
11482 * Inlining this into a loop in a parent could lead to
11483 * stack overflows which is different behavior than the
11484 * non-inlined case, thus disable inlining in this case.
11486 goto inline_failure;
11488 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11489 ins->dreg = alloc_preg (cfg);
11490 ins->sreg1 = sp [0]->dreg;
11491 ins->type = STACK_PTR;
11492 MONO_ADD_INS (cfg->cbb, ins);
11494 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11496 ins->flags |= MONO_INST_INIT;
11501 case CEE_ENDFILTER: {
11502 MonoExceptionClause *clause, *nearest;
11503 int cc, nearest_num;
11507 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11509 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11510 ins->sreg1 = (*sp)->dreg;
11511 MONO_ADD_INS (bblock, ins);
11512 start_new_bblock = 1;
11517 for (cc = 0; cc < header->num_clauses; ++cc) {
11518 clause = &header->clauses [cc];
11519 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11520 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11521 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11526 g_assert (nearest);
11527 if ((ip - header->code) != nearest->handler_offset)
11532 case CEE_UNALIGNED_:
11533 ins_flag |= MONO_INST_UNALIGNED;
11534 /* FIXME: record alignment? we can assume 1 for now */
11538 case CEE_VOLATILE_:
11539 ins_flag |= MONO_INST_VOLATILE;
11543 ins_flag |= MONO_INST_TAILCALL;
11544 cfg->flags |= MONO_CFG_HAS_TAIL;
11545 /* Can't inline tail calls at this time */
11546 inline_costs += 100000;
11553 token = read32 (ip + 2);
11554 klass = mini_get_class (method, token, generic_context);
11555 CHECK_TYPELOAD (klass);
11556 if (generic_class_is_reference_type (cfg, klass))
11557 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11559 mini_emit_initobj (cfg, *sp, NULL, klass);
11563 case CEE_CONSTRAINED_:
11565 token = read32 (ip + 2);
11566 constrained_call = mini_get_class (method, token, generic_context);
11567 CHECK_TYPELOAD (constrained_call);
11571 case CEE_INITBLK: {
11572 MonoInst *iargs [3];
11576 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11577 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11578 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11579 /* emit_memset only works when val == 0 */
11580 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11582 iargs [0] = sp [0];
11583 iargs [1] = sp [1];
11584 iargs [2] = sp [2];
11585 if (ip [1] == CEE_CPBLK) {
11586 MonoMethod *memcpy_method = get_memcpy_method ();
11587 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11589 MonoMethod *memset_method = get_memset_method ();
11590 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11600 ins_flag |= MONO_INST_NOTYPECHECK;
11602 ins_flag |= MONO_INST_NORANGECHECK;
11603 /* we ignore the no-nullcheck for now since we
11604 * really do it explicitly only when doing callvirt->call
11608 case CEE_RETHROW: {
11610 int handler_offset = -1;
11612 for (i = 0; i < header->num_clauses; ++i) {
11613 MonoExceptionClause *clause = &header->clauses [i];
11614 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11615 handler_offset = clause->handler_offset;
11620 bblock->flags |= BB_EXCEPTION_UNSAFE;
11622 g_assert (handler_offset != -1);
11624 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11625 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11626 ins->sreg1 = load->dreg;
11627 MONO_ADD_INS (bblock, ins);
11629 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11630 MONO_ADD_INS (bblock, ins);
11633 link_bblock (cfg, bblock, end_bblock);
11634 start_new_bblock = 1;
11642 GSHAREDVT_FAILURE (*ip);
11644 CHECK_STACK_OVF (1);
11646 token = read32 (ip + 2);
11647 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11648 MonoType *type = mono_type_create_from_typespec (image, token);
11649 val = mono_type_size (type, &ialign);
11651 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11652 CHECK_TYPELOAD (klass);
11653 mono_class_init (klass);
11654 val = mono_type_size (&klass->byval_arg, &ialign);
11656 EMIT_NEW_ICONST (cfg, ins, val);
11661 case CEE_REFANYTYPE: {
11662 MonoInst *src_var, *src;
11664 GSHAREDVT_FAILURE (*ip);
11670 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11672 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11673 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11679 case CEE_READONLY_:
11692 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11702 g_warning ("opcode 0x%02x not handled", *ip);
11706 if (start_new_bblock != 1)
11709 bblock->cil_length = ip - bblock->cil_code;
11710 if (bblock->next_bb) {
11711 /* This could already be set because of inlining, #693905 */
11712 MonoBasicBlock *bb = bblock;
11714 while (bb->next_bb)
11716 bb->next_bb = end_bblock;
11718 bblock->next_bb = end_bblock;
11721 if (cfg->lmf_var) {
11722 cfg->cbb = init_localsbb;
11723 emit_push_lmf (cfg);
11726 if (cfg->method == method && cfg->domainvar) {
11728 MonoInst *get_domain;
11730 cfg->cbb = init_localsbb;
11732 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11733 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11736 get_domain->dreg = alloc_preg (cfg);
11737 MONO_ADD_INS (cfg->cbb, get_domain);
11739 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11740 MONO_ADD_INS (cfg->cbb, store);
11743 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11744 if (cfg->compile_aot)
11745 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11746 mono_get_got_var (cfg);
11749 if (cfg->method == method && cfg->got_var)
11750 mono_emit_load_got_addr (cfg);
11755 cfg->cbb = init_localsbb;
11757 for (i = 0; i < header->num_locals; ++i) {
11758 MonoType *ptype = header->locals [i];
11759 int t = ptype->type;
11760 dreg = cfg->locals [i]->dreg;
11762 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11763 t = mono_class_enum_basetype (ptype->data.klass)->type;
11764 if (ptype->byref) {
11765 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11766 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11767 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11768 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11769 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11770 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11771 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11772 ins->type = STACK_R8;
11773 ins->inst_p0 = (void*)&r8_0;
11774 ins->dreg = alloc_dreg (cfg, STACK_R8);
11775 MONO_ADD_INS (init_localsbb, ins);
11776 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11777 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11778 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11779 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11780 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11781 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11783 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11788 if (cfg->init_ref_vars && cfg->method == method) {
11789 /* Emit initialization for ref vars */
11790 // FIXME: Avoid duplication initialization for IL locals.
11791 for (i = 0; i < cfg->num_varinfo; ++i) {
11792 MonoInst *ins = cfg->varinfo [i];
11794 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11795 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11800 MonoBasicBlock *bb;
11803 * Make seq points at backward branch targets interruptable.
11805 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11806 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11807 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11810 /* Add a sequence point for method entry/exit events */
11812 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11813 MONO_ADD_INS (init_localsbb, ins);
11814 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11815 MONO_ADD_INS (cfg->bb_exit, ins);
11819 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11820 * the code they refer to was dead (#11880).
11822 if (sym_seq_points) {
11823 for (i = 0; i < header->code_size; ++i) {
11824 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11827 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11828 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11835 if (cfg->method == method) {
11836 MonoBasicBlock *bb;
11837 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11838 bb->region = mono_find_block_region (cfg, bb->real_offset);
11840 mono_create_spvar_for_region (cfg, bb->region);
11841 if (cfg->verbose_level > 2)
11842 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11846 g_slist_free (class_inits);
11847 dont_inline = g_list_remove (dont_inline, method);
11849 if (inline_costs < 0) {
11852 /* Method is too large */
11853 mname = mono_method_full_name (method, TRUE);
11854 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11855 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11857 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11858 mono_basic_block_free (original_bb);
11862 if ((cfg->verbose_level > 2) && (cfg->method == method))
11863 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11865 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11866 mono_basic_block_free (original_bb);
11867 return inline_costs;
11870 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11877 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11881 set_exception_type_from_invalid_il (cfg, method, ip);
11885 g_slist_free (class_inits);
11886 mono_basic_block_free (original_bb);
11887 dont_inline = g_list_remove (dont_inline, method);
11888 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * so a store of a register holding a known constant can be rewritten as an
 * immediate store.  Asserts on any opcode without an IMM form.
 * NOTE(review): this extract is sampled — the switch header/default lines
 * are elided here; the visible cases are a 1:1 REG->IMM mapping.
 */
11893 store_membase_reg_to_store_membase_imm (int opcode)
11896 case OP_STORE_MEMBASE_REG:
11897 return OP_STORE_MEMBASE_IMM;
11898 case OP_STOREI1_MEMBASE_REG:
11899 return OP_STOREI1_MEMBASE_IMM;
11900 case OP_STOREI2_MEMBASE_REG:
11901 return OP_STOREI2_MEMBASE_IMM;
11902 case OP_STOREI4_MEMBASE_REG:
11903 return OP_STOREI4_MEMBASE_IMM;
11904 case OP_STOREI8_MEMBASE_REG:
11905 return OP_STOREI8_MEMBASE_IMM;
/* No IMM variant exists for the remaining store opcodes */
11907 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a three-address opcode to its two-address immediate variant
 * (e.g. OP_IADD -> OP_IADD_IMM), used when one source operand is a
 * constant.  Covers int/long ALU ops, compares, register stores, and a
 * few x86/amd64-specific opcodes under the #if guards below.
 * NOTE(review): sampled extract — the `case OP_*:` labels that pair with
 * most of these returns are elided; each visible return names the IMM
 * form of the corresponding non-IMM opcode.
 */
11914 mono_op_to_op_imm (int opcode)
11918 return OP_IADD_IMM;
11920 return OP_ISUB_IMM;
11922 return OP_IDIV_IMM;
11924 return OP_IDIV_UN_IMM;
11926 return OP_IREM_IMM;
11928 return OP_IREM_UN_IMM;
11930 return OP_IMUL_IMM;
11932 return OP_IAND_IMM;
11936 return OP_IXOR_IMM;
11938 return OP_ISHL_IMM;
11940 return OP_ISHR_IMM;
11942 return OP_ISHR_UN_IMM;
11945 return OP_LADD_IMM;
11947 return OP_LSUB_IMM;
11949 return OP_LAND_IMM;
11953 return OP_LXOR_IMM;
11955 return OP_LSHL_IMM;
11957 return OP_LSHR_IMM;
11959 return OP_LSHR_UN_IMM;
11962 return OP_COMPARE_IMM;
11964 return OP_ICOMPARE_IMM;
11966 return OP_LCOMPARE_IMM;
11968 case OP_STORE_MEMBASE_REG:
11969 return OP_STORE_MEMBASE_IMM;
11970 case OP_STOREI1_MEMBASE_REG:
11971 return OP_STOREI1_MEMBASE_IMM;
11972 case OP_STOREI2_MEMBASE_REG:
11973 return OP_STOREI2_MEMBASE_IMM;
11974 case OP_STOREI4_MEMBASE_REG:
11975 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms */
11977 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11979 return OP_X86_PUSH_IMM;
11980 case OP_X86_COMPARE_MEMBASE_REG:
11981 return OP_X86_COMPARE_MEMBASE_IMM;
11983 #if defined(TARGET_AMD64)
11984 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11985 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11987 case OP_VOIDCALL_REG:
11988 return OP_VOIDCALL;
11996 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * IR load-from-membase opcode.  LDIND_I and LDIND_REF both map to the
 * pointer-sized OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 */
12003 ldind_to_load_membase (int opcode)
12007 return OP_LOADI1_MEMBASE;
12009 return OP_LOADU1_MEMBASE;
12011 return OP_LOADI2_MEMBASE;
12013 return OP_LOADU2_MEMBASE;
12015 return OP_LOADI4_MEMBASE;
12017 return OP_LOADU4_MEMBASE;
12019 return OP_LOAD_MEMBASE;
12020 case CEE_LDIND_REF:
/* Object references load as a native-word OP_LOAD_MEMBASE too */
12021 return OP_LOAD_MEMBASE;
12023 return OP_LOADI8_MEMBASE;
12025 return OP_LOADR4_MEMBASE;
12027 return OP_LOADR8_MEMBASE;
12029 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * IR store-to-membase opcode; STIND_REF uses the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on unknown opcodes.
 */
12036 stind_to_store_membase (int opcode)
12040 return OP_STOREI1_MEMBASE_REG;
12042 return OP_STOREI2_MEMBASE_REG;
12044 return OP_STOREI4_MEMBASE_REG;
12046 case CEE_STIND_REF:
12047 return OP_STORE_MEMBASE_REG;
12049 return OP_STOREI8_MEMBASE_REG;
12051 return OP_STORER4_MEMBASE_REG;
12053 return OP_STORER8_MEMBASE_REG;
12055 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to its absolute-address OP_LOAD*_MEM
 * form, for targets that can load from a constant address (currently
 * only x86/amd64 — other targets fall through past the #if).
 * OP_LOADI8_MEM exists only when registers are 64-bit wide.
 */
12062 mono_load_membase_to_load_mem (int opcode)
12064 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12065 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12067 case OP_LOAD_MEMBASE:
12068 return OP_LOAD_MEM;
12069 case OP_LOADU1_MEMBASE:
12070 return OP_LOADU1_MEM;
12071 case OP_LOADU2_MEMBASE:
12072 return OP_LOADU2_MEM;
12073 case OP_LOADI4_MEMBASE:
12074 return OP_LOADI4_MEM;
12075 case OP_LOADU4_MEMBASE:
12076 return OP_LOADU4_MEM;
12077 #if SIZEOF_REGISTER == 8
12078 case OP_LOADI8_MEMBASE:
12079 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an instruction's destination vreg lives in memory, try to fuse
 * the ALU op and the following store into a single read-modify-write
 * _MEMBASE opcode (x86/amd64 only).  STORE_OPCODE is the store that
 * would have spilled the dreg; only full-word (and on amd64, i8) stores
 * qualify.  Returns the fused opcode, or (per the elided fallthrough
 * paths) a "no fusion possible" marker checked by the caller.
 * NOTE(review): sampled extract — the matching `case OP_*:` labels for
 * most returns and the default/-1 paths are elided here.
 */
12088 op_to_op_dest_membase (int store_opcode, int opcode)
12090 #if defined(TARGET_X86)
12091 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* 32-bit ALU ops fused with a memory destination */
12096 return OP_X86_ADD_MEMBASE_REG;
12098 return OP_X86_SUB_MEMBASE_REG;
12100 return OP_X86_AND_MEMBASE_REG;
12102 return OP_X86_OR_MEMBASE_REG;
12104 return OP_X86_XOR_MEMBASE_REG;
12107 return OP_X86_ADD_MEMBASE_IMM;
12110 return OP_X86_SUB_MEMBASE_IMM;
12113 return OP_X86_AND_MEMBASE_IMM;
12116 return OP_X86_OR_MEMBASE_IMM;
12119 return OP_X86_XOR_MEMBASE_IMM;
12125 #if defined(TARGET_AMD64)
12126 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86_* opcodes; 64-bit ops use AMD64_* below */
12131 return OP_X86_ADD_MEMBASE_REG;
12133 return OP_X86_SUB_MEMBASE_REG;
12135 return OP_X86_AND_MEMBASE_REG;
12137 return OP_X86_OR_MEMBASE_REG;
12139 return OP_X86_XOR_MEMBASE_REG;
12141 return OP_X86_ADD_MEMBASE_IMM;
12143 return OP_X86_SUB_MEMBASE_IMM;
12145 return OP_X86_AND_MEMBASE_IMM;
12147 return OP_X86_OR_MEMBASE_IMM;
12149 return OP_X86_XOR_MEMBASE_IMM;
12151 return OP_AMD64_ADD_MEMBASE_REG;
12153 return OP_AMD64_SUB_MEMBASE_REG;
12155 return OP_AMD64_AND_MEMBASE_REG;
12157 return OP_AMD64_OR_MEMBASE_REG;
12159 return OP_AMD64_XOR_MEMBASE_REG;
12162 return OP_AMD64_ADD_MEMBASE_IMM;
12165 return OP_AMD64_SUB_MEMBASE_IMM;
12168 return OP_AMD64_AND_MEMBASE_IMM;
12171 return OP_AMD64_OR_MEMBASE_IMM;
12174 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-set opcode with the byte store that follows it into
 * a single SETcc-to-memory opcode (x86/amd64 only): only valid when the
 * store is a 1-byte OP_STOREI1_MEMBASE_REG, since SETcc writes one byte.
 * NOTE(review): the `case` labels selecting between SETEQ/SETNE are
 * elided in this sampled extract.
 */
12184 op_to_op_store_membase (int store_opcode, int opcode)
12186 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12189 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12190 return OP_X86_SETEQ_MEMBASE;
12192 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12193 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When an instruction's first source vreg was just loaded from memory
 * via LOAD_OPCODE, try to fold the load into the instruction itself
 * (reg-mem forms on x86/amd64).  Only full-word loads qualify; on
 * amd64/ilp32 an OP_LOADI8_MEMBASE source disqualifies 32-bit forms.
 * NOTE(review): sampled extract — several `case`/fallthrough lines and
 * the failure return paths are elided.
 */
12201 op_to_op_src1_membase (int load_opcode, int opcode)
12204 /* FIXME: This has sign extension issues */
12206 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12207 return OP_X86_COMPARE_MEMBASE8_IMM;
12210 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12215 return OP_X86_PUSH_MEMBASE;
12216 case OP_COMPARE_IMM:
12217 case OP_ICOMPARE_IMM:
12218 return OP_X86_COMPARE_MEMBASE_IMM;
12221 return OP_X86_COMPARE_MEMBASE_REG;
12225 #ifdef TARGET_AMD64
12226 /* FIXME: This has sign extension issues */
12228 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12229 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ilp32, OP_LOAD_MEMBASE is 32-bit wide, OP_LOADI8_MEMBASE 64-bit */
12234 #ifdef __mono_ilp32__
12235 if (load_opcode == OP_LOADI8_MEMBASE)
12237 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12239 return OP_X86_PUSH_MEMBASE;
12241 /* FIXME: This only works for 32 bit immediates
12242 case OP_COMPARE_IMM:
12243 case OP_LCOMPARE_IMM:
12244 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12245 return OP_AMD64_COMPARE_MEMBASE_IMM;
12247 case OP_ICOMPARE_IMM:
12248 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12249 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12253 #ifdef __mono_ilp32__
12254 if (load_opcode == OP_LOAD_MEMBASE)
12255 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12256 if (load_opcode == OP_LOADI8_MEMBASE)
12258 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12260 return OP_AMD64_COMPARE_MEMBASE_REG;
12263 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12264 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase, but folds a memory load feeding the
 * SECOND source operand into a reg-mem ALU/compare opcode (x86/amd64).
 * The load width selects between the 32-bit X86_*_REG_MEMBASE forms and
 * the 64-bit AMD64_*_REG_MEMBASE forms; ilp32 adjusts which loads count
 * as 32- vs 64-bit.
 * NOTE(review): sampled extract — `case OP_*:` labels and bail-out
 * returns are elided between the visible lines.
 */
12273 op_to_op_src2_membase (int load_opcode, int opcode)
12276 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12282 return OP_X86_COMPARE_REG_MEMBASE;
12284 return OP_X86_ADD_REG_MEMBASE;
12286 return OP_X86_SUB_REG_MEMBASE;
12288 return OP_X86_AND_REG_MEMBASE;
12290 return OP_X86_OR_REG_MEMBASE;
12292 return OP_X86_XOR_REG_MEMBASE;
12296 #ifdef TARGET_AMD64
12297 #ifdef __mono_ilp32__
12298 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12300 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit second operand: use the X86_* reg-mem forms */
12304 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12306 return OP_X86_ADD_REG_MEMBASE;
12308 return OP_X86_SUB_REG_MEMBASE;
12310 return OP_X86_AND_REG_MEMBASE;
12312 return OP_X86_OR_REG_MEMBASE;
12314 return OP_X86_XOR_REG_MEMBASE;
12316 #ifdef __mono_ilp32__
12317 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12319 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit second operand: use the AMD64_* reg-mem forms */
12324 return OP_AMD64_COMPARE_REG_MEMBASE;
12326 return OP_AMD64_ADD_REG_MEMBASE;
12328 return OP_AMD64_SUB_REG_MEMBASE;
12330 return OP_AMD64_AND_REG_MEMBASE;
12332 return OP_AMD64_OR_REG_MEMBASE;
12334 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to produce an immediate variant
 * for opcodes the current architecture emulates in software (long shifts
 * on 32-bit registers, mul/div/rem under the EMULATE_* defines), since
 * the emulation path needs the non-IMM form.  Delegates everything else
 * to mono_op_to_op_imm.
 * NOTE(review): the `case` labels guarded by these #ifs are elided in
 * this sampled extract.
 */
12343 mono_op_to_op_imm_noemul (int opcode)
12346 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12352 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12359 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12364 return mono_op_to_op_imm (opcode);
12369 * mono_handle_global_vregs:
12371 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12375 mono_handle_global_vregs (MonoCompile *cfg)
12377 gint32 *vreg_to_bb;
12378 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it,
 * or -1 once it is seen in a second bblock (0 = not seen yet).
 * NOTE(review): sizeof (gint32*) over-allocates for a gint32 array on
 * 64-bit (harmless, but sizeof (gint32) was presumably intended).
 */
12381 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12383 #ifdef MONO_ARCH_SIMD_INTRINSICS
12384 if (cfg->uses_simd_intrinsics)
12385 mono_simd_simplify_indirection (cfg);
12388 /* Find local vregs used in more than one bb */
12389 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12390 MonoInst *ins = bb->code;
12391 int block_num = bb->block_num;
12393 if (cfg->verbose_level > 2)
12394 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12397 for (; ins; ins = ins->next) {
12398 const char *spec = INS_INFO (ins->opcode);
12399 int regtype = 0, regindex;
12402 if (G_UNLIKELY (cfg->verbose_level > 2))
12403 mono_print_ins (ins);
/* By this pass all CIL opcodes must have been lowered to machine IR */
12405 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (0) then sreg1/sreg2/sreg3 (1-3) of this instruction */
12407 for (regindex = 0; regindex < 4; regindex ++) {
12410 if (regindex == 0) {
12411 regtype = spec [MONO_INST_DEST];
12412 if (regtype == ' ')
12415 } else if (regindex == 1) {
12416 regtype = spec [MONO_INST_SRC1];
12417 if (regtype == ' ')
12420 } else if (regindex == 2) {
12421 regtype = spec [MONO_INST_SRC2];
12422 if (regtype == ' ')
12425 } else if (regindex == 3) {
12426 regtype = spec [MONO_INST_SRC3];
12427 if (regtype == ' ')
12432 #if SIZEOF_REGISTER == 4
12433 /* In the LLVM case, the long opcodes are not decomposed */
12434 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12436 * Since some instructions reference the original long vreg,
12437 * and some reference the two component vregs, it is quite hard
12438 * to determine when it needs to be global. So be conservative.
12440 if (!get_vreg_to_inst (cfg, vreg)) {
12441 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12443 if (cfg->verbose_level > 2)
12444 printf ("LONG VREG R%d made global.\n", vreg);
12448 * Make the component vregs volatile since the optimizations can
12449 * get confused otherwise.
12451 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12452 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12456 g_assert (vreg != -1);
12458 prev_bb = vreg_to_bb [vreg];
12459 if (prev_bb == 0) {
12460 /* 0 is a valid block num */
12461 vreg_to_bb [vreg] = block_num + 1;
12462 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them */
12463 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12466 if (!get_vreg_to_inst (cfg, vreg)) {
12467 if (G_UNLIKELY (cfg->verbose_level > 2))
12468 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a MonoInst variable typed per the reg spec */
12472 if (vreg_is_ref (cfg, vreg))
12473 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12475 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12478 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12481 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12484 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12487 g_assert_not_reached ();
12491 /* Flag as having been used in more than one bb */
12492 vreg_to_bb [vreg] = -1;
12498 /* If a variable is used in only one bblock, convert it into a local vreg */
12499 for (i = 0; i < cfg->num_varinfo; i++) {
12500 MonoInst *var = cfg->varinfo [i];
12501 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12503 switch (var->type) {
12509 #if SIZEOF_REGISTER == 8
12512 #if !defined(TARGET_X86)
12513 /* Enabling this screws up the fp stack on x86 */
12516 if (mono_arch_is_soft_float ())
12519 /* Arguments are implicitly global */
12520 /* Putting R4 vars into registers doesn't work currently */
12521 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12522 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12524 * Make that the variable's liveness interval doesn't contain a call, since
12525 * that would cause the lvreg to be spilled, making the whole optimization
12528 /* This is too slow for JIT compilation */
12530 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12532 int def_index, call_index, ins_index;
12533 gboolean spilled = FALSE;
12538 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12539 const char *spec = INS_INFO (ins->opcode);
12541 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12542 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1; the second arm
 * almost certainly intended SRC2/sreg2, so uses through the second
 * source operand are currently never detected here — confirm upstream.
 */
12544 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12545 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12546 if (call_index > def_index) {
12552 if (MONO_IS_CALL (ins))
12553 call_index = ins_index;
12563 if (G_UNLIKELY (cfg->verbose_level > 2))
12564 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: drop the variable; references use the raw vreg directly */
12565 var->flags |= MONO_INST_IS_DEAD;
12566 cfg->vreg_to_inst [var->dreg] = NULL;
12573 * Compress the varinfo and vars tables so the liveness computation is faster and
12574 * takes up less space.
12577 for (i = 0; i < cfg->num_varinfo; ++i) {
12578 MonoInst *var = cfg->varinfo [i];
12579 if (pos < i && cfg->locals_start == i)
12580 cfg->locals_start = pos;
12581 if (!(var->flags & MONO_INST_IS_DEAD)) {
12583 cfg->varinfo [pos] = cfg->varinfo [i];
12584 cfg->varinfo [pos]->inst_c0 = pos;
12585 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12586 cfg->vars [pos].idx = pos;
12587 #if SIZEOF_REGISTER == 4
12588 if (cfg->varinfo [pos]->type == STACK_I8) {
12589 /* Modify the two component vars too */
12592 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12593 var1->inst_c0 = pos;
12594 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12595 var1->inst_c0 = pos;
12602 cfg->num_varinfo = pos;
12603 if (cfg->locals_start > cfg->num_varinfo)
12604 cfg->locals_start = cfg->num_varinfo;
12608 * mono_spill_global_vars:
12610 * Generate spill code for variables which are not allocated to registers,
12611 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12612 * code is generated which could be optimized by the local optimization passes.
12615 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12617 MonoBasicBlock *bb;
12619 int orig_next_vreg;
12620 guint32 *vreg_to_lvreg;
12622 guint32 i, lvregs_len;
12623 gboolean dest_has_lvreg = FALSE;
12624 guint32 stacktypes [128];
12625 MonoInst **live_range_start, **live_range_end;
12626 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12627 int *gsharedvt_vreg_to_idx = NULL;
12629 *need_local_opts = FALSE;
12631 memset (spec2, 0, sizeof (spec2));
12633 /* FIXME: Move this function to mini.c */
12634 stacktypes ['i'] = STACK_PTR;
12635 stacktypes ['l'] = STACK_I8;
12636 stacktypes ['f'] = STACK_R8;
12637 #ifdef MONO_ARCH_SIMD_INTRINSICS
12638 stacktypes ['x'] = STACK_VTYPE;
12641 #if SIZEOF_REGISTER == 4
12642 /* Create MonoInsts for longs */
12643 for (i = 0; i < cfg->num_varinfo; i++) {
12644 MonoInst *ins = cfg->varinfo [i];
12646 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12647 switch (ins->type) {
12652 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12655 g_assert (ins->opcode == OP_REGOFFSET);
12657 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12659 tree->opcode = OP_REGOFFSET;
12660 tree->inst_basereg = ins->inst_basereg;
12661 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12663 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12665 tree->opcode = OP_REGOFFSET;
12666 tree->inst_basereg = ins->inst_basereg;
12667 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12677 if (cfg->compute_gc_maps) {
12678 /* registers need liveness info even for !non refs */
12679 for (i = 0; i < cfg->num_varinfo; i++) {
12680 MonoInst *ins = cfg->varinfo [i];
12682 if (ins->opcode == OP_REGVAR)
12683 ins->flags |= MONO_INST_GC_TRACK;
12687 if (cfg->gsharedvt) {
12688 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12690 for (i = 0; i < cfg->num_varinfo; ++i) {
12691 MonoInst *ins = cfg->varinfo [i];
12694 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12695 if (i >= cfg->locals_start) {
12697 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12698 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12699 ins->opcode = OP_GSHAREDVT_LOCAL;
12700 ins->inst_imm = idx;
12703 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12704 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12710 /* FIXME: widening and truncation */
12713 * As an optimization, when a variable allocated to the stack is first loaded into
12714 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12715 * the variable again.
12717 orig_next_vreg = cfg->next_vreg;
12718 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12719 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12723 * These arrays contain the first and last instructions accessing a given
12725 * Since we emit bblocks in the same order we process them here, and we
12726 * don't split live ranges, these will precisely describe the live range of
12727 * the variable, i.e. the instruction range where a valid value can be found
12728 * in the variables location.
12729 * The live range is computed using the liveness info computed by the liveness pass.
12730 * We can't use vmv->range, since that is an abstract live range, and we need
12731 * one which is instruction precise.
12732 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12734 /* FIXME: Only do this if debugging info is requested */
12735 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12736 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12737 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12738 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12740 /* Add spill loads/stores */
12741 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12744 if (cfg->verbose_level > 2)
12745 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12747 /* Clear vreg_to_lvreg array */
12748 for (i = 0; i < lvregs_len; i++)
12749 vreg_to_lvreg [lvregs [i]] = 0;
12753 MONO_BB_FOR_EACH_INS (bb, ins) {
12754 const char *spec = INS_INFO (ins->opcode);
12755 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12756 gboolean store, no_lvreg;
12757 int sregs [MONO_MAX_SRC_REGS];
12759 if (G_UNLIKELY (cfg->verbose_level > 2))
12760 mono_print_ins (ins);
12762 if (ins->opcode == OP_NOP)
12766 * We handle LDADDR here as well, since it can only be decomposed
12767 * when variable addresses are known.
12769 if (ins->opcode == OP_LDADDR) {
12770 MonoInst *var = ins->inst_p0;
12772 if (var->opcode == OP_VTARG_ADDR) {
12773 /* Happens on SPARC/S390 where vtypes are passed by reference */
12774 MonoInst *vtaddr = var->inst_left;
12775 if (vtaddr->opcode == OP_REGVAR) {
12776 ins->opcode = OP_MOVE;
12777 ins->sreg1 = vtaddr->dreg;
12779 else if (var->inst_left->opcode == OP_REGOFFSET) {
12780 ins->opcode = OP_LOAD_MEMBASE;
12781 ins->inst_basereg = vtaddr->inst_basereg;
12782 ins->inst_offset = vtaddr->inst_offset;
12785 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12786 /* gsharedvt arg passed by ref */
12787 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12789 ins->opcode = OP_LOAD_MEMBASE;
12790 ins->inst_basereg = var->inst_basereg;
12791 ins->inst_offset = var->inst_offset;
12792 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12793 MonoInst *load, *load2, *load3;
12794 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12795 int reg1, reg2, reg3;
12796 MonoInst *info_var = cfg->gsharedvt_info_var;
12797 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12801 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12804 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12806 g_assert (info_var);
12807 g_assert (locals_var);
12809 /* Mark the instruction used to compute the locals var as used */
12810 cfg->gsharedvt_locals_var_ins = NULL;
12812 /* Load the offset */
12813 if (info_var->opcode == OP_REGOFFSET) {
12814 reg1 = alloc_ireg (cfg);
12815 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12816 } else if (info_var->opcode == OP_REGVAR) {
12818 reg1 = info_var->dreg;
12820 g_assert_not_reached ();
12822 reg2 = alloc_ireg (cfg);
12823 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12824 /* Load the locals area address */
12825 reg3 = alloc_ireg (cfg);
12826 if (locals_var->opcode == OP_REGOFFSET) {
12827 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12828 } else if (locals_var->opcode == OP_REGVAR) {
12829 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12831 g_assert_not_reached ();
12833 /* Compute the address */
12834 ins->opcode = OP_PADD;
12838 mono_bblock_insert_before_ins (bb, ins, load3);
12839 mono_bblock_insert_before_ins (bb, load3, load2);
12841 mono_bblock_insert_before_ins (bb, load2, load);
12843 g_assert (var->opcode == OP_REGOFFSET);
12845 ins->opcode = OP_ADD_IMM;
12846 ins->sreg1 = var->inst_basereg;
12847 ins->inst_imm = var->inst_offset;
12850 *need_local_opts = TRUE;
12851 spec = INS_INFO (ins->opcode);
12854 if (ins->opcode < MONO_CEE_LAST) {
12855 mono_print_ins (ins);
12856 g_assert_not_reached ();
12860 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12864 if (MONO_IS_STORE_MEMBASE (ins)) {
12865 tmp_reg = ins->dreg;
12866 ins->dreg = ins->sreg2;
12867 ins->sreg2 = tmp_reg;
12870 spec2 [MONO_INST_DEST] = ' ';
12871 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12872 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12873 spec2 [MONO_INST_SRC3] = ' ';
12875 } else if (MONO_IS_STORE_MEMINDEX (ins))
12876 g_assert_not_reached ();
12881 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12882 printf ("\t %.3s %d", spec, ins->dreg);
12883 num_sregs = mono_inst_get_src_registers (ins, sregs);
12884 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12885 printf (" %d", sregs [srcindex]);
12892 regtype = spec [MONO_INST_DEST];
12893 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12896 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12897 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12898 MonoInst *store_ins;
12900 MonoInst *def_ins = ins;
12901 int dreg = ins->dreg; /* The original vreg */
12903 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12905 if (var->opcode == OP_REGVAR) {
12906 ins->dreg = var->dreg;
12907 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12909 * Instead of emitting a load+store, use a _membase opcode.
12911 g_assert (var->opcode == OP_REGOFFSET);
12912 if (ins->opcode == OP_MOVE) {
12916 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12917 ins->inst_basereg = var->inst_basereg;
12918 ins->inst_offset = var->inst_offset;
12921 spec = INS_INFO (ins->opcode);
12925 g_assert (var->opcode == OP_REGOFFSET);
12927 prev_dreg = ins->dreg;
12929 /* Invalidate any previous lvreg for this vreg */
12930 vreg_to_lvreg [ins->dreg] = 0;
12934 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12936 store_opcode = OP_STOREI8_MEMBASE_REG;
12939 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12941 #if SIZEOF_REGISTER != 8
12942 if (regtype == 'l') {
12943 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12944 mono_bblock_insert_after_ins (bb, ins, store_ins);
12945 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12946 mono_bblock_insert_after_ins (bb, ins, store_ins);
12947 def_ins = store_ins;
12952 g_assert (store_opcode != OP_STOREV_MEMBASE);
12954 /* Try to fuse the store into the instruction itself */
12955 /* FIXME: Add more instructions */
12956 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12957 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12958 ins->inst_imm = ins->inst_c0;
12959 ins->inst_destbasereg = var->inst_basereg;
12960 ins->inst_offset = var->inst_offset;
12961 spec = INS_INFO (ins->opcode);
12962 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12963 ins->opcode = store_opcode;
12964 ins->inst_destbasereg = var->inst_basereg;
12965 ins->inst_offset = var->inst_offset;
12969 tmp_reg = ins->dreg;
12970 ins->dreg = ins->sreg2;
12971 ins->sreg2 = tmp_reg;
12974 spec2 [MONO_INST_DEST] = ' ';
12975 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12976 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12977 spec2 [MONO_INST_SRC3] = ' ';
12979 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12980 // FIXME: The backends expect the base reg to be in inst_basereg
12981 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12983 ins->inst_basereg = var->inst_basereg;
12984 ins->inst_offset = var->inst_offset;
12985 spec = INS_INFO (ins->opcode);
12987 /* printf ("INS: "); mono_print_ins (ins); */
12988 /* Create a store instruction */
12989 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12991 /* Insert it after the instruction */
12992 mono_bblock_insert_after_ins (bb, ins, store_ins);
12994 def_ins = store_ins;
12997 * We can't assign ins->dreg to var->dreg here, since the
12998 * sregs could use it. So set a flag, and do it after
13001 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13002 dest_has_lvreg = TRUE;
13007 if (def_ins && !live_range_start [dreg]) {
13008 live_range_start [dreg] = def_ins;
13009 live_range_start_bb [dreg] = bb;
13012 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13015 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13016 tmp->inst_c1 = dreg;
13017 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13024 num_sregs = mono_inst_get_src_registers (ins, sregs);
13025 for (srcindex = 0; srcindex < 3; ++srcindex) {
13026 regtype = spec [MONO_INST_SRC1 + srcindex];
13027 sreg = sregs [srcindex];
13029 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13030 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13031 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13032 MonoInst *use_ins = ins;
13033 MonoInst *load_ins;
13034 guint32 load_opcode;
13036 if (var->opcode == OP_REGVAR) {
13037 sregs [srcindex] = var->dreg;
13038 //mono_inst_set_src_registers (ins, sregs);
13039 live_range_end [sreg] = use_ins;
13040 live_range_end_bb [sreg] = bb;
13042 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13045 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13046 /* var->dreg is a hreg */
13047 tmp->inst_c1 = sreg;
13048 mono_bblock_insert_after_ins (bb, ins, tmp);
13054 g_assert (var->opcode == OP_REGOFFSET);
13056 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13058 g_assert (load_opcode != OP_LOADV_MEMBASE);
13060 if (vreg_to_lvreg [sreg]) {
13061 g_assert (vreg_to_lvreg [sreg] != -1);
13063 /* The variable is already loaded to an lvreg */
13064 if (G_UNLIKELY (cfg->verbose_level > 2))
13065 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13066 sregs [srcindex] = vreg_to_lvreg [sreg];
13067 //mono_inst_set_src_registers (ins, sregs);
13071 /* Try to fuse the load into the instruction */
13072 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13073 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13074 sregs [0] = var->inst_basereg;
13075 //mono_inst_set_src_registers (ins, sregs);
13076 ins->inst_offset = var->inst_offset;
13077 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13078 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13079 sregs [1] = var->inst_basereg;
13080 //mono_inst_set_src_registers (ins, sregs);
13081 ins->inst_offset = var->inst_offset;
13083 if (MONO_IS_REAL_MOVE (ins)) {
13084 ins->opcode = OP_NOP;
13087 //printf ("%d ", srcindex); mono_print_ins (ins);
13089 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13091 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13092 if (var->dreg == prev_dreg) {
13094 * sreg refers to the value loaded by the load
13095 * emitted below, but we need to use ins->dreg
13096 * since it refers to the store emitted earlier.
13100 g_assert (sreg != -1);
13101 vreg_to_lvreg [var->dreg] = sreg;
13102 g_assert (lvregs_len < 1024);
13103 lvregs [lvregs_len ++] = var->dreg;
13107 sregs [srcindex] = sreg;
13108 //mono_inst_set_src_registers (ins, sregs);
13110 #if SIZEOF_REGISTER != 8
13111 if (regtype == 'l') {
13112 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13113 mono_bblock_insert_before_ins (bb, ins, load_ins);
13114 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13115 mono_bblock_insert_before_ins (bb, ins, load_ins);
13116 use_ins = load_ins;
13121 #if SIZEOF_REGISTER == 4
13122 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13124 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13125 mono_bblock_insert_before_ins (bb, ins, load_ins);
13126 use_ins = load_ins;
13130 if (var->dreg < orig_next_vreg) {
13131 live_range_end [var->dreg] = use_ins;
13132 live_range_end_bb [var->dreg] = bb;
13135 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13138 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13139 tmp->inst_c1 = var->dreg;
13140 mono_bblock_insert_after_ins (bb, ins, tmp);
13144 mono_inst_set_src_registers (ins, sregs);
13146 if (dest_has_lvreg) {
13147 g_assert (ins->dreg != -1);
13148 vreg_to_lvreg [prev_dreg] = ins->dreg;
13149 g_assert (lvregs_len < 1024);
13150 lvregs [lvregs_len ++] = prev_dreg;
13151 dest_has_lvreg = FALSE;
13155 tmp_reg = ins->dreg;
13156 ins->dreg = ins->sreg2;
13157 ins->sreg2 = tmp_reg;
13160 if (MONO_IS_CALL (ins)) {
13161 /* Clear vreg_to_lvreg array */
13162 for (i = 0; i < lvregs_len; i++)
13163 vreg_to_lvreg [lvregs [i]] = 0;
13165 } else if (ins->opcode == OP_NOP) {
13167 MONO_INST_NULLIFY_SREGS (ins);
13170 if (cfg->verbose_level > 2)
13171 mono_print_ins_index (1, ins);
13174 /* Extend the live range based on the liveness info */
13175 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13176 for (i = 0; i < cfg->num_varinfo; i ++) {
13177 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13179 if (vreg_is_volatile (cfg, vi->vreg))
13180 /* The liveness info is incomplete */
13183 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13184 /* Live from at least the first ins of this bb */
13185 live_range_start [vi->vreg] = bb->code;
13186 live_range_start_bb [vi->vreg] = bb;
13189 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13190 /* Live at least until the last ins of this bb */
13191 live_range_end [vi->vreg] = bb->last_ins;
13192 live_range_end_bb [vi->vreg] = bb;
13198 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13200 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13201 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13203 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13204 for (i = 0; i < cfg->num_varinfo; ++i) {
13205 int vreg = MONO_VARINFO (cfg, i)->vreg;
13208 if (live_range_start [vreg]) {
13209 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13211 ins->inst_c1 = vreg;
13212 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13214 if (live_range_end [vreg]) {
13215 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13217 ins->inst_c1 = vreg;
13218 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13219 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13221 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13227 if (cfg->gsharedvt_locals_var_ins) {
13228 /* Nullify if unused */
13229 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13230 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13233 g_free (live_range_start);
13234 g_free (live_range_end);
13235 g_free (live_range_start_bb);
13236 g_free (live_range_end_bb);
13241 * - use 'iadd' instead of 'int_add'
13242 * - handling ovf opcodes: decompose in method_to_ir.
13243 * - unify iregs/fregs
13244 * -> partly done, the missing parts are:
13245 * - a more complete unification would involve unifying the hregs as well, so
13246 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13247 * would no longer map to the machine hregs, so the code generators would need to
13248 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13249 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13250 * fp/non-fp branches speeds it up by about 15%.
13251 * - use sext/zext opcodes instead of shifts
13253 * - get rid of TEMPLOADs if possible and use vregs instead
13254 * - clean up usage of OP_P/OP_ opcodes
13255 * - cleanup usage of DUMMY_USE
13256 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13258 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13259 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13260 * - make sure handle_stack_args () is called before the branch is emitted
13261 * - when the new IR is done, get rid of all unused stuff
13262 * - COMPARE/BEQ as separate instructions or unify them ?
13263 * - keeping them separate allows specialized compare instructions like
13264 * compare_imm, compare_membase
13265 * - most back ends unify fp compare+branch, fp compare+ceq
13266 * - integrate mono_save_args into inline_method
 13267 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13268 * - handle long shift opts on 32 bit platforms somehow: they require
13269 * 3 sregs (2 for arg1 and 1 for arg2)
13270 * - make byref a 'normal' type.
13271 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13272 * variable if needed.
13273 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13274 * like inline_method.
13275 * - remove inlining restrictions
13276 * - fix LNEG and enable cfold of INEG
13277 * - generalize x86 optimizations like ldelema as a peephole optimization
13278 * - add store_mem_imm for amd64
13279 * - optimize the loading of the interruption flag in the managed->native wrappers
13280 * - avoid special handling of OP_NOP in passes
13281 * - move code inserting instructions into one function/macro.
13282 * - try a coalescing phase after liveness analysis
13283 * - add float -> vreg conversion + local optimizations on !x86
13284 * - figure out how to handle decomposed branches during optimizations, ie.
13285 * compare+branch, op_jump_table+op_br etc.
13286 * - promote RuntimeXHandles to vregs
13287 * - vtype cleanups:
13288 * - add a NEW_VARLOADA_VREG macro
13289 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13290 * accessing vtype fields.
13291 * - get rid of I8CONST on 64 bit platforms
13292 * - dealing with the increase in code size due to branches created during opcode
13294 * - use extended basic blocks
13295 * - all parts of the JIT
13296 * - handle_global_vregs () && local regalloc
13297 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13298 * - sources of increase in code size:
13301 * - isinst and castclass
13302 * - lvregs not allocated to global registers even if used multiple times
13303 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13305 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13306 * - add all micro optimizations from the old JIT
13307 * - put tree optimizations into the deadce pass
13308 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13309 * specific function.
13310 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13311 * fcompare + branchCC.
13312 * - create a helper function for allocating a stack slot, taking into account
13313 * MONO_CFG_HAS_SPILLUP.
13315 * - merge the ia64 switch changes.
13316 * - optimize mono_regstate2_alloc_int/float.
13317 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13318 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13319 * parts of the tree could be separated by other instructions, killing the tree
13320 * arguments, or stores killing loads etc. Also, should we fold loads into other
13321 * instructions if the result of the load is used multiple times ?
13322 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13323 * - LAST MERGE: 108395.
13324 * - when returning vtypes in registers, generate IR and append it to the end of the
13325 * last bb instead of doing it in the epilog.
13326 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13334 - When to decompose opcodes:
13335 - earlier: this makes some optimizations hard to implement, since the low level IR
 13336 no longer contains the necessary information. But it is easier to do.
13337 - later: harder to implement, enables more optimizations.
13338 - Branches inside bblocks:
13339 - created when decomposing complex opcodes.
13340 - branches to another bblock: harmless, but not tracked by the branch
13341 optimizations, so need to branch to a label at the start of the bblock.
13342 - branches to inside the same bblock: very problematic, trips up the local
 13343 reg allocator. Can be fixed by splitting the current bblock, but that is a
13344 complex operation, since some local vregs can become global vregs etc.
13345 - Local/global vregs:
13346 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13347 local register allocator.
13348 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13349 structure, created by mono_create_var (). Assigned to hregs or the stack by
13350 the global register allocator.
13351 - When to do optimizations like alu->alu_imm:
13352 - earlier -> saves work later on since the IR will be smaller/simpler
13353 - later -> can work on more instructions
13354 - Handling of valuetypes:
13355 - When a vtype is pushed on the stack, a new temporary is created, an
13356 instruction computing its address (LDADDR) is emitted and pushed on
13357 the stack. Need to optimize cases when the vtype is used immediately as in
13358 argument passing, stloc etc.
13359 - Instead of the to_end stuff in the old JIT, simply call the function handling
13360 the values on the stack before emitting the last instruction of the bb.
13363 #endif /* DISABLE_JIT */