2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whenever 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 * Spill the COUNT values in SP (the evaluation stack of cfg->cbb) into local
 * variables at a basic-block boundary, so successor blocks can reload them.
 * Successors share the variable array via bb->out_stack / outb->in_stack; a
 * mismatch in stack depth at a join point marks the method unverifiable.
 * NOTE(review): elided chunk — loop bodies below are missing lines (e.g. the
 * `continue` after the BB_EXCEPTION_HANDLER checks); comments describe only
 * the visible fragments.
 */
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick/build the out_stack variable array. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh array from the mempool. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen array to every (non-handler) successor. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
/* Stack depth mismatch at a join point: CIL is unverifiable. */
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Emit the stores for this bblock's out_stack and redirect sp to the temps. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* Duplicate the stores into each distinct successor in_stack. */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Load the interface offset entry for KLASS into INTF_REG, given the vtable
 * address in VTABLE_REG. Under AOT the (adjusted) interface id is a patch
 * constant added to the vtable pointer; otherwise it is folded into a
 * negative constant offset (the array lives at negative offsets).
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is resolved at AOT load time to the byte offset to add. */
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT path: interface_id is known, index backwards from the vtable. */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit code leaving a nonzero value in INTF_BIT_REG iff the interface bitmap
 * found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Three variants: compressed bitmap (icall), AOT (id loaded as a patch
 * constant, bit computed at runtime), and JIT (id known, constant byte/mask).
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to an icall. */
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time, so compute
 * byte = bitmap[iid >> 3] and mask = 1 << (iid & 7) in emitted code. */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask fold to constants. */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: check the bitmap embedded in MonoClass. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: check the bitmap embedded in MonoVTable. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Compare KLASS's interface id against MAX_IID_REG; on failure either branch
 * to FALSE_TARGET (if given) or raise InvalidCastException.
 * NOTE(review): the line choosing between the branch and the exception is
 * elided — presumably `if (false_target) ... else ...`; confirm in the
 * full source.
 */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
/* AOT: the id is a patch constant, so use a register compare. */
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* Same check as the vtable variant, but reading MonoClass.max_interface_id. */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is KLASS_REG a subclass of KLASS" test using the supertypes table:
 * load supertypes[idepth-1] and compare it with KLASS (or KLASS_INS->dreg
 * when the class is only known at runtime). Branches to TRUE_TARGET on match;
 * FALSE_TARGET is used when the idepth is too small.
 * NOTE(review): elided — the branch after the idepth compare at 1579/1580 and
 * the klass_ins/aot/jit `if` chain are missing their connective lines.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
/* Ensure klass->supertypes/idepth are initialized before we read them. */
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: verify the candidate's table is deep enough first. */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the class pointer is a compile-time immediate. */
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test with a compile-time-known class only. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Check that the object whose vtable is in VTABLE_REG implements interface
 * KLASS: bound-check the interface id, then test the vtable's interface
 * bitmap. On success branches to TRUE_TARGET; on failure either branches
 * (isinst flavor) or raises InvalidCastException (castclass flavor).
 * NOTE(review): the `if (true_target) ... else ...` selector line is elided.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface check as mini_emit_iface_cast, driven off a MonoClass*
 * in KLASS_REG instead of a vtable pointer. */
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check: compare KLASS_REG against KLASS (or
 * KLASS_INST->dreg when the class is a runtime value) and raise
 * InvalidCastException on inequality.
 * NOTE(review): the opening `if (klass_inst) {` line is elided; the first
 * compare below is the runtime-class arm.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time-known class. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing. */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
/* AOT: class pointer comes from a patch constant, not an immediate. */
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit the castclass check for the class in KLASS_REG (object in OBJ_REG,
 * target class KLASS). Arrays check rank and recursively check the element
 * class, with special cases for enums and System.Object elements; non-arrays
 * walk the supertypes table and do an exact-class check on the entry at
 * KLASS's depth. Throws InvalidCastException on mismatch.
 * NOTE(review): elided — the `if (klass->rank) {` opener and several
 * else/brace lines are missing; the rank path is the first half below.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* Array path: rank must match exactly. */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1683 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts enum[]-style element classes; see the parent check. */
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
/* SZARRAY (vector) has a NULL bounds pointer; multi-dim arrays don't. */
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk (parallels isninst, but throwing). */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time-known class only. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline code that sets SIZE bytes at DESTREG+OFFSET to VAL (only
 * VAL == 0 is supported, per the assert). Small aligned sizes use a single
 * immediate store; otherwise the value is materialized in a register and
 * stored in progressively smaller chunks (8/4/2/1 bytes), honoring ALIGN.
 * NOTE(review): elided — the switch cases, loop headers, and offset/size
 * bookkeeping between the stores are missing below.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: put VAL in a register sized to the native word. */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: finish with 4-, 2-, then 1-byte stores for the remainder. */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit an inline, fully unrolled copy of SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET. Copies in the widest chunks ALIGN permits (8/4/2/1
 * bytes); unaligned copies degrade to byte loads/stores. SIZE is asserted
 * < 10000 to bound code expansion.
 * NOTE(review): elided — the loop headers and offset/size decrements between
 * the load/store pairs are missing below.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* Unaligned source/dest: copy one byte at a time. */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
/* 64-bit targets: move 8 bytes per load/store pair while possible. */
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remainder: 4-, 2-, then 1-byte copies. */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a return TYPE (after generic-sharing normalization) plus the
 * calli/virt flags to the matching call opcode family:
 * CALL (int/ptr/object), LCALL (i8), FCALL (r8), VCALL (valuetype/typedbyref),
 * VOIDCALL (void). Enums and generic instances are unwrapped and retried.
 * NOTE(review): elided — many case labels and the `goto`/retry lines between
 * the visible fragments are missing.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized: plain CALL family. */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
1909 if (type->data.klass->enumtype) {
/* Enums are handled as their underlying integral type. */
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open type. */
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 case MONO_TYPE_MVAR:
/* gsharedvt type variables are treated as vtypes. */
1922 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1924 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1930 * target_type_is_incompatible:
1931 * @cfg: MonoCompile context
1933 * Check that the item @arg on the evaluation stack can be stored
1934 * in the target type (can be a local, or field, etc).
1935 * The cfg arg can be used to check if we need verification or just
1938 * Returns: non-0 value if arg can't be stored on a target.
/*
 * NOTE(review): elided chunk — the `return 1;`/`return 0;` lines that follow
 * most of the comparisons below are missing; each `if (arg->type != ...)`
 * visibly guards an (elided) incompatibility result.
 */
1941 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1943 MonoType *simple_type;
1946 if (target->byref) {
1947 /* FIXME: check that the pointed to types match */
1948 if (arg->type == STACK_MP)
1949 return arg->klass != mono_class_from_mono_type (target);
1950 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before the type-kind switch. */
1955 simple_type = mono_type_get_underlying_type (target);
1956 switch (simple_type->type) {
1957 case MONO_TYPE_VOID:
1961 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_CHAR:
1967 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1971 /* STACK_MP is needed when setting pinned locals */
1972 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1977 case MONO_TYPE_FNPTR:
1979 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1980 * in native int. (#688008).
1982 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 if (arg->type != STACK_I8)
2001 if (arg->type != STACK_R8)
2004 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact class. */
2005 if (arg->type != STACK_VTYPE)
2007 klass = mono_class_from_mono_type (simple_type);
2008 if (klass != arg->klass)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (arg->type != STACK_VTYPE)
2014 klass = mono_class_from_mono_type (simple_type);
2015 if (klass != arg->klass)
2018 case MONO_TYPE_GENERICINST:
2019 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2020 if (arg->type != STACK_VTYPE)
2022 klass = mono_class_from_mono_type (simple_type);
2023 if (klass != arg->klass)
2027 if (arg->type != STACK_OBJ)
2029 /* FIXME: check type compatibility */
2033 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
2034 g_assert (cfg->generic_sharing_context);
2035 if (mini_type_var_is_vt (cfg, simple_type)) {
2036 if (arg->type != STACK_VTYPE)
2039 if (arg->type != STACK_OBJ)
2044 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2050 * Prepare arguments for passing to a function call.
2051 * Return a non-zero value if the arguments can't be passed to the given
2053 * The type checks are not yet complete and some conversions may need
2054 * casts on 32 or 64 bit architectures.
2056 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 * Validate the stack kinds in ARGS against SIG (including the `this`
 * argument when sig->hasthis). Returns nonzero on mismatch.
 * NOTE(review): elided — `return 1;`/`break;` lines after the comparisons
 * and the enum/genericinst retry `goto`s are missing below.
 */
2059 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2061 MonoType *simple_type;
/* `this` must be an object, managed pointer, or native pointer. */
2065 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2069 for (i = 0; i < sig->param_count; ++i) {
2070 if (sig->params [i]->byref) {
2071 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2075 simple_type = sig->params [i];
2076 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2078 switch (simple_type->type) {
2079 case MONO_TYPE_VOID:
2084 case MONO_TYPE_BOOLEAN:
2087 case MONO_TYPE_CHAR:
2090 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2096 case MONO_TYPE_FNPTR:
2097 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2100 case MONO_TYPE_CLASS:
2101 case MONO_TYPE_STRING:
2102 case MONO_TYPE_OBJECT:
2103 case MONO_TYPE_SZARRAY:
2104 case MONO_TYPE_ARRAY:
2105 if (args [i]->type != STACK_OBJ)
2110 if (args [i]->type != STACK_I8)
2115 if (args [i]->type != STACK_R8)
2118 case MONO_TYPE_VALUETYPE:
2119 if (simple_type->data.klass->enumtype) {
/* Enums validate as their underlying integral type. */
2120 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_TYPEDBYREF:
2127 if (args [i]->type != STACK_VTYPE)
2130 case MONO_TYPE_GENERICINST:
2131 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
2136 if (args [i]->type != STACK_VTYPE)
2140 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart.
 * NOTE(review): almost entirely elided — only one case label and the
 * unreachable default are visible; the return statements are missing.
 */
2148 callvirt_to_call (int opcode)
2153 case OP_VOIDCALLVIRT:
2162 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode
 * (call through a [basereg+offset] slot, e.g. a vtable or delegate slot).
 * NOTE(review): elided — the `switch (opcode) {` line and several case
 * labels are missing between the fragments below.
 */
2169 callvirt_to_call_membase (int opcode)
2173 return OP_CALL_MEMBASE;
2174 case OP_VOIDCALLVIRT:
2175 return OP_VOIDCALL_MEMBASE;
2177 return OP_FCALL_MEMBASE;
2179 return OP_LCALL_MEMBASE;
2181 return OP_VCALL_MEMBASE;
2183 g_assert_not_reached ();
2189 #ifdef MONO_ARCH_HAVE_IMT
2190 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the explicit IMT_ARG value or a
 * constant for METHOD) into a register and attach it to CALL. Three paths:
 * LLVM (pass via call->imt_arg_reg), native with MONO_ARCH_IMT_REG (pass in
 * the dedicated register), or the arch-specific fallback.
 */
2192 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2196 if (COMPILE_LLVM (cfg)) {
2197 method_reg = alloc_preg (cfg);
2200 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2201 } else if (cfg->compile_aot) {
2202 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2205 MONO_INST_NEW (cfg, ins, OP_PCONST);
2206 ins->inst_p0 = method;
2207 ins->dreg = method_reg;
2208 MONO_ADD_INS (cfg->cbb, ins);
2212 call->imt_arg_reg = method_reg;
2214 #ifdef MONO_ARCH_IMT_REG
2215 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2217 /* Need this to keep the IMT arg alive */
2218 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2223 #ifdef MONO_ARCH_IMT_REG
/* Non-LLVM path: same constant-materialization logic as above. */
2224 method_reg = alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2228 } else if (cfg->compile_aot) {
2229 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2232 MONO_INST_NEW (cfg, ins, OP_PCONST);
2233 ins->inst_p0 = method;
2234 ins->dreg = method_reg;
2235 MONO_ADD_INS (cfg->cbb, ins);
2238 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register: let the backend decide. */
2240 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP pointing
 * at TARGET. Mempool-owned: freed with the pool, never individually.
 */
2245 static MonoJumpInfo *
2246 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2248 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2252 ji->data.target = target;
/* Return KLASS's generic-context usage flags, or 0 (elided branch) when the
 * method is not compiled with generic sharing. */
2258 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2260 if (cfg->generic_sharing_context)
2261 return mono_class_check_context_used (klass);
/* Method-level counterpart of mini_class_check_context_used. */
2267 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2269 if (cfg->generic_sharing_context)
2270 return mono_method_check_context_used (method);
2276 * check_method_sharing:
2278 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs via OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may be NULL):
 * - pass_vtable: static or valuetype methods of generic classes whose code
 *   may be shared need the concrete vtable as a hidden argument.
 * - pass_mrgctx: generic methods (method_inst set) that may be shared need a
 *   method RGCTX instead; the two are mutually exclusive (asserted).
 */
2281 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2283 gboolean pass_vtable = FALSE;
2284 gboolean pass_mrgctx = FALSE;
2286 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2287 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2288 gboolean sharable = FALSE;
2290 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2293 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2294 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2295 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2297 sharable = sharing_enabled && context_sharable;
2301 * Pass vtable iff target method might
2302 * be shared, which means that sharing
2303 * is enabled for its class and its
2304 * context is sharable (and it's not a
2307 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2311 if (mini_method_get_context (cmethod) &&
2312 mini_method_get_context (cmethod)->method_inst) {
/* A generic method can't take both a vtable and an mrgctx. */
2313 g_assert (!pass_vtable);
2315 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2319 MonoGenericContext *context = mini_method_get_context (cmethod);
2320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2322 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always force the mrgctx. */
2324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2329 if (out_pass_vtable)
2330 *out_pass_vtable = pass_vtable;
2331 if (out_pass_mrgctx)
2332 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: choose the opcode (tail call vs the
 * ret-type-derived call opcode), set up the return value (including the
 * hidden vtype return address via OP_OUTARG_VTRETADDR), apply the soft-float
 * r4 argument conversion when needed, and let the backend (or LLVM) lower
 * the argument passing. Returns the (not yet emitted) call instruction.
 * NOTE(review): elided — the `if (tail)`/else around the opcode choice and
 * some conditions guarding the vret paths are missing below.
 */
2335 inline static MonoCallInst *
2336 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2337 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2340 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2347 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2350 call->signature = sig;
2351 call->rgctx_reg = rgctx;
2353 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* First vtype-return arm (elided condition above, likely tail/gsharedvt):
 * reuse the caller's vret_addr. */
2356 if (mini_type_is_vtype (cfg, sig->ret)) {
2357 call->vret_var = cfg->vret_addr;
2358 //g_assert_not_reached ();
2360 } else if (mini_type_is_vtype (cfg, sig->ret)) {
/* Normal vtype return: allocate a temp and pass its address. */
2361 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2364 temp->backend.is_pinvoke = sig->pinvoke;
2367 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2368 * address of return value to increase optimization opportunities.
2369 * Before vtype decomposition, the dreg of the call ins itself represents the
2370 * fact the call modifies the return value. After decomposition, the call will
2371 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2372 * will be transformed into an LDADDR.
2374 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2375 loada->dreg = alloc_preg (cfg);
2376 loada->inst_p0 = temp;
2377 /* We reference the call too since call->dreg could change during optimization */
2378 loada->inst_p1 = call;
2379 MONO_ADD_INS (cfg->cbb, loada);
2381 call->inst.dreg = temp->dreg;
2383 call->vret_var = loada;
2384 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2385 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2387 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2388 if (COMPILE_SOFT_FLOAT (cfg)) {
2390 * If the call has a float argument, we would need to do an r8->r4 conversion using
2391 * an icall, but that cannot be done during the call sequence since it would clobber
2392 * the call registers + the stack. So we do it before emitting the call.
2394 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2396 MonoInst *in = call->args [i];
2398 if (i >= sig->hasthis)
2399 t = sig->params [i - sig->hasthis];
2401 t = &mono_defaults.int_class->byval_arg;
2402 t = mono_type_get_underlying_type (t);
2404 if (!t->byref && t->type == MONO_TYPE_R4) {
2405 MonoInst *iargs [1];
2409 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2411 /* The result will be in an int vreg */
2412 call->args [i] = conv;
2418 call->need_unbox_trampoline = unbox_trampoline;
2421 if (COMPILE_LLVM (cfg))
2422 mono_llvm_emit_call (cfg, call);
2424 mono_arch_emit_call (cfg, call);
2426 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-argument area and mark the method as calling. */
2429 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2430 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the RGCTX argument (in RGCTX_REG) to CALL: in the dedicated
 * MONO_ARCH_RGCTX_REG when the arch has one, otherwise via
 * call->rgctx_arg_reg (the #else arm, e.g. for LLVM lowering).
 */
2436 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2438 #ifdef MONO_ARCH_RGCTX_REG
2439 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2440 cfg->uses_rgctx_reg = TRUE;
2441 call->rgctx_reg = TRUE;
2443 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally carrying
 * an IMT argument and/or an RGCTX argument. The rgctx value is copied to a
 * fresh register before argument lowering so it survives until the call.
 */
2450 inline static MonoInst*
2451 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2457 rgctx_reg = mono_alloc_preg (cfg);
2458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2461 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* Indirect call: the target address goes in sreg1. */
2463 call->inst.sreg1 = addr->dreg;
2466 emit_imt_argument (cfg, call, NULL, imt_arg);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2471 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2473 return (MonoInst*)call;
2477 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2480 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2482 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) managed call to METHOD with
 *   signature SIG. Handles: string-ctor signature rewriting, transparent-proxy
 *   remoting wrappers, delegate Invoke fast paths, devirtualization of
 *   non-virtual/sealed methods, IMT-based interface dispatch and plain vtable
 *   dispatch. IMT_ARG/RGCTX_ARG are forwarded to the call when given.
 *   NOTE(review): many interior lines (braces, #else/#endif, declarations)
 *   are elided in this sample; control-flow comments below describe only what
 *   is visible.
 */
2485 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2486 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2488 #ifndef DISABLE_REMOTING
2489 gboolean might_be_remote = FALSE;
2491 gboolean virtual = this != NULL;
2492 gboolean enable_for_aot = TRUE;
2496 gboolean need_unbox_trampoline;
2499 sig = mono_method_signature (method);
2502 rgctx_reg = mono_alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2506 if (method->string_ctor) {
2507 /* Create the real signature */
2508 /* FIXME: Cache these */
2509 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2510 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2515 context_used = mini_method_check_context_used (cfg, method);
2517 #ifndef DISABLE_REMOTING
2518 might_be_remote = this && sig->hasthis &&
2519 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2520 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2522 if (might_be_remote && context_used) {
2525 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting-check invoke address from the rgctx
 * and call indirectly instead of binding the wrapper statically. */
2527 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2529 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2533 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2537 #ifndef DISABLE_REMOTING
2538 if (might_be_remote)
2539 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2542 call->method = method;
2543 call->inst.flags |= MONO_INST_HAS_METHOD;
2544 call->inst.inst_left = this;
2545 call->tail_call = tail;
2548 int vtable_reg, slot_reg, this_reg;
2551 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: call through delegate->invoke_impl. */
2553 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2554 MonoInst *dummy_use;
2556 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2558 /* Make a call to delegate->invoke_impl */
2559 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2560 call->inst.inst_basereg = this_reg;
2561 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2562 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2564 /* We must emit a dummy use here because the delegate trampoline will
2565 replace the 'this' argument with the delegate target making this activation
2566 no longer a root for the delegate.
2567 This is an issue for delegates that target collectible code such as dynamic
2568 methods of GC'able assemblies.
2570 For a test case look into #667921.
2572 FIXME: a dummy use is not the best way to do it as the local register allocator
2573 will put it on a caller save register and spill it around the call.
2574 Ideally, we would either put it on a callee save register or only do the store part.
2576 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2578 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final virtual methods, can be
 * dispatched statically once 'this' is null-checked. */
2581 if ((!cfg->compile_aot || enable_for_aot) &&
2582 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2583 (MONO_METHOD_IS_FINAL (method) &&
2584 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2585 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2587 * the method is not virtual, we just need to ensure this is not null
2588 * and then we can call the method directly.
2590 #ifndef DISABLE_REMOTING
2591 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2593 * The check above ensures method is not gshared, this is needed since
2594 * gshared methods can't have wrappers.
2596 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2600 if (!method->string_ctor)
2601 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2603 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2604 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2606 * the method is virtual, but we can statically dispatch since either
2607 * its class or the method itself are sealed.
2608 * But first we need to ensure it's not a null reference.
2610 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2612 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* Truly virtual dispatch: load the vtable (faulting on null 'this'),
 * then pick either an IMT slot (interfaces) or a vtable slot. */
2614 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2616 vtable_reg = alloc_preg (cfg);
2617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2618 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2620 #ifdef MONO_ARCH_HAVE_IMT
2622 guint32 imt_slot = mono_method_get_imt_slot (method);
2623 emit_imt_argument (cfg, call, call->method, imt_arg);
2624 slot_reg = vtable_reg;
/* IMT table sits immediately before the vtable, hence the negative offset. */
2625 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2628 if (slot_reg == -1) {
2629 slot_reg = alloc_preg (cfg);
2630 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2631 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2634 slot_reg = vtable_reg;
2635 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2636 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2637 #ifdef MONO_ARCH_HAVE_IMT
2639 g_assert (mono_method_signature (method)->generic_param_count);
2640 emit_imt_argument (cfg, call, call->method, imt_arg);
2645 call->inst.sreg1 = slot_reg;
2646 call->inst.inst_offset = offset;
2647 call->virtual = TRUE;
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2654 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2656 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature and
 *   no imt/rgctx arguments.
 */
2660 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2662 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call is elided in this sample.
 */
2666 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2673 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2676 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2678 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the JIT icall registered for FUNC and emit a native call to its
 *   wrapper.
 */
2682 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2684 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2688 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2692 * mono_emit_abs_call:
2694 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2696 inline static MonoInst*
2697 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2698 MonoMethodSignature *sig, MonoInst **args)
2700 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2704 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch info is registered in cfg->abs_patches (keyed by itself) so the
 * backend can later resolve the fake call address to the real target. */
2707 if (cfg->abs_patches == NULL)
2708 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2709 g_hash_table_insert (cfg->abs_patches, ji, ji);
2710 ins = mono_emit_native_call (cfg, ji, sig, args);
2711 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or LLVM) calls,
 *   since native code might leave the upper bits of sub-register-sized
 *   integers uninitialized. Returns the (possibly widened) result instruction.
 *   NOTE(review): 'break' lines in the switch are elided in this sample.
 */
2716 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2719 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2723 * Native code might return non register sized integers
2724 * without initializing the upper bits.
2726 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2727 case OP_LOADI1_MEMBASE:
2728 widen_op = OP_ICONV_TO_I1;
2730 case OP_LOADU1_MEMBASE:
2731 widen_op = OP_ICONV_TO_U1;
2733 case OP_LOADI2_MEMBASE:
2734 widen_op = OP_ICONV_TO_I2;
2736 case OP_LOADU2_MEMBASE:
2737 widen_op = OP_ICONV_TO_U2;
2743 if (widen_op != -1) {
2744 int dreg = alloc_preg (cfg);
2747 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2748 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the cached corlib String.memcpy(3-arg) helper; aborts if corlib
 *   does not provide it (old corlib).
 */
2758 get_memcpy_method (void)
2760 static MonoMethod *memcpy_method = NULL;
2761 if (!memcpy_method) {
2762 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2764 g_error ("Old corlib found. Install a new one");
2766 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively build a bitmap (one bit per pointer-sized slot, relative to
 *   OFFSET) marking which fields of KLASS hold GC references. Static fields
 *   are skipped; value-type fields with references recurse.
 */
2770 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2772 MonoClassField *field;
2773 gpointer iter = NULL;
2775 while ((field = mono_class_get_fields (klass, &iter))) {
2778 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header. */
2780 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2781 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2782 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2783 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2785 MonoClass *field_class = mono_class_from_mono_type (field->type);
2786 if (field_class->has_references)
2787 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 *   an arch-specific card-table barrier instruction, inline card marking
 *   (shift + optional mask + store of 1 into the card byte), and finally a
 *   call to the GC's write-barrier method. No-op if barriers are disabled.
 */
2793 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2795 int card_table_shift_bits;
2796 gpointer card_table_mask;
2798 MonoInst *dummy_use;
2799 int nursery_shift_bits;
2800 size_t nursery_size;
2801 gboolean has_card_table_wb = FALSE;
2803 if (!cfg->gen_write_barriers)
2806 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2808 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2810 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2811 has_card_table_wb = TRUE;
2814 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2817 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2818 wbarrier->sreg1 = ptr->dreg;
2819 wbarrier->sreg2 = value->dreg;
2820 MONO_ADD_INS (cfg->cbb, wbarrier);
2821 } else if (card_table) {
2822 int offset_reg = alloc_preg (cfg);
2823 int card_reg = alloc_preg (cfg);
2826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2827 if (card_table_mask)
2828 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2830 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2831 * IMM's larger than 32bits.
2833 if (cfg->compile_aot) {
2834 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2836 MONO_INST_NEW (cfg, ins, OP_PCONST);
2837 ins->inst_p0 = card_table;
2838 ins->dreg = card_reg;
2839 MONO_ADD_INS (cfg->cbb, ins);
2842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2845 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2846 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
2849 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Inline a value-type copy that honors GC write barriers. Only applies when
 *   alignment is at least pointer size and the size fits the 32-slot wb
 *   bitmap; large copies fall back to the mono_gc_wbarrier_value_copy_bitmap
 *   icall, small ones are unrolled into load/store pairs with per-slot
 *   barriers where the bitmap says a slot is a reference.
 *   NOTE(review): some interior lines (early returns, bitmap tests, size
 *   decrements) are elided in this sample.
 */
2853 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2855 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2856 unsigned need_wb = 0;
2861 /*types with references can't have alignment smaller than sizeof(void*) */
2862 if (align < SIZEOF_VOID_P)
2865 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2866 if (size > 32 * SIZEOF_VOID_P)
2869 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2871 /* We don't unroll more than 5 stores to avoid code bloat. */
2872 if (size > 5 * SIZEOF_VOID_P) {
2873 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2874 size += (SIZEOF_VOID_P - 1);
2875 size &= ~(SIZEOF_VOID_P - 1);
2877 EMIT_NEW_ICONST (cfg, iargs [2], size);
2878 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2879 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2883 destreg = iargs [0]->dreg;
2884 srcreg = iargs [1]->dreg;
2887 dest_ptr_reg = alloc_preg (cfg);
2888 tmp_reg = alloc_preg (cfg);
2891 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop. */
2893 while (size >= SIZEOF_VOID_P) {
2894 MonoInst *load_inst;
2895 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2896 load_inst->dreg = tmp_reg;
2897 load_inst->inst_basereg = srcreg;
2898 load_inst->inst_offset = offset;
2899 MONO_ADD_INS (cfg->cbb, load_inst);
2901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2904 emit_write_barrier (cfg, iargs [0], load_inst);
2906 offset += SIZEOF_VOID_P;
2907 size -= SIZEOF_VOID_P;
2910 /*tmp += sizeof (void*)*/
2911 if (size >= SIZEOF_VOID_P) {
2912 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2913 MONO_ADD_INS (cfg->cbb, iargs [0]);
2917 /* Those cannot be references since size < sizeof (void*) */
2919 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2920 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2933 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2934 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2943 * Emit code to copy a valuetype of type @klass whose address is stored in
2944 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Handles gsharedvt classes (size/memcpy fetched from rgctx), write-barrier
 * aware copies for reference-carrying types, small inline memcpy, and a
 * corlib memcpy call as the general fallback.
 * NOTE(review): interior lines (iargs setup, some branches) are elided in
 * this sample. */
2947 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2949 MonoInst *iargs [4];
2950 int context_used, n;
2952 MonoMethod *memcpy_method;
2953 MonoInst *size_ins = NULL;
2954 MonoInst *memcpy_ins = NULL;
2958 * This check breaks with spilled vars... need to handle it during verification anyway.
2959 * g_assert (klass && klass == src->klass && klass == dest->klass);
2962 if (mini_is_gsharedvt_klass (cfg, klass)) {
2964 context_used = mini_class_check_context_used (cfg, klass);
2965 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2966 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
2970 n = mono_class_native_size (klass, &align);
2972 n = mono_class_value_size (klass, &align);
2974 /* if native is true there should be no references in the struct */
2975 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2976 /* Avoid barriers when storing to the stack */
2977 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2978 (dest->opcode == OP_LDADDR))) {
2984 context_used = mini_class_check_context_used (cfg, klass);
2986 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2987 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2989 } else if (context_used) {
2990 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2992 if (cfg->compile_aot) {
2993 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2995 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2996 mono_class_compute_gc_descriptor (klass);
3001 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3003 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3008 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3009 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3010 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3015 iargs [2] = size_ins;
3017 EMIT_NEW_ICONST (cfg, iargs [2], n);
3019 memcpy_method = get_memcpy_method ();
3021 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3023 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the cached corlib String.memset(3-arg) helper; aborts if corlib
 *   does not provide it (old corlib).
 */
3028 get_memset_method (void)
3030 static MonoMethod *memset_method = NULL;
3031 if (!memset_method) {
3032 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3034 g_error ("Old corlib found. Install a new one");
3036 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at address
 *   DEST->dreg. gsharedvt classes use a size-from-rgctx call to the corlib
 *   bzero helper; small sizes are inlined with mini_emit_memset; the rest
 *   call the corlib memset helper.
 *   NOTE(review): some interior lines (iargs[0] setup, returns) are elided
 *   in this sample.
 */
3040 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3042 MonoInst *iargs [3];
3043 int n, context_used;
3045 MonoMethod *memset_method;
3046 MonoInst *size_ins = NULL;
3047 MonoInst *bzero_ins = NULL;
3048 static MonoMethod *bzero_method;
3050 /* FIXME: Optimize this for the case when dest is an LDADDR */
3052 mono_class_init (klass);
3053 if (mini_is_gsharedvt_klass (cfg, klass)) {
3054 context_used = mini_class_check_context_used (cfg, klass);
3055 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3056 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3058 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3059 g_assert (bzero_method);
3061 iargs [1] = size_ins;
3062 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3066 n = mono_class_value_size (klass, &align);
3068 if (n <= sizeof (gpointer) * 5) {
3069 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3072 memset_method = get_memset_method ();
3074 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3075 EMIT_NEW_ICONST (cfg, iargs [2], n);
3076 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that produces the runtime generic context for METHOD. Depending
 *   on how the method is shared the context is taken from: the method rgctx
 *   (mrgctx) variable, the vtable variable (possibly indirected through the
 *   mrgctx's class_vtable), or the 'this' object's vtable.
 *   NOTE(review): interior lines (returns, some branches) are elided in this
 *   sample.
 */
3081 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3083 MonoInst *this = NULL;
3085 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when it exists and the context does not come from a
 * method-level rgctx. */
3087 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3088 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3089 !method->klass->valuetype)
3090 EMIT_NEW_ARGLOAD (cfg, this, 0);
3092 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3093 MonoInst *mrgctx_loc, *mrgctx_var;
3096 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3098 mrgctx_loc = mono_get_vtable_var (cfg);
3099 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3102 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3103 MonoInst *vtable_loc, *vtable_var;
3107 vtable_loc = mono_get_vtable_var (cfg);
3108 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3110 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3111 MonoInst *mrgctx_var = vtable_var;
3114 vtable_reg = alloc_preg (cfg);
3115 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3116 vtable_var->type = STACK_PTR;
3124 vtable_reg = alloc_preg (cfg);
3125 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) an rgctx-entry patch descriptor embedding a MonoJumpInfo
 *   of PATCH_TYPE/PATCH_DATA for INFO_TYPE.
 *   NOTE(review): the 'return res;' line is elided in this sample.
 */
3130 static MonoJumpInfoRgctxEntry *
3131 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3133 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3134 res->method = method;
3135 res->in_mrgctx = in_mrgctx;
3136 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3137 res->data->type = patch_type;
3138 res->data->data.target = patch_data;
3139 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline for ENTRY, passing RGCTX.
 */
3144 static inline MonoInst*
3145 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3147 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR to load the RGCTX_TYPE info of KLASS via an rgctx fetch.
 */
3151 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3152 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3154 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3155 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3157 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR to load the RGCTX_TYPE info of SIG via an rgctx fetch.
 */
3161 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3162 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3164 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3165 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3167 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR to load gsharedvt call info (SIG + CMETHOD pair) of RGCTX_TYPE
 *   via an rgctx fetch.
 */
3171 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3172 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3174 MonoJumpInfoGSharedVtCall *call_info;
3175 MonoJumpInfoRgctxEntry *entry;
3178 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3179 call_info->sig = sig;
3180 call_info->method = cmethod;
3182 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3183 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3185 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR to load the gsharedvt per-method INFO via an rgctx fetch.
 */
3190 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3191 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3193 MonoJumpInfoRgctxEntry *entry;
3196 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3197 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3199 return emit_rgctx_fetch (cfg, rgctx, entry);
3203 * emit_get_rgctx_method:
3205 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3206 * normal constants, else emit a load from the rgctx.
3209 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3210 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3212 if (!context_used) {
/* Non-shared code: the method (or its rgctx) can be emitted as a constant. */
3215 switch (rgctx_type) {
3216 case MONO_RGCTX_INFO_METHOD:
3217 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3219 case MONO_RGCTX_INFO_METHOD_RGCTX:
3220 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3223 g_assert_not_reached ();
3226 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3227 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3229 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR to load the RGCTX_TYPE info of FIELD via an rgctx fetch.
 */
3234 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3235 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3237 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3238 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3240 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the gsharedvt info template matching (DATA, RGCTX_TYPE),
 *   adding a new template entry if none exists. LOCAL_OFFSET entries are never
 *   deduplicated.
 *   NOTE(review): the return statements are elided in this sample.
 */
3244 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3246 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3247 MonoRuntimeGenericContextInfoTemplate *template;
3252 for (i = 0; i < info->entries->len; ++i) {
3253 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3255 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3259 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3260 template->info_type = rgctx_type;
3261 template->data = data;
3263 idx = info->entries->len;
3265 g_ptr_array_add (info->entries, template);
3271 * emit_get_gsharedvt_info:
3273 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3276 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3281 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3282 /* Load info->entries [idx] */
3283 dreg = alloc_preg (cfg);
3284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by a class's byval type. */
3290 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3292 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3296 * On return the caller must check @klass for load errors.
/* emit_generic_class_init: emit a call to the generic class init trampoline
 * for KLASS, passing its vtable (from the rgctx when shared, as a constant
 * otherwise). */
3299 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3301 MonoInst *vtable_arg;
3305 context_used = mini_class_check_context_used (cfg, klass);
3308 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3309 klass, MONO_RGCTX_INFO_VTABLE);
3311 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3315 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3318 if (COMPILE_LLVM (cfg))
3319 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3321 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3322 #ifdef MONO_ARCH_VTABLE_REG
3323 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3324 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IP when sequence points are enabled and
 *   we are compiling METHOD itself (not an inlined callee).
 */
3331 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3335 if (cfg->gen_seq_points && cfg->method == method) {
3336 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3338 ins->flags |= MONO_INST_NONEMPTY_STACK;
3339 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   When --debug=casts is enabled, record the from/to classes of a pending
 *   cast in the JIT TLS data so a failing cast can produce a detailed
 *   message. Optionally branches around the bookkeeping when OBJ_REG is null.
 *   Updates *OUT_BBLOCK with the current bblock when requested.
 *   NOTE(review): some interior lines (guards, braces) are elided in this
 *   sample.
 */
3344 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3346 if (mini_get_debug_options ()->better_cast_details) {
3347 int to_klass_reg = alloc_preg (cfg);
3348 int vtable_reg = alloc_preg (cfg);
3349 int klass_reg = alloc_preg (cfg);
3350 MonoBasicBlock *is_null_bb = NULL;
3354 NEW_BBLOCK (cfg, is_null_bb);
3356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3360 tls_get = mono_get_jit_tls_intrinsic (cfg);
3362 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3366 MONO_ADD_INS (cfg->cbb, tls_get);
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3371 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3372 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3375 MONO_START_BB (cfg, is_null_bb);
3377 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *   Clear the cast-detail bookkeeping written by save_cast_details.
 */
3383 reset_cast_details (MonoCompile *cfg)
3385 /* Reset the variables holding the cast details */
3386 if (mini_get_debug_options ()->better_cast_details) {
3387 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3389 MONO_ADD_INS (cfg->cbb, tls_get);
3390 /* It is enough to reset the from field */
3391 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3396 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type: emit a runtime check that OBJ's dynamic type is
 * exactly ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise.
 * The comparison strategy depends on compilation mode: class pointer
 * (shared), rgctx vtable (context_used), AOT vtable constant, or direct
 * vtable pointer. */
3399 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3401 int vtable_reg = alloc_preg (cfg);
3404 context_used = mini_class_check_context_used (cfg, array_class);
3406 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3408 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3410 if (cfg->opt & MONO_OPT_SHARED) {
3411 int class_reg = alloc_preg (cfg);
3412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3413 if (cfg->compile_aot) {
3414 int klass_reg = alloc_preg (cfg);
3415 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3416 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3420 } else if (context_used) {
3421 MonoInst *vtable_ins;
3423 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3424 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3426 if (cfg->compile_aot) {
3430 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3432 vt_reg = alloc_preg (cfg);
3433 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3434 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3437 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3443 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3445 reset_cast_details (cfg);
3449 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3450 * generic code is generated.
3453 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3455 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3458 MonoInst *rgctx, *addr;
3460 /* FIXME: What if the class is shared? We might not
3461 have to get the address of the method from the
/* Shared path: fetch the Unbox method's code address from the rgctx and
 * call it indirectly. */
3463 addr = emit_get_rgctx_method (cfg, context_used, method,
3464 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3466 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3468 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3470 gboolean pass_vtable, pass_mrgctx;
3471 MonoInst *rgctx_arg = NULL;
3473 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3474 g_assert (!pass_mrgctx);
3477 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3480 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3483 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit the unbox sequence for a boxed value of KLASS on the stack top:
 *   null/rank checks on the vtable, an element-class identity check
 *   (rgctx-based when CONTEXT_USED, otherwise a direct class check), then the
 *   address of the payload (obj + sizeof (MonoObject)).
 *   NOTE(review): interior lines (declarations, returns, braces) are elided
 *   in this sample.
 */
3488 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3492 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3493 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3494 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3495 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3497 obj_reg = sp [0]->dreg;
3498 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3501 /* FIXME: generics */
3502 g_assert (klass->rank == 0);
/* The boxed object must not be an array (rank 0). */
3505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3506 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3512 MonoInst *element_class;
3514 /* This assertion is from the unboxcast insn */
3515 g_assert (klass->rank == 0);
3517 element_class = emit_get_rgctx_klass (cfg, context_used,
3518 klass->element_class, MONO_RGCTX_INFO_KLASS);
3520 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3521 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3523 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3524 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3525 reset_cast_details (cfg);
/* Result: address of the unboxed payload right after the object header. */
3528 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3529 MONO_ADD_INS (cfg->cbb, add);
3530 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Emit the unbox sequence for a gsharedvt KLASS, where the box kind (value
 *   type, reference, or Nullable) is only known at runtime. Branches on the
 *   CLASS_BOX_TYPE rgctx info: vtypes yield obj + sizeof (MonoObject), refs
 *   are spilled to a temporary whose address is used, and Nullables go
 *   through the class's runtime-resolved unbox helper. The merged result is
 *   the payload address in ADDR_REG; *OUT_CBB receives the final bblock.
 *   NOTE(review): interior lines (declarations, some branch bodies) are
 *   elided in this sample.
 */
3537 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3539 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3540 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3544 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3550 args [1] = klass_inst;
3553 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3555 NEW_BBLOCK (cfg, is_ref_bb);
3556 NEW_BBLOCK (cfg, is_nullable_bb);
3557 NEW_BBLOCK (cfg, end_bb);
3558 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3565 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3566 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fallthrough: payload lives right after the object header. */
3570 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3571 MONO_ADD_INS (cfg->cbb, addr);
3573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3576 MONO_START_BB (cfg, is_ref_bb);
3578 /* Save the ref to a temporary */
3579 dreg = alloc_ireg (cfg);
3580 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3581 addr->dreg = addr_reg;
3582 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3586 MONO_START_BB (cfg, is_nullable_bb);
3589 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3590 MonoInst *unbox_call;
3591 MonoMethodSignature *unbox_sig;
3594 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Build a one-argument (object -> T) signature for the unbox helper. */
3596 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3597 unbox_sig->ret = &klass->byval_arg;
3598 unbox_sig->param_count = 1;
3599 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3600 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3602 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3603 addr->dreg = addr_reg;
3606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3609 MONO_START_BB (cfg, end_bb);
3612 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3614 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS and return the MonoInst holding
 * the result.  for_box selects the boxing-specialized allocator; context_used
 * non-zero means the class must be resolved through the RGCTX at runtime
 * (generic sharing).  NOTE(review): interior lines are elided in this
 * extraction, so some branches/braces are not visible.
 *
 * Returns NULL and set the cfg exception on error.
 */
3623 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3625 MonoInst *iargs [2];
/* --- Shared-generic path: class/vtable comes from the RGCTX. --- */
3631 MonoInst *iargs [2];
3633 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* With MONO_OPT_SHARED the icall takes (domain, klass); otherwise the
 * specific allocator takes a vtable. */
3635 if (cfg->opt & MONO_OPT_SHARED)
3636 rgctx_info = MONO_RGCTX_INFO_KLASS;
3638 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3639 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3641 if (cfg->opt & MONO_OPT_SHARED) {
3642 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3644 alloc_ftn = mono_object_new;
3647 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's inlined managed allocator when available. */
3650 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3651 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3653 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- Non-shared path: klass is known at compile time. --- */
3656 if (cfg->opt & MONO_OPT_SHARED) {
3657 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3658 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3660 alloc_ftn = mono_object_new;
3661 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3662 /* This happens often in argument checking code, eg. throw new FooException... */
3663 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3664 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3665 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3667 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3668 MonoMethod *managed_alloc = NULL;
/* vtable creation failed (elided check): report a TypeLoadException. */
3672 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3673 cfg->exception_ptr = klass;
3677 #ifndef MONO_CROSS_COMPILE
3678 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3681 if (managed_alloc) {
3682 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3683 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3685 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw (elided condition): the allocator wants the instance size in
 * pointer-sized words as its first argument. */
3687 guint32 lw = vtable->klass->instance_size;
3688 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3689 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3690 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3693 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3697 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing VAL (of class KLASS) into a heap object and return the
 * boxed reference.  Handles three cases: Nullable<T> (delegates to
 * Nullable<T>.Box), gsharedvt classes (runtime dispatch on the box type),
 * and plain valuetypes (allocate + store).  *out_cbb receives the current
 * basic block after emission.  NOTE(review): interior lines are elided in
 * this extraction.
 *
 * Returns NULL and set the cfg exception on error.
 */
3704 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3706 MonoInst *alloc, *ins;
3708 *out_cbb = cfg->cbb;
/* Nullable<T>: boxing is implemented by the managed Nullable<T>.Box method. */
3710 if (mono_class_is_nullable (klass)) {
3711 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3714 /* FIXME: What if the class is shared? We might not
3715 have to get the method address from the RGCTX. */
3716 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3717 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3718 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3720 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3722 gboolean pass_vtable, pass_mrgctx;
3723 MonoInst *rgctx_arg = NULL;
3725 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3726 g_assert (!pass_mrgctx);
3729 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3732 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3735 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the concrete box behavior is only known at runtime. */
3739 if (mini_is_gsharedvt_klass (cfg, klass)) {
3740 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3741 MonoInst *res, *is_ref, *src_var, *addr;
3744 dreg = alloc_ireg (cfg);
3746 NEW_BBLOCK (cfg, is_ref_bb);
3747 NEW_BBLOCK (cfg, is_nullable_bb);
3748 NEW_BBLOCK (cfg, end_bb);
/* Box-type discriminator: 1 => reference type, 2 => Nullable<T>. */
3749 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Valuetype path: allocate and copy the value past the object header. */
3757 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3760 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3761 ins->opcode = OP_STOREV_MEMBASE;
3763 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3764 res->type = STACK_OBJ;
3766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference path: "boxing" a reference is the identity; load it back out. */
3769 MONO_START_BB (cfg, is_ref_bb);
3770 addr_reg = alloc_ireg (cfg);
3772 /* val is a vtype, so has to load the value manually */
3773 src_var = get_vreg_to_inst (cfg, val->dreg);
3775 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3776 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3777 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: indirect call to Nullable<T>.Box through RGCTX info. */
3781 MONO_START_BB (cfg, is_nullable_bb);
3784 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3785 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3787 MonoMethodSignature *box_sig;
3790 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3791 * construct that method at JIT time, so have to do things by hand.
3793 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3794 box_sig->ret = &mono_defaults.object_class->byval_arg;
3795 box_sig->param_count = 1;
3796 box_sig->params [0] = &klass->byval_arg;
3797 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3798 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3799 res->type = STACK_OBJ;
3803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3805 MONO_START_BB (cfg, end_bb);
3807 *out_cbb = cfg->cbb;
/* Plain valuetype: allocate, then copy the value after the header. */
3811 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3815 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, when context_used is set,
 * an open generic) that has at least one covariant/contravariant type
 * parameter instantiated with a reference type.  Used by the cast helpers to
 * decide whether a full runtime variance-aware cast is required.
 * NOTE(review): the fall-through returns are elided in this extraction.
 */
3822 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3825 MonoGenericContainer *container;
3826 MonoGenericInst *ginst;
3828 if (klass->generic_class) {
/* Closed generic instance: inspect the instantiation arguments. */
3829 container = klass->generic_class->container_class->generic_container;
3830 ginst = klass->generic_class->context.class_inst;
3831 } else if (klass->generic_container && context_used) {
/* Open generic under generic sharing: inspect the container itself. */
3832 container = klass->generic_container;
3833 ginst = container->context.class_inst;
3838 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters can make the cast variance-sensitive. */
3840 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3842 type = ginst->type_argv [i];
3843 if (mini_type_is_reference (cfg, type))
3849 // FIXME: This doesn't work yet (class libs tests fail?)
3850 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' opcode: checks that SRC is an instance of
 * KLASS, throwing InvalidCastException otherwise, and returns the (typed)
 * reference.  Complex cases (interfaces, arrays, variance, nullable, etc.)
 * go through the cached managed castclass helper.  NOTE(review): interior
 * lines are elided in this extraction.
 *
 * Returns NULL and set the cfg exception on error.
 */
3856 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3858 MonoBasicBlock *is_null_bb;
3859 int obj_reg = src->dreg;
3860 int vtable_reg = alloc_preg (cfg);
3861 MonoInst *klass_inst = NULL;
/* Complicated casts: call the managed helper with a per-class cast cache. */
3866 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3867 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3868 MonoInst *cache_ins;
3870 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3875 /* klass - it's the second element of the cache entry*/
3876 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3879 args [2] = cache_ins;
3881 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3884 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3887 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully. */
3889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3892 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
3894 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3896 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3898 int klass_reg = alloc_preg (cfg);
3900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single pointer compare. */
3902 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3903 /* the remoting code is broken, access the class for now */
3904 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3905 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3907 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3908 cfg->exception_ptr = klass;
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3916 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3919 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3923 MONO_START_BB (cfg, is_null_bb);
3925 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL 'isinst' opcode: evaluates to SRC if SRC is an
 * instance of KLASS, otherwise to NULL (no exception).  Complex cases go
 * through the cached managed isinst helper; otherwise the test is emitted
 * inline with specialized paths for interfaces, arrays, enums, sealed
 * classes and nullables.  NOTE(review): interior lines are elided in this
 * extraction.
 *
 * Returns NULL and set the cfg exception on error.
 */
3934 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3937 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3938 int obj_reg = src->dreg;
3939 int vtable_reg = alloc_preg (cfg);
3940 int res_reg = alloc_ireg_ref (cfg);
3941 MonoInst *klass_inst = NULL;
/* Complicated checks: call the managed helper with a per-class cast cache. */
3946 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3947 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3948 MonoInst *cache_ins;
3950 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3955 /* klass - it's the second element of the cache entry*/
3956 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3959 args [2] = cache_ins;
3961 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3964 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3967 NEW_BBLOCK (cfg, is_null_bb);
3968 NEW_BBLOCK (cfg, false_bb);
3969 NEW_BBLOCK (cfg, end_bb);
3971 /* Do the assignment at the beginning, so the other assignment can be if converted */
3972 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3973 ins->type = STACK_OBJ;
/* isinst(null) is simply null. */
3976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3981 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3982 g_assert (!context_used);
3983 /* the is_null_bb target simply copies the input register to the output */
3984 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3986 int klass_reg = alloc_preg (cfg);
/* Array case (elided condition): compare rank, then the element class. */
3989 int rank_reg = alloc_preg (cfg);
3990 int eclass_reg = alloc_preg (cfg);
3992 g_assert (!context_used);
3993 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3994 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3995 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3996 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases mirror array covariance rules for enums
 * and System.Object/Enum element types. */
3998 if (klass->cast_class == mono_defaults.object_class) {
3999 int parent_reg = alloc_preg (cfg);
4000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4001 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4002 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4003 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4004 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4005 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4006 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4008 } else if (klass->cast_class == mono_defaults.enum_class) {
4009 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4011 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4012 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4014 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4015 /* Check that the object is a vector too */
4016 int bounds_reg = alloc_preg (cfg);
4017 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4019 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4022 /* the is_null_bb target simply copies the input register to the output */
4023 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4025 } else if (mono_class_is_nullable (klass)) {
4026 g_assert (!context_used);
4027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4028 /* the is_null_bb target simply copies the input register to the output */
4029 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single vtable/class pointer comparison suffices. */
4031 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4032 g_assert (!context_used);
4033 /* the remoting code is broken, access the class for now */
4034 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4035 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4037 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4038 cfg->exception_ptr = klass;
4041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4046 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4047 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4050 /* the is_null_bb target simply copies the input register to the output */
4051 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result is NULL. */
4056 MONO_START_BB (cfg, false_bb);
4058 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4059 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4061 MONO_START_BB (cfg, is_null_bb);
4063 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check used by transparent proxies.
 * See the in-body comment for the 0/1/2 result encoding.  With
 * DISABLE_REMOTING the proxy paths are compiled out.  NOTE(review): interior
 * lines are elided in this extraction.
 */
4069 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4071 /* This opcode takes as input an object reference and a class, and returns:
4072 0) if the object is an instance of the class,
4073 1) if the object is not instance of the class,
4074 2) if the object is a proxy whose type cannot be determined */
4077 #ifndef DISABLE_REMOTING
4078 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4080 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4082 int obj_reg = src->dreg;
4083 int dreg = alloc_ireg (cfg);
4085 #ifndef DISABLE_REMOTING
4086 int klass_reg = alloc_preg (cfg);
4089 NEW_BBLOCK (cfg, true_bb);
4090 NEW_BBLOCK (cfg, false_bb);
4091 NEW_BBLOCK (cfg, end_bb);
4092 #ifndef DISABLE_REMOTING
4093 NEW_BBLOCK (cfg, false2_bb);
4094 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4100 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4101 #ifndef DISABLE_REMOTING
4102 NEW_BBLOCK (cfg, interface_fail_bb);
4105 tmp_reg = alloc_preg (cfg);
4106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4107 #ifndef DISABLE_REMOTING
/* Interface check failed: the object may still be a transparent proxy
 * whose custom type info decides the answer. */
4108 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4109 MONO_START_BB (cfg, interface_fail_bb);
4110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4112 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4114 tmp_reg = alloc_preg (cfg);
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4119 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4122 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish plain objects from transparent proxies. */
4123 tmp_reg = alloc_preg (cfg);
4124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4125 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4127 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4128 tmp_reg = alloc_preg (cfg);
4129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4132 tmp_reg = alloc_preg (cfg);
4133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: an inconclusive check yields result 2. */
4137 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4140 MONO_START_BB (cfg, no_proxy_bb);
4142 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4144 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
4148 MONO_START_BB (cfg, false_bb);
4150 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4151 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4153 #ifndef DISABLE_REMOTING
4154 MONO_START_BB (cfg, false2_bb);
4156 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4160 MONO_START_BB (cfg, true_bb);
4162 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4164 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST so the caller gets a MonoInst carrying dreg. */
4167 MONO_INST_NEW (cfg, ins, OP_ICONST);
4169 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check used by transparent
 * proxies.  See the in-body comment for the 0/1 result encoding; a failed
 * cast on a non-proxy throws InvalidCastException.  With DISABLE_REMOTING
 * the proxy paths are compiled out.  NOTE(review): interior lines are elided
 * in this extraction.
 */
4175 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4177 /* This opcode takes as input an object reference and a class, and returns:
4178 0) if the object is an instance of the class,
4179 1) if the object is a proxy whose type cannot be determined
4180 an InvalidCastException exception is thrown otherwhise*/
4183 #ifndef DISABLE_REMOTING
4184 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4186 MonoBasicBlock *ok_result_bb;
4188 int obj_reg = src->dreg;
4189 int dreg = alloc_ireg (cfg);
4190 int tmp_reg = alloc_preg (cfg);
4192 #ifndef DISABLE_REMOTING
4193 int klass_reg = alloc_preg (cfg);
4194 NEW_BBLOCK (cfg, end_bb);
4197 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4199 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4202 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4204 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4205 #ifndef DISABLE_REMOTING
4206 NEW_BBLOCK (cfg, interface_fail_bb);
4208 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy without custom type
 * info may still succeed (as the undecidable result 1). */
4209 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4210 MONO_START_BB (cfg, interface_fail_bb);
4211 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4213 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4215 tmp_reg = alloc_preg (cfg);
4216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4218 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4220 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4221 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4224 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4228 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish plain objects from transparent proxies. */
4229 NEW_BBLOCK (cfg, no_proxy_bb);
4231 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4233 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4235 tmp_reg = alloc_preg (cfg);
4236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4239 tmp_reg = alloc_preg (cfg);
4240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: a failed check yields the undecidable
 * result 1 instead of throwing. */
4244 NEW_BBLOCK (cfg, fail_1_bb);
4246 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4248 MONO_START_BB (cfg, fail_1_bb);
4250 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4251 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4253 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass (throws on failure). */
4255 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4257 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4261 MONO_START_BB (cfg, ok_result_bb);
4263 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4265 #ifndef DISABLE_REMOTING
4266 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST so the caller gets a MonoInst carrying dreg. */
4270 MONO_INST_NEW (cfg, ins, OP_ICONST);
4272 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of class KLASS bound to METHOD with
 * TARGET as its first argument, inlining the work of mono_delegate_ctor ():
 * it fills the target, method, (optionally) method_code and invoke_impl
 * fields directly.  context_used non-zero means method info comes from the
 * RGCTX.  NOTE(review): interior lines are elided in this extraction.
 *
 * Returns NULL and set the cfg exception on error.
 */
4280 static G_GNUC_UNUSED MonoInst*
4281 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4285 gpointer *trampoline;
4286 MonoInst *obj, *method_ins, *tramp_ins;
4290 obj = handle_alloc (cfg, klass, FALSE, 0);
4294 /* Inline the contents of mono_delegate_ctor */
4296 /* Set target field */
4297 /* Optimize away setting of NULL target */
4298 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC. */
4300 if (cfg->gen_write_barriers) {
4301 dreg = alloc_preg (cfg);
4302 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4303 emit_write_barrier (cfg, ptr, target);
4307 /* Set method field */
4308 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4310 if (cfg->gen_write_barriers) {
4311 dreg = alloc_preg (cfg);
4312 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4313 emit_write_barrier (cfg, ptr, method_ins);
4316 * To avoid looking up the compiled code belonging to the target method
4317 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4318 * store it, and we fill it after the method has been compiled.
4320 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4321 MonoInst *code_slot_ins;
4324 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain method -> code-slot hash under
 * the domain lock. */
4326 domain = mono_domain_get ();
4327 mono_domain_lock (domain);
4328 if (!domain_jit_info (domain)->method_code_hash)
4329 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4330 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4332 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4333 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4335 mono_domain_unlock (domain);
4337 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4342 /* Set invoke_impl field */
4343 if (cfg->compile_aot) {
4344 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4346 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4347 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4349 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4351 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va () icall to allocate a
 * multi-dimensional array of the given RANK; SP holds the dimension/bound
 * arguments.  Marks the cfg as containing varargs and disables LLVM, which
 * cannot handle the vararg calling convention used here.
 */
4357 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4359 MonoJitICallInfo *info;
4361 /* Need to register the icall so it gets an icall wrapper */
4362 info = mono_get_array_new_va_icall (rank);
4364 cfg->flags |= MONO_CFG_HAS_VARARGS;
4366 /* mono_array_new_va () needs a vararg calling convention */
4367 cfg->disable_llvm = TRUE;
4369 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4370 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the cfg uses a GOT variable that has not been materialized yet, emit
 * an OP_LOAD_GOTADDR at the very start of the entry basic block to populate
 * it, and add a dummy use in the exit block so liveness analysis keeps the
 * variable alive for the whole method.  No-op if there is no got_var or it
 * was already allocated.
 */
4374 mono_emit_load_got_addr (MonoCompile *cfg)
4376 MonoInst *getaddr, *dummy_use;
4378 if (!cfg->got_var || cfg->got_var_allocated)
4381 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4382 getaddr->cil_code = cfg->header->code;
4383 getaddr->dreg = cfg->got_var->dreg;
4385 /* Add it to the start of the first bblock */
4386 if (cfg->bb_entry->code) {
4387 getaddr->next = cfg->bb_entry->code;
4388 cfg->bb_entry->code = getaddr;
4391 MONO_ADD_INS (cfg->bb_entry, getaddr);
4393 cfg->got_var_allocated = TRUE;
4396 * Add a dummy use to keep the got_var alive, since real uses might
4397 * only be generated by the back ends.
4398 * Add it to end_bblock, so the variable's lifetime covers the whole
4400 * It would be better to make the usage of the got var explicit in all
4401 * cases when the backend needs it (i.e. calls, throw etc.), so this
4402 * wouldn't be needed.
4404 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4405 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining code-size threshold (default INLINE_LENGTH_LIMIT), overridable
 * via the MONO_INLINELIMIT env var; lazily initialized in
 * mono_method_check_inlining (). */
4408 static int inline_limit;
4409 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects methods under generic sharing, beyond inline depth 10,
 * explicitly marked NOINLINING or SYNCHRONIZED, MarshalByRef classes, bodies
 * over the size limit (unless AGGRESSIVE_INLINING), classes whose cctor
 * would have to run inside the inlined code, methods with declarative
 * security, and (on soft-float targets) methods with R4 in their signature.
 * NOTE(review): interior lines (returns/braces) are elided in this
 * extraction.
 */
4412 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4414 MonoMethodHeaderSummary header;
4416 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4417 MonoMethodSignature *sig = mono_method_signature (method);
4421 if (cfg->generic_sharing_context)
4424 if (cfg->inline_depth > 10)
4427 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke stubs can be inlined too. */
4428 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4429 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4430 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4435 if (!mono_method_get_header_summary (method, &header))
4438 /*runtime, icall and pinvoke are checked by summary call*/
4439 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4440 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4441 (mono_class_is_marshalbyref (method->klass)) ||
4445 /* also consider num_locals? */
4446 /* Do the size check early to avoid creating vtables */
4447 if (!inline_limit_inited) {
4448 if (g_getenv ("MONO_INLINELIMIT"))
4449 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4451 inline_limit = INLINE_LENGTH_LIMIT;
4452 inline_limit_inited = TRUE;
4454 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4458 * if we can initialize the class of the method right away, we do,
4459 * otherwise we don't allow inlining if the class needs initialization,
4460 * since it would mean inserting a call to mono_runtime_class_init()
4461 * inside the inlined code
4463 if (!(cfg->opt & MONO_OPT_SHARED)) {
4464 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4465 if (cfg->run_cctors && method->klass->has_cctor) {
4466 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4467 if (!method->klass->runtime_info)
4468 /* No vtable created yet */
4470 vtable = mono_class_vtable (cfg->domain, method->klass);
4473 /* This makes so that inline cannot trigger */
4474 /* .cctors: too many apps depend on them */
4475 /* running with a specific order... */
4476 if (! vtable->initialized)
4478 mono_runtime_class_init (vtable);
4480 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4481 if (!method->klass->runtime_info)
4482 /* No vtable created yet */
4484 vtable = mono_class_vtable (cfg->domain, method->klass);
4487 if (!vtable->initialized)
4492 * If we're compiling for shared code
4493 * the cctor will need to be run at aot method load time, for example,
4494 * or at the end of the compilation of the inlining method.
4496 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4501 * CAS - do not inline methods with declarative security
4502 * Note: this has to be before any possible return TRUE;
4504 if (mono_security_method_has_declsec (method))
4507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 arguments/returns need the fallback path, so skip inlining. */
4508 if (mono_arch_is_soft_float ()) {
4510 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4512 for (i = 0; i < sig->param_count; ++i)
4513 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access to KLASS from METHOD requires
 * emitting a class-initialization call.  Not needed when (JIT mode) the
 * vtable is already initialized, when the class is BeforeFieldInit, when it
 * has no cctor to run, or when the access is from an instance method of the
 * class itself (the cctor already ran before the method could be entered).
 * NOTE(review): the return statements are elided in this extraction.
 */
4522 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4524 if (!cfg->compile_aot) {
4526 if (vtable->initialized)
4530 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4533 if (!mono_class_needs_cctor_run (klass, method))
4536 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4537 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the rank-1 array ARR
 * (element class KLASS); emits a bounds check when BCHECK is set.  Returns
 * the address instruction (type STACK_MP).
 * NOTE(review): several lines (gsharedvt branch body, #else/#endif markers,
 * returns) are elided from this listing.
 */
4544 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4548 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt element types have a runtime-determined size; handled below. */
4551 if (mini_is_gsharedvt_klass (cfg, klass)) {
4554 mono_class_init (klass);
4555 size = mono_class_array_element_size (klass);
4558 mult_reg = alloc_preg (cfg);
4559 array_reg = arr->dreg;
4560 index_reg = index->dreg;
4562 #if SIZEOF_REGISTER == 8
4563 /* The array reg is 64 bits but the index reg is only 32 */
4564 if (COMPILE_LLVM (cfg)) {
4566 index2_reg = index_reg;
4568 index2_reg = alloc_preg (cfg);
4569 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to native width. */
4572 if (index->type == STACK_I8) {
4573 index2_reg = alloc_preg (cfg);
4574 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4576 index2_reg = index_reg;
4581 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scale into a single LEA for pow2 sizes. */
4583 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4584 if (size == 1 || size == 2 || size == 4 || size == 8) {
4585 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4587 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4588 ins->klass = mono_class_get_element_class (klass);
4589 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4595 add_reg = alloc_ireg_mp (cfg);
4598 MonoInst *rgctx_ins;
/* gsharedvt: fetch the element size from the RGCTX at run time. */
4601 g_assert (cfg->generic_sharing_context);
4602 context_used = mini_class_check_context_used (cfg, klass);
4603 g_assert (context_used);
4604 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4605 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4609 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4610 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4611 ins->klass = mono_class_get_element_class (klass);
4612 ins->type = STACK_MP;
4613 MONO_ADD_INS (cfg->cbb, ins);
4618 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 * per-dimension lower bounds, including two range checks against the
 * MonoArrayBounds pairs.  Returns the address instruction (STACK_MP).
 * Only compiled when the target has a real multiply (guard above).
 */
4620 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4622 int bounds_reg = alloc_preg (cfg);
4623 int add_reg = alloc_ireg_mp (cfg);
4624 int mult_reg = alloc_preg (cfg);
4625 int mult2_reg = alloc_preg (cfg);
4626 int low1_reg = alloc_preg (cfg);
4627 int low2_reg = alloc_preg (cfg);
4628 int high1_reg = alloc_preg (cfg);
4629 int high2_reg = alloc_preg (cfg);
4630 int realidx1_reg = alloc_preg (cfg);
4631 int realidx2_reg = alloc_preg (cfg);
4632 int sum_reg = alloc_preg (cfg);
4633 int index1, index2, tmpreg;
4637 mono_class_init (klass);
4638 size = mono_class_array_element_size (klass);
4640 index1 = index_ins1->dreg;
4641 index2 = index_ins2->dreg;
4643 #if SIZEOF_REGISTER == 8
4644 /* The array reg is 64 bits but the index reg is only 32 */
4645 if (COMPILE_LLVM (cfg)) {
4648 tmpreg = alloc_preg (cfg);
4649 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4651 tmpreg = alloc_preg (cfg);
4652 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4656 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4660 /* range checking */
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4662 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; throw unless < length. */
4664 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4665 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4666 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4667 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4668 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4669 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4670 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
4672 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4673 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4674 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4676 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4677 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4678 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
4680 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4681 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4683 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4684 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4686 ins->type = STACK_MP;
4688 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch for array Address/Get/Set: rank 1 uses the inline rank-1
 * helper, rank 2 the rank-2 helper (when OP_LMUL exists and intrinsics are
 * enabled), anything else falls back to a marshalled Address () wrapper call.
 * NOTE(review): the rank-1 guard and the final return of 'addr' are elided
 * from this listing.
 */
4695 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4699 MonoMethod *addr_method;
/* A setter has one trailing value argument that is not an index. */
4702 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4705 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4707 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4708 /* emit_ldelema_2 depends on OP_LMUL */
4709 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4710 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address () wrapper for this rank/size. */
4714 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4715 addr_method = mono_marshal_get_array_address (rank, element_size);
4716 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4721 static MonoBreakPolicy
4722 always_insert_breakpoint (MonoMethod *method)
4724 return MONO_BREAK_POLICY_ALWAYS;
/* Active breakpoint policy hook; embedders override it through
 * mono_set_break_policy ().  Defaults to inserting every breakpoint. */
4727 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4730 * mono_set_break_policy:
4731 * policy_callback: the new callback function
4733 * Allow embedders to decide whether to actually obey breakpoint instructions
4734 * (both break IL instructions and Debugger.Break () method calls), for example
4735 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4736 * untrusted or semi-trusted code.
4738 * @policy_callback will be called every time a break point instruction needs to
4739 * be inserted with the method argument being the method that calls Debugger.Break()
4740 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4741 * if it wants the breakpoint to not be effective in the given method.
4742 * #MONO_BREAK_POLICY_ALWAYS is the default.
4745 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4747 if (policy_callback)
4748 break_policy_func = policy_callback;
4750 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the registered break policy whether a breakpoint should really be
 * emitted for METHOD.  (The misspelled name is the established interface —
 * callers in this file use it as-is.)
 * NOTE(review): the 'return TRUE;' / 'return FALSE;' arms of the switch are
 * elided from this listing; consult the full source.
 */
4754 should_insert_brekpoint (MonoMethod *method) {
4755 switch (break_policy_func (method)) {
4756 case MONO_BREAK_POLICY_ALWAYS:
4758 case MONO_BREAK_POLICY_NEVER:
4760 case MONO_BREAK_POLICY_ON_DBG:
4761 return mono_debug_using_mono_debugger ();
/* A callback returned a value outside the MonoBreakPolicy enum. */
4763 g_warning ("Incorrect value returned from break policy callback");
4768 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store through the element address; IS_SET selects the store path.
 * NOTE(review): the if (is_set) / else structure lines are elided here.
 */
4770 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4772 MonoInst *addr, *store, *load;
4773 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4775 /* the bounds check is already done by the callers */
4776 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set path: copy *args [2] into the element, with a write barrier for refs. */
4778 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4779 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4780 if (mini_type_is_reference (cfg, fsig->params [2]))
4781 emit_write_barrier (cfg, addr, load);
/* Get path: copy the element into the caller-provided location. */
4783 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4784 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4791 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4793 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem-style store of sp [2] into sp [0][sp [1]].  Reference
 * stores of possibly-non-null values go through the virtual stelemref
 * helper (which performs the array covariance check); value types and null
 * stores take the direct-address path, with an optional bounds check when
 * SAFETY_CHECKS is set.
 * NOTE(review): several lines (type coercions, returns, #endif) are elided.
 */
4797 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4799 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4800 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4801 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4802 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4803 MonoInst *iargs [3];
4806 mono_class_setup_vtable (obj_array);
4807 g_assert (helper->slot);
4809 if (sp [0]->type != STACK_OBJ)
4811 if (sp [2]->type != STACK_OBJ)
4818 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt elements: size unknown at JIT time, use an address + STOREV. */
4822 if (mini_is_gsharedvt_klass (cfg, klass)) {
4825 // FIXME-VT: OP_ICONST optimization
4826 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4827 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4828 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
4829 } else if (sp [1]->opcode == OP_ICONST) {
4830 int array_reg = sp [0]->dreg;
4831 int index_reg = sp [1]->dreg;
4832 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4835 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4836 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, barrier for refs. */
4838 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4839 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4840 if (generic_class_is_reference_type (cfg, klass))
4841 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore / Array.UnsafeLoad: a store delegates to
 * emit_array_store () without safety checks; a load reads straight through
 * the element address.  The element class comes from the value parameter
 * (store) or the return type (load).
 * NOTE(review): the if (is_set) / else skeleton lines are elided here.
 */
4848 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4853 eklass = mono_class_from_mono_type (fsig->params [2]);
4855 eklass = mono_class_from_mono_type (fsig->ret);
4859 return emit_array_store (cfg, eklass, args, FALSE);
4861 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4862 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Intrinsic expansion hook for constructor calls; currently only tries
 * the SIMD intrinsics when MONO_OPT_SIMD is enabled.
 * NOTE(review): the tail (returning 'ins'/NULL and #endif) is elided here.
 */
4868 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4870 MonoInst *ins = NULL;
4871 #ifdef MONO_ARCH_SIMD_INTRINSICS
4872 if (cfg->opt & MONO_OPT_SIMD) {
4873 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4883 emit_memory_barrier (MonoCompile *cfg, int kind)
4885 MonoInst *ins = NULL;
4886 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4887 MONO_ADD_INS (cfg->cbb, ins);
4888 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics only usable with the LLVM backend: Math.Sin/Cos/Sqrt/Abs
 * mapped to single FP opcodes, and Math.Min/Max mapped to CMOV-style
 * min/max opcodes when MONO_OPT_CMOV is on.
 * NOTE(review): the 'opcode = OP_*' assignment lines and the guards that
 * skip emission when no opcode matched are elided from this listing.
 */
4894 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4896 MonoInst *ins = NULL;
4899 /* The LLVM backend supports these intrinsics */
4900 if (cmethod->klass == mono_defaults.math_class) {
4901 if (strcmp (cmethod->name, "Sin") == 0) {
4903 } else if (strcmp (cmethod->name, "Cos") == 0) {
4905 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4907 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4912 MONO_INST_NEW (cfg, ins, opcode);
4913 ins->type = STACK_R8;
4914 ins->dreg = mono_alloc_freg (cfg);
4915 ins->sreg1 = args [0]->dreg;
4916 MONO_ADD_INS (cfg->cbb, ins);
4920 if (cfg->opt & MONO_OPT_CMOV) {
4921 if (strcmp (cmethod->name, "Min") == 0) {
4922 if (fsig->params [0]->type == MONO_TYPE_I4)
4924 if (fsig->params [0]->type == MONO_TYPE_U4)
4925 opcode = OP_IMIN_UN;
4926 else if (fsig->params [0]->type == MONO_TYPE_I8)
4928 else if (fsig->params [0]->type == MONO_TYPE_U8)
4929 opcode = OP_LMIN_UN;
4930 } else if (strcmp (cmethod->name, "Max") == 0) {
4931 if (fsig->params [0]->type == MONO_TYPE_I4)
4933 if (fsig->params [0]->type == MONO_TYPE_U4)
4934 opcode = OP_IMAX_UN;
4935 else if (fsig->params [0]->type == MONO_TYPE_I8)
4937 else if (fsig->params [0]->type == MONO_TYPE_U8)
4938 opcode = OP_LMAX_UN;
4943 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): U4 parameters fall into the STACK_I8 arm here — looks
 * suspicious; confirm against upstream before relying on it. */
4944 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4945 ins->dreg = mono_alloc_ireg (cfg);
4946 ins->sreg1 = args [0]->dreg;
4947 ins->sreg2 = args [1]->dreg;
4948 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic code sharing: the unchecked
 * Array.UnsafeStore / UnsafeLoad accessors.
 * NOTE(review): the closing brace and fall-through return are elided here.
 */
4956 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4958 if (cmethod->klass == mono_defaults.array_class) {
4959 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4960 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4961 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4962 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsic dispatcher: when CMETHOD is one of the recognized corlib
 * methods, emit equivalent IR inline and return the resulting instruction;
 * otherwise fall through to the SIMD/LLVM/arch-specific hooks at the end.
 * Recognized families visible below: String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger/Environment, Math, and the
 * MonoMac/monotouch Selector.GetHandle AOT optimization.
 * NOTE(review): this listing elides many structural lines (braces, #else /
 * #endif markers, intermediate returns); code lines are kept verbatim.
 */
4969 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4971 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4973 static MonoClass *runtime_helpers_class = NULL;
4974 if (! runtime_helpers_class)
4975 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4976 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4978 if (cmethod->klass == mono_defaults.string_class) {
/* this [index]: bounds check + 16-bit load from the chars area. */
4979 if (strcmp (cmethod->name, "get_Chars") == 0) {
4980 int dreg = alloc_ireg (cfg);
4981 int index_reg = alloc_preg (cfg);
4982 int mult_reg = alloc_preg (cfg);
4983 int add_reg = alloc_preg (cfg);
4985 #if SIZEOF_REGISTER == 8
4986 /* The array reg is 64 bits but the index reg is only 32 */
4987 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4989 index_reg = args [1]->dreg;
4991 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4993 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4994 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4995 add_reg = ins->dreg;
4996 /* Avoid a warning */
4998 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5002 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5004 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5006 type_from_op (ins, NULL, NULL);
5008 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5009 int dreg = alloc_ireg (cfg);
5010 /* Decompose later to allow more optimizations */
5011 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5012 ins->type = STACK_I4;
5013 ins->flags |= MONO_INST_FAULT;
5014 cfg->cbb->has_array_access = TRUE;
5015 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5018 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5019 int mult_reg = alloc_preg (cfg);
5020 int add_reg = alloc_preg (cfg);
5022 /* The corlib functions check for oob already. */
5023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5024 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5025 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5026 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
5029 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: two loads via the vtable, no call. */
5031 if (strcmp (cmethod->name, "GetType") == 0) {
5032 int dreg = alloc_ireg_ref (cfg);
5033 int vt_reg = alloc_preg (cfg);
5034 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5035 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5036 type_from_op (ins, NULL, NULL);
5039 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash; only valid with a non-moving GC. */
5040 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5041 int dreg = alloc_ireg (cfg);
5042 int t1 = alloc_ireg (cfg);
5044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5045 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5046 ins->type = STACK_I4;
/* Object..ctor does nothing: replace the call with a NOP. */
5050 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5051 MONO_INST_NEW (cfg, ins, OP_NOP);
5052 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5056 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl via name + 1. */
5057 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5058 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5060 #ifndef MONO_BIG_ARRAYS
5062 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5065 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5066 int dreg = alloc_ireg (cfg);
5067 int bounds_reg = alloc_ireg_mp (cfg);
5068 MonoBasicBlock *end_bb, *szarray_bb;
5069 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5071 NEW_BBLOCK (cfg, end_bb);
5072 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer marks a szarray (rank 1, zero lower bound). */
5074 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5075 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5077 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5078 /* Non-szarray case */
5080 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5081 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5083 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5084 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5085 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5086 MONO_START_BB (cfg, szarray_bb);
5089 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5090 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5092 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5093 MONO_START_BB (cfg, end_bb);
5095 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5096 ins->type = STACK_I4;
/* Only get_Rank/get_Length remain below; skip anything not starting 'g'. */
5102 if (cmethod->name [0] != 'g')
5105 if (strcmp (cmethod->name, "get_Rank") == 0) {
5106 int dreg = alloc_ireg (cfg);
5107 int vtable_reg = alloc_preg (cfg);
5108 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5109 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5111 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5112 type_from_op (ins, NULL, NULL);
5115 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5116 int dreg = alloc_ireg (cfg);
5118 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5119 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5120 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5125 } else if (cmethod->klass == runtime_helpers_class) {
5127 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5128 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5132 } else if (cmethod->klass == mono_defaults.thread_class) {
5133 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5134 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5135 MONO_ADD_INS (cfg->cbb, ins);
5137 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5138 return emit_memory_barrier (cfg, FullBarrier);
/* --- System.Threading.Monitor fast paths --- */
5140 } else if (cmethod->klass == mono_defaults.monitor_class) {
5142 /* FIXME this should be integrated to the check below once we support the trampoline version */
5143 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5144 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5145 MonoMethod *fast_method = NULL;
5147 /* Avoid infinite recursion */
5148 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5151 fast_method = mono_monitor_get_fast_path (cmethod);
5155 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5159 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Trampoline variant: the object is passed in a fixed register. */
5160 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5163 if (COMPILE_LLVM (cfg)) {
5165 * Pass the argument normally, the LLVM backend will handle the
5166 * calling convention problems.
5168 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5170 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5171 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5172 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5173 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5176 return (MonoInst*)call;
5177 } else if (strcmp (cmethod->name, "Exit") == 0) {
5180 if (COMPILE_LLVM (cfg)) {
5181 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5183 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5184 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5185 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5186 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5189 return (MonoInst*)call;
5191 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5193 MonoMethod *fast_method = NULL;
5195 /* Avoid infinite recursion */
5196 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5197 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5198 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5201 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5202 strcmp (cmethod->name, "Exit") == 0)
5203 fast_method = mono_monitor_get_fast_path (cmethod);
5207 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
5210 } else if (cmethod->klass->image == mono_defaults.corlib &&
5211 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5212 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5215 #if SIZEOF_REGISTER == 8
5216 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5217 /* 64 bit reads are already atomic */
5218 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5219 ins->dreg = mono_alloc_preg (cfg);
5220 ins->inst_basereg = args [0]->dreg;
5221 ins->inst_offset = 0;
5222 MONO_ADD_INS (cfg->cbb, ins);
5226 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add map to OP_ATOMIC_ADD_NEW_* with a constant or
 * caller-supplied addend. */
5227 if (strcmp (cmethod->name, "Increment") == 0) {
5228 MonoInst *ins_iconst;
5231 if (fsig->params [0]->type == MONO_TYPE_I4)
5232 opcode = OP_ATOMIC_ADD_NEW_I4;
5233 #if SIZEOF_REGISTER == 8
5234 else if (fsig->params [0]->type == MONO_TYPE_I8)
5235 opcode = OP_ATOMIC_ADD_NEW_I8;
5238 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5239 ins_iconst->inst_c0 = 1;
5240 ins_iconst->dreg = mono_alloc_ireg (cfg);
5241 MONO_ADD_INS (cfg->cbb, ins_iconst);
5243 MONO_INST_NEW (cfg, ins, opcode);
5244 ins->dreg = mono_alloc_ireg (cfg);
5245 ins->inst_basereg = args [0]->dreg;
5246 ins->inst_offset = 0;
5247 ins->sreg2 = ins_iconst->dreg;
5248 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5249 MONO_ADD_INS (cfg->cbb, ins);
5251 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5252 MonoInst *ins_iconst;
5255 if (fsig->params [0]->type == MONO_TYPE_I4)
5256 opcode = OP_ATOMIC_ADD_NEW_I4;
5257 #if SIZEOF_REGISTER == 8
5258 else if (fsig->params [0]->type == MONO_TYPE_I8)
5259 opcode = OP_ATOMIC_ADD_NEW_I8;
5262 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5263 ins_iconst->inst_c0 = -1;
5264 ins_iconst->dreg = mono_alloc_ireg (cfg);
5265 MONO_ADD_INS (cfg->cbb, ins_iconst);
5267 MONO_INST_NEW (cfg, ins, opcode);
5268 ins->dreg = mono_alloc_ireg (cfg);
5269 ins->inst_basereg = args [0]->dreg;
5270 ins->inst_offset = 0;
5271 ins->sreg2 = ins_iconst->dreg;
5272 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5273 MONO_ADD_INS (cfg->cbb, ins);
5275 } else if (strcmp (cmethod->name, "Add") == 0) {
5278 if (fsig->params [0]->type == MONO_TYPE_I4)
5279 opcode = OP_ATOMIC_ADD_NEW_I4;
5280 #if SIZEOF_REGISTER == 8
5281 else if (fsig->params [0]->type == MONO_TYPE_I8)
5282 opcode = OP_ATOMIC_ADD_NEW_I8;
5286 MONO_INST_NEW (cfg, ins, opcode);
5287 ins->dreg = mono_alloc_ireg (cfg);
5288 ins->inst_basereg = args [0]->dreg;
5289 ins->inst_offset = 0;
5290 ins->sreg2 = args [1]->dreg;
5291 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5292 MONO_ADD_INS (cfg->cbb, ins);
5295 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5297 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5298 if (strcmp (cmethod->name, "Exchange") == 0) {
5300 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5302 if (fsig->params [0]->type == MONO_TYPE_I4)
5303 opcode = OP_ATOMIC_EXCHANGE_I4;
5304 #if SIZEOF_REGISTER == 8
5305 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5306 (fsig->params [0]->type == MONO_TYPE_I))
5307 opcode = OP_ATOMIC_EXCHANGE_I8;
5309 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5310 opcode = OP_ATOMIC_EXCHANGE_I4;
5315 MONO_INST_NEW (cfg, ins, opcode);
5316 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5317 ins->inst_basereg = args [0]->dreg;
5318 ins->inst_offset = 0;
5319 ins->sreg2 = args [1]->dreg;
5320 MONO_ADD_INS (cfg->cbb, ins);
5322 switch (fsig->params [0]->type) {
5324 ins->type = STACK_I4;
5328 ins->type = STACK_I8;
5330 case MONO_TYPE_OBJECT:
5331 ins->type = STACK_OBJ;
5334 g_assert_not_reached ();
/* Storing a reference needs a GC write barrier. */
5337 if (cfg->gen_write_barriers && is_ref)
5338 emit_write_barrier (cfg, args [0], args [1]);
5340 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5342 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5343 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5345 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5346 if (fsig->params [1]->type == MONO_TYPE_I4)
5348 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5349 size = sizeof (gpointer);
5350 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5353 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5354 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5355 ins->sreg1 = args [0]->dreg;
5356 ins->sreg2 = args [1]->dreg;
5357 ins->sreg3 = args [2]->dreg;
5358 ins->type = STACK_I4;
5359 MONO_ADD_INS (cfg->cbb, ins);
5360 } else if (size == 8) {
5361 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5362 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5363 ins->sreg1 = args [0]->dreg;
5364 ins->sreg2 = args [1]->dreg;
5365 ins->sreg3 = args [2]->dreg;
5366 ins->type = STACK_I8;
5367 MONO_ADD_INS (cfg->cbb, ins);
5369 /* g_assert_not_reached (); */
5371 if (cfg->gen_write_barriers && is_ref)
5372 emit_write_barrier (cfg, args [0], args [1]);
5374 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5376 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5377 ins = emit_memory_barrier (cfg, FullBarrier);
/* --- Other corlib types: Debugger.Break, Environment --- */
5381 } else if (cmethod->klass->image == mono_defaults.corlib) {
5382 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5383 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5384 if (should_insert_brekpoint (cfg->method)) {
5385 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5387 MONO_INST_NEW (cfg, ins, OP_NOP);
5388 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.IsRunningOnWindows folds to a compile-time constant. */
5392 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5393 && strcmp (cmethod->klass->name, "Environment") == 0) {
5395 EMIT_NEW_ICONST (cfg, ins, 1);
5397 EMIT_NEW_ICONST (cfg, ins, 0);
5401 } else if (cmethod->klass == mono_defaults.math_class) {
5403 * There is general branches code for Min/Max, but it does not work for
5405 * http://everything2.com/?node_id=1051618
/* MonoMac/monotouch Selector.GetHandle on a string literal: resolve the
 * ObjC selector at AOT time via OP_OBJC_GET_SELECTOR. */
5407 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5408 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5410 MonoJumpInfoToken *ji;
5413 cfg->disable_llvm = TRUE;
5415 if (args [0]->opcode == OP_GOT_ENTRY) {
5416 pi = args [0]->inst_p1;
5417 g_assert (pi->opcode == OP_PATCH_INFO);
5418 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5421 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5422 ji = args [0]->inst_p0;
5425 NULLIFY_INS (args [0]);
5428 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5429 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5430 ins->dreg = mono_alloc_ireg (cfg);
5432 ins->inst_p0 = mono_string_to_utf8 (s);
5433 MONO_ADD_INS (cfg->cbb, ins);
/* --- Fallbacks: SIMD intrinsics, LLVM intrinsics, arch hook --- */
5438 #ifdef MONO_ARCH_SIMD_INTRINSICS
5439 if (cfg->opt & MONO_OPT_SIMD) {
5440 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5446 if (COMPILE_LLVM (cfg)) {
5447 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5452 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5456 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to faster equivalents; today
 * this only rewrites String.InternalAllocateStr to the GC's managed string
 * allocator (skipped when allocation profiling or shared code is active).
 * NOTE(review): the #endif, the !managed_alloc bail-out and the final
 * 'return NULL' are elided from this listing.
 */
5459 inline static MonoInst*
5460 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5461 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5463 if (method->klass == mono_defaults.string_class) {
5464 /* managed string allocation support */
5465 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5466 MonoInst *iargs [2];
5467 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5468 MonoMethod *managed_alloc = NULL;
5470 g_assert (vtable); /* Should not fail since it is System.String */
5471 #ifndef MONO_CROSS_COMPILE
5472 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length). */
5476 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5477 iargs [1] = args [0];
5478 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for each argument of the
 * inlined callee (including the implicit 'this') and store the caller's
 * stack values SP into them, so the callee body can address them like real
 * arguments.
 */
5485 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5487 MonoInst *store, *temp;
5490 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; take its type from the stack. */
5491 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5494 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5495 * would be different than the MonoInst's used to represent arguments, and
5496 * the ldelema implementation can't deal with that.
5497 * Solution: When ldelema is used on an inline argument, create a var for
5498 * it, emit ldelema on that var, and emit the saving code below in
5499 * inline_method () if needed.
5501 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5502 cfg->args [i] = temp;
5503 /* This uses cfg->args [i] which is set by the preceding line */
5504 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5505 store->cil_code = sp [0]->cil_code;
/* NOTE(review): upstream mono defaults both of these to 0; with 1, the JIT
 * consults the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment
 * variables on inline decisions — confirm enabling them is intentional. */
5510 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5511 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5513 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: returns whether CALLED_METHOD may be inlined, by prefix-
 * matching its full name against the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable (cached in a static on first use).
 * NOTE(review): the 'else limit = ""' fallback and the unconditional-TRUE
 * return for an empty limit are elided from this listing.
 */
5515 check_inline_called_method_name_limit (MonoMethod *called_method)
5518 static const char *limit = NULL;
5520 if (limit == NULL) {
5521 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5523 if (limit_string != NULL)
5524 limit = limit_string;
5529 if (limit [0] != '\0') {
5530 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* strncmp == 0 means the full name starts with the configured prefix. */
5532 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5533 g_free (called_method_name);
5535 //return (strncmp_result <= 0);
5536 return (strncmp_result == 0);
5543 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper, mirror of the "called" variant above: returns whether
 * inlining is allowed inside CALLER_METHOD, by prefix-matching its full
 * name against MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * NOTE(review): the empty-limit fallback and final return are elided here.
 */
5545 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5548 static const char *limit = NULL;
5550 if (limit == NULL) {
5551 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5552 if (limit_string != NULL) {
5553 limit = limit_string;
5559 if (limit [0] != '\0') {
5560 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5562 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5563 g_free (caller_method_name);
5565 //return (strncmp_result <= 0);
5566 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline-return variable RVAR to zero/null according to
 * its stack type: integer/pointer constants, an R8 constant loaded from a
 * static 0.0, or VZERO for value types.
 * NOTE(review): the 'case STACK_*:' labels and 'break' statements are
 * elided from this listing — the groupings below follow the emitted ops.
 */
5574 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Shared storage for the 0.0 bit pattern referenced by OP_R8CONST. */
5576 static double r8_0 = 0.0;
5579 switch (rvar->type) {
5581 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5584 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5589 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5592 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5593 ins->type = STACK_R8;
5594 ins->inst_p0 = (void*)&r8_0;
5595 ins->dreg = rvar->dreg;
5596 MONO_ADD_INS (cfg->cbb, ins);
5599 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5602 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current position. SP points to the arguments
 * already on the evaluation stack, IP to the call site. Returns a nonzero cost
 * on success (visible only via the elided return paths) and cleans up and
 * bails out otherwise. INLINE_ALWAYS forces inlining regardless of cost.
 * The function saves the per-method fields of CFG, recursively invokes
 * mono_method_to_ir () on the callee, restores the fields, then either links
 * the generated bblocks into the caller or discards them.
 * NOTE(review): interior lines are elided in this view; return statements and
 * some cleanup paths are not visible here.
 */
5607 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5608 	guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5610 	MonoInst *ins, *rvar = NULL;
5611 	MonoMethodHeader *cheader;
5612 	MonoBasicBlock *ebblock, *sbblock;
5614 	MonoMethod *prev_inlined_method;
5615 	MonoInst **prev_locals, **prev_args;
5616 	MonoType **prev_arg_types;
5617 	guint prev_real_offset;
5618 	GHashTable *prev_cbb_hash;
5619 	MonoBasicBlock **prev_cil_offset_to_bb;
5620 	MonoBasicBlock *prev_cbb;
5621 	unsigned char* prev_cil_start;
5622 	guint32 prev_cil_offset_to_bb_len;
5623 	MonoMethod *prev_current_method;
5624 	MonoGenericContext *prev_generic_context;
5625 	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5627 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* optional debug filters on callee/caller names (see helpers above) */
5629 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5630 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5633 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5634 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5638 	if (cfg->verbose_level > 2)
5639 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
	/* count each distinct inlineable method only once */
5641 	if (!cmethod->inline_info) {
5642 		cfg->stat_inlineable_methods++;
5643 		cmethod->inline_info = 1;
5646 	/* allocate local variables */
5647 	cheader = mono_method_get_header (cmethod);
5649 	if (cheader == NULL || mono_loader_get_last_error ()) {
5650 		MonoLoaderError *error = mono_loader_get_last_error ();
5653 		mono_metadata_free_mh (cheader);
5654 		if (inline_always && error)
5655 			mono_cfg_set_exception (cfg, error->exception_type);
5657 		mono_loader_clear_error ();
5661 	/*Must verify before creating locals as it can cause the JIT to assert.*/
5662 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5663 		mono_metadata_free_mh (cheader);
5667 	/* allocate space to store the return value */
5668 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5669 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	/* switch cfg->locals over to the callee's locals for the recursive call */
5672 	prev_locals = cfg->locals;
5673 	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5674 	for (i = 0; i < cheader->num_locals; ++i)
5675 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5677 	/* allocate start and end blocks */
5678 	/* This is needed so if the inline is aborted, we can clean up */
5679 	NEW_BBLOCK (cfg, sbblock);
5680 	sbblock->real_offset = real_offset;
5682 	NEW_BBLOCK (cfg, ebblock);
5683 	ebblock->block_num = cfg->num_bblocks++;
5684 	ebblock->real_offset = real_offset;
	/* save the caller's per-method compile state so it can be restored below */
5686 	prev_args = cfg->args;
5687 	prev_arg_types = cfg->arg_types;
5688 	prev_inlined_method = cfg->inlined_method;
5689 	cfg->inlined_method = cmethod;
5690 	cfg->ret_var_set = FALSE;
5691 	cfg->inline_depth ++;
5692 	prev_real_offset = cfg->real_offset;
5693 	prev_cbb_hash = cfg->cbb_hash;
5694 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5695 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5696 	prev_cil_start = cfg->cil_start;
5697 	prev_cbb = cfg->cbb;
5698 	prev_current_method = cfg->current_method;
5699 	prev_generic_context = cfg->generic_context;
5700 	prev_ret_var_set = cfg->ret_var_set;
	/* a callvirt on an instance method needs a null check on 'this' */
5702 	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5705 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5707 	ret_var_set = cfg->ret_var_set;
	/* restore the caller's compile state */
5709 	cfg->inlined_method = prev_inlined_method;
5710 	cfg->real_offset = prev_real_offset;
5711 	cfg->cbb_hash = prev_cbb_hash;
5712 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5713 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5714 	cfg->cil_start = prev_cil_start;
5715 	cfg->locals = prev_locals;
5716 	cfg->args = prev_args;
5717 	cfg->arg_types = prev_arg_types;
5718 	cfg->current_method = prev_current_method;
5719 	cfg->generic_context = prev_generic_context;
5720 	cfg->ret_var_set = prev_ret_var_set;
5721 	cfg->inline_depth --;
	/* accept the inline if the callee was cheap enough (cost < 60) or forced */
5723 	if ((costs >= 0 && costs < 60) || inline_always) {
5724 		if (cfg->verbose_level > 2)
5725 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5727 		cfg->stat_inlined_methods++;
5729 		/* always add some code to avoid block split failures */
5730 		MONO_INST_NEW (cfg, ins, OP_NOP);
5731 		MONO_ADD_INS (prev_cbb, ins);
5733 		prev_cbb->next_bb = sbblock;
5734 		link_bblock (cfg, prev_cbb, sbblock);
5737 		 * Get rid of the begin and end bblocks if possible to aid local
5740 		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5742 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5743 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5745 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5746 			MonoBasicBlock *prev = ebblock->in_bb [0];
5747 			mono_merge_basic_blocks (cfg, prev, ebblock);
5749 			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5750 				mono_merge_basic_blocks (cfg, prev_cbb, prev);
5751 				cfg->cbb = prev_cbb;
5755 		 * It's possible that the rvar is set in some prev bblock, but not in others.
5761 			for (i = 0; i < ebblock->in_count; ++i) {
5762 				bb = ebblock->in_bb [i];
5764 				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5767 					emit_init_rvar (cfg, rvar, fsig->ret);
5777 			 * If the inlined method contains only a throw, then the ret var is not
5778 			 * set, so set it to a dummy value.
5781 				emit_init_rvar (cfg, rvar, fsig->ret);
5783 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5786 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	/* inline rejected: discard the new bblocks and any recorded exception */
5789 		if (cfg->verbose_level > 2)
5790 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5791 		cfg->exception_type = MONO_EXCEPTION_NONE;
5792 		mono_loader_clear_error ();
5794 		/* This gets rid of the newly added bblocks */
5795 		cfg->cbb = prev_cbb;
5797 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5802 * Some of these comments may well be out-of-date.
5803 * Design decisions: we do a single pass over the IL code (and we do bblock
5804 * splitting/merging in the few cases when it's required: a back jump to an IL
5805 * address that was not already seen as bblock starting point).
5806 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5807 * Complex operations are decomposed in simpler ones right away. We need to let the
5808 * arch-specific code peek and poke inside this process somehow (except when the
5809 * optimizations can take advantage of the full semantic info of coarse opcodes).
5810 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5811 * MonoInst->opcode initially is the IL opcode or some simplification of that
5812 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5813 * opcode with value bigger than OP_LAST.
5814 * At this point the IR can be handed over to an interpreter, a dumb code generator
5815 * or to the optimizing code generator that will translate it to SSA form.
5817 * Profiling directed optimizations.
5818 * We may compile by default with few or no optimizations and instrument the code
5819 * or the user may indicate what methods to optimize the most either in a config file
5820 * or through repeated runs where the compiler applies offline the optimizations to
5821 * each method and then decides if it was worth it.
/* Lightweight verification helpers used throughout mono_method_to_ir ():
 * each macro bails out (UNVERIFIED / LOAD_ERROR) when the IL being decoded is
 * malformed — bad stack depth, out-of-range arg/local index, truncated opcode,
 * or an unloadable class. */
5824 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5825 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5826 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5827 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5828 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5829 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5830 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5831 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5833 /* offset from br.s -> br like opcodes */
5834 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the IL address IP falls inside basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5837 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5839 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5841 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan the IL stream [START, END) and create a basic block (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch/switch. Bodies of bblocks ending in CEE_THROW are marked out_of_line.
 * NOTE(review): interior lines are elided in this view (loop header, several
 * case bodies, error paths) — do not assume this listing is complete.
 */
5845 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5847 	unsigned char *ip = start;
5848 	unsigned char *target;
5851 	MonoBasicBlock *bblock;
5852 	const MonoOpcode *opcode;
5855 		cli_addr = ip - start;
5856 		i = mono_opcode_value ((const guint8 **)&ip, end);
5859 		opcode = &mono_opcodes [i];
		/* advance ip according to the operand kind of the opcode */
5860 		switch (opcode->argument) {
5861 		case MonoInlineNone:
5864 		case MonoInlineString:
5865 		case MonoInlineType:
5866 		case MonoInlineField:
5867 		case MonoInlineMethod:
5870 		case MonoShortInlineR:
5877 		case MonoShortInlineVar:
5878 		case MonoShortInlineI:
		/* 1-byte signed branch offset: target and fall-through both start bblocks */
5881 		case MonoShortInlineBrTarget:
5882 			target = start + cli_addr + 2 + (signed char)ip [1];
5883 			GET_BBLOCK (cfg, bblock, target);
5886 			GET_BBLOCK (cfg, bblock, ip);
		/* 4-byte signed branch offset */
5888 		case MonoInlineBrTarget:
5889 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5890 			GET_BBLOCK (cfg, bblock, target);
5893 			GET_BBLOCK (cfg, bblock, ip);
		/* switch: N 4-byte offsets relative to the end of the instruction */
5895 		case MonoInlineSwitch: {
5896 			guint32 n = read32 (ip + 1);
5899 			cli_addr += 5 + 4 * n;
5900 			target = start + cli_addr;
5901 			GET_BBLOCK (cfg, bblock, target);
5903 			for (j = 0; j < n; ++j) {
5904 				target = start + cli_addr + (gint32)read32 (ip);
5905 				GET_BBLOCK (cfg, bblock, target);
5915 			g_assert_not_reached ();
5918 		if (i == CEE_THROW) {
5919 			unsigned char *bb_start = ip - 1;
5921 			/* Find the start of the bblock containing the throw */
5923 			while ((bb_start >= start) && !bblock) {
5924 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
			/* throw-only blocks are cold: move them out of line */
5928 			bblock->out_of_line = 1;
5938 static inline MonoMethod *
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods the
 * token indexes the wrapper's own data table instead of the image metadata;
 * the result may be inflated with CONTEXT. Open constructed types are allowed.
 */
5939 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5943 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
5944 		method = mono_method_get_wrapper_data (m, token);
5946 			method = mono_class_inflate_generic_method (method, context);
5948 		method = mono_get_method_full (m->klass->image, token, klass, context);
5954 static inline MonoMethod *
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when generic sharing is disabled
 * a method on an open constructed type is rejected (elided branch below).
 */
5955 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5957 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5959 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5965 static inline MonoClass*
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, handling wrapper
 * methods (token indexes wrapper data) and inflating with CONTEXT when given.
 * The resolved class is initialized before being returned.
 */
5966 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5970 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
5971 		klass = mono_method_get_wrapper_data (method, token);
5973 			klass = mono_class_inflate_generic_class (klass, context);
5975 		klass = mono_class_get_full (method->klass->image, token, context);
5978 		mono_class_init (klass);
5982 static inline MonoMethodSignature*
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD. For
 * wrappers the signature comes from the wrapper data and may be inflated with
 * CONTEXT; otherwise it is parsed from the image metadata.
 */
5983 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5985 	MonoMethodSignature *fsig;
5987 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
5990 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5992 			fsig = mono_inflate_generic_signature (fsig, context, &error);
5994 			g_assert (mono_error_ok (&error));
5997 		fsig = mono_metadata_parse_signature (method->klass->image, token);
6003  * Returns TRUE if the JIT should abort inlining because "callee"
6004  * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand declarative security for a CALLER -> CALLEE call.
 * On an ECMA link demand, code throwing a SecurityException is emitted before
 * the call; otherwise a failed demand records MONO_EXCEPTION_SECURITY_LINKDEMAND
 * on the cfg (without overwriting a previously recorded exception).
 */
6007 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* when inlining, only check if the callee actually carries declsec info */
6011 	if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6015 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6016 	if (result == MONO_JIT_SECURITY_OK)
6019 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
6020 		/* Generate code to throw a SecurityException before the actual call/link */
6021 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6024 		NEW_ICONST (cfg, args [0], 4);
6025 		NEW_METHODCONST (cfg, args [1], caller);
6026 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6027 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6028 		/* don't hide previous results */
6029 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6030 		cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException managed method used to raise security
 * exceptions from JIT-generated code.
 */
6038 throw_exception (void)
6040 	static MonoMethod *method = NULL;
6043 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6044 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * position, so EX is raised at run time when this code path executes.
 */
6051 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6053 	MonoMethod *thrower = throw_exception ();
6056 	EMIT_NEW_PCONST (cfg, args [0], ex);
6057 	mono_emit_method_call (cfg, thrower, args, NULL);
6061  * Return the original method if a wrapper is specified. We can only access
6062  * the custom attributes from the original method.
6065 get_original_method (MonoMethod *method)
6067 	if (method->wrapper_type == MONO_WRAPPER_NONE)
6070 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6071 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6074 	/* in other cases we need to find the original method */
6075 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the returned exception at the access point. Wrappers are unwrapped
 * first since security attributes live on the original method.
 */
6079 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6080 						MonoBasicBlock *bblock, unsigned char *ip)
6082 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6083 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6085 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, sibling of the field variant above: if CALLER may
 * not call CALLEE, emit code that throws the returned exception at the call
 * site.
 */
6089 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6090 						MonoBasicBlock *bblock, unsigned char *ip)
6092 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6093 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6095 		emit_throw_exception (cfg, ex);
6099  * Check that the IL instructions at ip are the array initialization
6100  * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers.InitializeArray"
 * idiom following a newarr and, when it matches, return a pointer to the
 * static initializer data (or its RVA for AOT) and set *OUT_SIZE and
 * *OUT_FIELD_TOKEN. Returns NULL when the pattern or element type does not
 * qualify. NOTE(review): interior lines (size computation per element type,
 * several early returns) are elided in this view.
 */
6103 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6106 	 * newarr[System.Int32]
6108 	 * ldtoken field valuetype ...
6109 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip[5] == 0x4 checks the ldtoken operand is a Field token (table 0x04) */
6111 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6112 		guint32 token = read32 (ip + 7);
6113 		guint32 field_token = read32 (ip + 2);
6114 		guint32 field_index = field_token & 0xffffff;
6116 		const char *data_ptr;
6118 		MonoMethod *cmethod;
6119 		MonoClass *dummy_class;
6120 		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6126 		*out_field_token = field_token;
6128 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* the call target must be corlib's RuntimeHelpers.InitializeArray */
6131 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6133 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6134 		case MONO_TYPE_BOOLEAN:
6138 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6139 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6140 		case MONO_TYPE_CHAR:
6150 			return NULL; /* stupid ARM FP swapped format */
		/* the initializer blob must fit in the field's declared size */
6160 		if (size > mono_type_size (field->type, &dummy_align))
6163 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6164 		if (!method->klass->image->dynamic) {
6165 			field_index = read32 (ip + 2) & 0xffffff;
6166 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6167 			data_ptr = mono_image_rva_map (method->klass->image, rva);
6168 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6169 			/* for aot code we do the lookup on load */
6170 			if (aot && data_ptr)
6171 				return GUINT_TO_POINTER (rva);
6173 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6175 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP (or noting an empty body).
 */
6183 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6185 	char *method_fname = mono_method_full_name (method, TRUE);
6187 	MonoMethodHeader *header = mono_method_get_header (method);
6189 	if (header->code_size == 0)
6190 		method_code = g_strdup ("method body is empty.");
6192 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6193 	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6194 	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6195 	g_free (method_fname);
6196 	g_free (method_code);
	/* defer freeing the header until the cfg itself is destroyed */
6197 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG; the pointer is GC
 * registered as a root so the object survives until the cfg is disposed.
 */
6201 set_exception_object (MonoCompile *cfg, MonoException *exception)
6203 	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6204 	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6205 	cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N. When the value is a constant that was
 * just emitted as the last instruction of the current bblock, the store is
 * optimized away by retargeting the constant's destination register.
 */
6209 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6212 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6213 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6214 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6215 		/* Optimize reg-reg moves away */
6217 		 * Can't optimize other opcodes, since sp[0] might point to
6218 		 * the last ins of a decomposed opcode.
6220 		sp [0]->dreg = (cfg)->locals [n]->dreg;
6222 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6227  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj <T>": instead of taking the local's
 * address, directly zero the local (NULL for reference types, VZERO for value
 * types), letting later passes keep the local in a register. Returns the IP
 * past the consumed sequence on success (elided return paths).
 * NOTE(review): interior lines are elided; the non-matching fallback is not
 * visible here.
 */
6230 static inline unsigned char *
6231 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6240 		local = read16 (ip + 2);
6244 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6245 		gboolean skip = FALSE;
6247 		/* From the INITOBJ case */
6248 		token = read32 (ip + 2);
6249 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6250 		CHECK_TYPELOAD (klass);
6251 		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6252 			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6253 		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6254 			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk CLASS's parent chain and return TRUE when System.Exception is found,
 * i.e. CLASS is an exception type.
 */
6267 is_exception_class (MonoClass *class)
6270 		if (class == mono_defaults.exception_class)
6272 		class = class->parent;
6278  * is_jit_optimizer_disabled:
6280  *   Determine whether M's assembly has a DebuggableAttribute with the
6281  * IsJITOptimizerDisabled flag set.
/*
 * The result is computed once per assembly and cached on it
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value store before the inited-flag store.
 */
6284 is_jit_optimizer_disabled (MonoMethod *m)
6286 	MonoAssembly *ass = m->klass->image->assembly;
6287 	MonoCustomAttrInfo* attrs;
6288 	static MonoClass *klass;
6290 	gboolean val = FALSE;
	/* fast path: cached answer */
6293 	if (ass->jit_optimizer_disabled_inited)
6294 		return ass->jit_optimizer_disabled;
6297 		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	/* attribute class missing: optimizer cannot be disabled */
6300 		ass->jit_optimizer_disabled = FALSE;
6301 		mono_memory_barrier ();
6302 		ass->jit_optimizer_disabled_inited = TRUE;
6306 	attrs = mono_custom_attrs_from_assembly (ass);
6308 		for (i = 0; i < attrs->num_attrs; ++i) {
6309 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
6312 			MonoMethodSignature *sig;
6314 			if (!attr->ctor || attr->ctor->klass != klass)
6316 			/* Decode the attribute. See reflection.c */
6317 			len = attr->data_size;
6318 			p = (const char*)attr->data;
			/* custom-attribute blobs start with prolog 0x0001 */
6319 			g_assert (read16 (p) == 0x0001);
6322 			// FIXME: Support named parameters
6323 			sig = mono_method_signature (attr->ctor);
6324 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6326 			/* Two boolean arguments */
6330 		mono_custom_attrs_free (attrs);
6333 	ass->jit_optimizer_disabled = val;
6334 	mono_memory_barrier ();
6335 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG can
 * actually be performed as a tail call on this configuration. Starts from an
 * arch-specific (or signature-equality) baseline, then vetoes the cases where
 * the callee could observe the caller's (about to be unwound) stack frame.
 */
6341 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6343 	gboolean supported_tail_call;
6346 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6347 	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6349 	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6352 	for (i = 0; i < fsig->param_count; ++i) {
6353 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6354 			/* These can point to the current method's stack */
6355 			supported_tail_call = FALSE;
6357 	if (fsig->hasthis && cmethod->klass->valuetype)
6358 		/* this might point to the current method's stack */
6359 		supported_tail_call = FALSE;
6360 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6361 		supported_tail_call = FALSE;
	/* an LMF frame must stay live across the call, which a tail call removes */
6362 	if (cfg->method->save_lmf)
6363 		supported_tail_call = FALSE;
6364 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6365 		supported_tail_call = FALSE;
6367 	/* Debugging support */
6369 	if (supported_tail_call) {
6370 		if (!mono_debug_count ())
6371 			supported_tail_call = FALSE;
6375 	return supported_tail_call;
6378 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6379  * it to the thread local value based on the tls_offset field. Every other kind of access to
6380  * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return TRUE when FIELD is corlib's ThreadLocal`1.tlsdata, the one field
 * whose address-taking the JIT redirects to thread-local storage.
 */
6383 is_magic_tls_access (MonoClassField *field)
6385 	if (strcmp (field->name, "tlsdata"))
6387 	if (strcmp (field->parent->name, "ThreadLocal`1"))
6389 	return field->parent->image == mono_defaults.corlib;
6392 /* emits the code needed to access a managed tls var (like ThreadStatic)
6393  * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6394  * pointer for the current thread.
6395  * Returns the MonoInst* representing the address of the tls var.
6398 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6401 	int static_data_reg, array_reg, dreg;
6402 	int offset2_reg, idx_reg;
6403 	// inlined access to the tls data
6404 	// idx = (offset >> 24) - 1;
6405 	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6406 	static_data_reg = alloc_ireg (cfg);
6407 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
	/* idx = (offset >> 24) - 1 — top byte of the encoded offset selects the chunk */
6408 	idx_reg = alloc_ireg (cfg);
6409 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6410 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	/* scale idx by sizeof (gpointer) to index the static_data pointer array */
6411 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6412 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6413 	array_reg = alloc_ireg (cfg);
6414 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	/* low 24 bits of the encoded offset are the byte offset within the chunk */
6415 	offset2_reg = alloc_ireg (cfg);
6416 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6417 	dreg = alloc_ireg (cfg);
6418 	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6423  * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6424  * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Build (or reuse from *CACHED_TLS_ADDR) the address of the thread-local
 * value backing ThreadLocal<T>.tlsdata: load tls_offset from the ThreadLocal
 * instance, obtain the current MonoInternalThread (via the thread intrinsic
 * when available, otherwise a call to CurrentInternalThread_internal), and
 * index into its static_data. The computed address is stored in a temp so
 * later accesses within the method reuse it.
 */
6427 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6429 	MonoInst *load, *addr, *temp, *store, *thread_ins;
6430 	MonoClassField *offset_field;
	/* fast path: address already computed earlier in this method */
6432 	if (*cached_tls_addr) {
6433 		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6436 	thread_ins = mono_get_thread_intrinsic (cfg);
6437 	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6439 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6441 		MONO_ADD_INS (cfg->cbb, thread_ins);
	/* no intrinsic on this arch: call the icall to get the current thread */
6443 		MonoMethod *thread_method;
6444 		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6445 		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6447 	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6448 	addr->klass = mono_class_from_mono_type (tls_field->type);
6449 	addr->type = STACK_MP;
6450 	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6451 	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6453 	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6458 * mono_method_to_ir:
6460 * Translate the .net IL into linear IR.
6463 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6464 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6465 guint inline_offset, gboolean is_virtual_call)
6468 MonoInst *ins, **sp, **stack_start;
6469 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6470 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6471 MonoMethod *cmethod, *method_definition;
6472 MonoInst **arg_array;
6473 MonoMethodHeader *header;
6475 guint32 token, ins_flag;
6477 MonoClass *constrained_call = NULL;
6478 unsigned char *ip, *end, *target, *err_pos;
6479 static double r8_0 = 0.0;
6480 MonoMethodSignature *sig;
6481 MonoGenericContext *generic_context = NULL;
6482 MonoGenericContainer *generic_container = NULL;
6483 MonoType **param_types;
6484 int i, n, start_new_bblock, dreg;
6485 int num_calls = 0, inline_costs = 0;
6486 int breakpoint_id = 0;
6488 MonoBoolean security, pinvoke;
6489 MonoSecurityManager* secman = NULL;
6490 MonoDeclSecurityActions actions;
6491 GSList *class_inits = NULL;
6492 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6494 gboolean init_locals, seq_points, skip_dead_blocks;
6495 gboolean disable_inline, sym_seq_points = FALSE;
6496 MonoInst *cached_tls_addr = NULL;
6497 MonoDebugMethodInfo *minfo;
6498 MonoBitSet *seq_point_locs = NULL;
6499 MonoBitSet *seq_point_set_locs = NULL;
6501 disable_inline = is_jit_optimizer_disabled (method);
6503 /* serialization and xdomain stuff may need access to private fields and methods */
6504 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6505 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6506 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6507 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6508 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6509 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6511 dont_verify |= mono_security_smcs_hack_enabled ();
6513 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6514 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6515 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6516 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6517 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6519 image = method->klass->image;
6520 header = mono_method_get_header (method);
6522 MonoLoaderError *error;
6524 if ((error = mono_loader_get_last_error ())) {
6525 mono_cfg_set_exception (cfg, error->exception_type);
6527 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6528 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6530 goto exception_exit;
6532 generic_container = mono_method_get_generic_container (method);
6533 sig = mono_method_signature (method);
6534 num_args = sig->hasthis + sig->param_count;
6535 ip = (unsigned char*)header->code;
6536 cfg->cil_start = ip;
6537 end = ip + header->code_size;
6538 cfg->stat_cil_code_size += header->code_size;
6539 init_locals = header->init_locals;
6541 seq_points = cfg->gen_seq_points && cfg->method == method;
6542 #ifdef PLATFORM_ANDROID
6543 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6546 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6547 /* We could hit a seq point before attaching to the JIT (#8338) */
6551 if (cfg->gen_seq_points && cfg->method == method) {
6552 minfo = mono_debug_lookup_method (method);
6554 int i, n_il_offsets;
6558 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6559 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6560 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6561 sym_seq_points = TRUE;
6562 for (i = 0; i < n_il_offsets; ++i) {
6563 if (il_offsets [i] < header->code_size)
6564 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6566 g_free (il_offsets);
6567 g_free (line_numbers);
6572 * Methods without init_locals set could cause asserts in various passes
6577 method_definition = method;
6578 while (method_definition->is_inflated) {
6579 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6580 method_definition = imethod->declaring;
6583 /* SkipVerification is not allowed if core-clr is enabled */
6584 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6586 dont_verify_stloc = TRUE;
6589 if (mono_debug_using_mono_debugger ())
6590 cfg->keep_cil_nops = TRUE;
6592 if (sig->is_inflated)
6593 generic_context = mono_method_get_context (method);
6594 else if (generic_container)
6595 generic_context = &generic_container->context;
6596 cfg->generic_context = generic_context;
6598 if (!cfg->generic_sharing_context)
6599 g_assert (!sig->has_type_parameters);
6601 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6602 g_assert (method->is_inflated);
6603 g_assert (mono_method_get_context (method)->method_inst);
6605 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6606 g_assert (sig->generic_param_count);
6608 if (cfg->method == method) {
6609 cfg->real_offset = 0;
6611 cfg->real_offset = inline_offset;
6614 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6615 cfg->cil_offset_to_bb_len = header->code_size;
6617 cfg->current_method = method;
6619 if (cfg->verbose_level > 2)
6620 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6622 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6624 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6625 for (n = 0; n < sig->param_count; ++n)
6626 param_types [n + sig->hasthis] = sig->params [n];
6627 cfg->arg_types = param_types;
6629 dont_inline = g_list_prepend (dont_inline, method);
6630 if (cfg->method == method) {
6632 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6633 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6636 NEW_BBLOCK (cfg, start_bblock);
6637 cfg->bb_entry = start_bblock;
6638 start_bblock->cil_code = NULL;
6639 start_bblock->cil_length = 0;
6640 #if defined(__native_client_codegen__)
6641 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6642 ins->dreg = alloc_dreg (cfg, STACK_I4);
6643 MONO_ADD_INS (start_bblock, ins);
6647 NEW_BBLOCK (cfg, end_bblock);
6648 cfg->bb_exit = end_bblock;
6649 end_bblock->cil_code = NULL;
6650 end_bblock->cil_length = 0;
6651 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6652 g_assert (cfg->num_bblocks == 2);
6654 arg_array = cfg->args;
6656 if (header->num_clauses) {
6657 cfg->spvars = g_hash_table_new (NULL, NULL);
6658 cfg->exvars = g_hash_table_new (NULL, NULL);
6660 /* handle exception clauses */
6661 for (i = 0; i < header->num_clauses; ++i) {
6662 MonoBasicBlock *try_bb;
6663 MonoExceptionClause *clause = &header->clauses [i];
6664 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6665 try_bb->real_offset = clause->try_offset;
6666 try_bb->try_start = TRUE;
6667 try_bb->region = ((i + 1) << 8) | clause->flags;
6668 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6669 tblock->real_offset = clause->handler_offset;
6670 tblock->flags |= BB_EXCEPTION_HANDLER;
6672 link_bblock (cfg, try_bb, tblock);
6674 if (*(ip + clause->handler_offset) == CEE_POP)
6675 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6677 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6678 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6679 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6680 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6681 MONO_ADD_INS (tblock, ins);
6683 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6684 /* finally clauses already have a seq point */
6685 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6686 MONO_ADD_INS (tblock, ins);
6689 /* todo: is a fault block unsafe to optimize? */
6690 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6691 tblock->flags |= BB_EXCEPTION_UNSAFE;
6695 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6697 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6699 /* catch and filter blocks get the exception object on the stack */
6700 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6701 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6702 MonoInst *dummy_use;
6704 /* mostly like handle_stack_args (), but just sets the input args */
6705 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6706 tblock->in_scount = 1;
6707 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6708 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6711 * Add a dummy use for the exvar so its liveness info will be
6715 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6717 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6718 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6719 tblock->flags |= BB_EXCEPTION_HANDLER;
6720 tblock->real_offset = clause->data.filter_offset;
6721 tblock->in_scount = 1;
6722 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6723 /* The filter block shares the exvar with the handler block */
6724 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6725 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6726 MONO_ADD_INS (tblock, ins);
6730 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6731 clause->data.catch_class &&
6732 cfg->generic_sharing_context &&
6733 mono_class_check_context_used (clause->data.catch_class)) {
6735 * In shared generic code with catch
6736 * clauses containing type variables
6737 * the exception handling code has to
6738 * be able to get to the rgctx.
6739 * Therefore we have to make sure that
6740 * the vtable/mrgctx argument (for
6741 * static or generic methods) or the
6742 * "this" argument (for non-static
6743 * methods) are live.
6745 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6746 mini_method_get_context (method)->method_inst ||
6747 method->klass->valuetype) {
6748 mono_get_vtable_var (cfg);
6750 MonoInst *dummy_use;
6752 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6757 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6758 cfg->cbb = start_bblock;
6759 cfg->args = arg_array;
6760 mono_save_args (cfg, sig, inline_args);
6763 /* FIRST CODE BLOCK */
6764 NEW_BBLOCK (cfg, bblock);
6765 bblock->cil_code = ip;
6769 ADD_BBLOCK (cfg, bblock);
6771 if (cfg->method == method) {
6772 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6773 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6774 MONO_INST_NEW (cfg, ins, OP_BREAK);
6775 MONO_ADD_INS (bblock, ins);
6779 if (mono_security_cas_enabled ())
6780 secman = mono_security_manager_get_methods ();
6782 security = (secman && mono_security_method_has_declsec (method));
6783 /* at this point having security doesn't mean we have any code to generate */
6784 if (security && (cfg->method == method)) {
6785 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6786 * And we do not want to enter the next section (with allocation) if we
6787 * have nothing to generate */
6788 security = mono_declsec_get_demands (method, &actions);
6791 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6792 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6794 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6795 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6796 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6798 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6799 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6803 mono_custom_attrs_free (custom);
6806 custom = mono_custom_attrs_from_class (wrapped->klass);
6807 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6811 mono_custom_attrs_free (custom);
6814 /* not a P/Invoke after all */
6819 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6820 /* we use a separate basic block for the initialization code */
6821 NEW_BBLOCK (cfg, init_localsbb);
6822 cfg->bb_init = init_localsbb;
6823 init_localsbb->real_offset = cfg->real_offset;
6824 start_bblock->next_bb = init_localsbb;
6825 init_localsbb->next_bb = bblock;
6826 link_bblock (cfg, start_bblock, init_localsbb);
6827 link_bblock (cfg, init_localsbb, bblock);
6829 cfg->cbb = init_localsbb;
6831 start_bblock->next_bb = bblock;
6832 link_bblock (cfg, start_bblock, bblock);
6835 if (cfg->gsharedvt && cfg->method == method) {
6836 MonoGSharedVtMethodInfo *info;
6837 MonoInst *var, *locals_var;
6840 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6841 info->method = cfg->method;
6843 info->entries = g_ptr_array_new ();
6844 cfg->gsharedvt_info = info;
6846 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6847 /* prevent it from being register allocated */
6848 //var->flags |= MONO_INST_INDIRECT;
6849 cfg->gsharedvt_info_var = var;
6851 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6852 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6854 /* Allocate locals */
6855 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6856 /* prevent it from being register allocated */
6857 //locals_var->flags |= MONO_INST_INDIRECT;
6858 cfg->gsharedvt_locals_var = locals_var;
6860 dreg = alloc_ireg (cfg);
6861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6863 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6864 ins->dreg = locals_var->dreg;
6866 MONO_ADD_INS (cfg->cbb, ins);
6867 cfg->gsharedvt_locals_var_ins = ins;
6869 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6872 ins->flags |= MONO_INST_INIT;
6876 /* at this point we know, if security is TRUE, that some code needs to be generated */
6877 if (security && (cfg->method == method)) {
6880 cfg->stat_cas_demand_generation++;
6882 if (actions.demand.blob) {
6883 /* Add code for SecurityAction.Demand */
6884 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6885 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6886 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6887 mono_emit_method_call (cfg, secman->demand, args, NULL);
6889 if (actions.noncasdemand.blob) {
6890 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6891 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6892 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6893 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6894 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6895 mono_emit_method_call (cfg, secman->demand, args, NULL);
6897 if (actions.demandchoice.blob) {
6898 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6899 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6900 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6901 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6902 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6906 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6908 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6911 if (mono_security_core_clr_enabled ()) {
6912 /* check if this is native code, e.g. an icall or a p/invoke */
6913 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6914 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6916 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6917 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6919 /* if this is a native call then it can only be JITted from platform code */
6920 if ((icall || pinvk) && method->klass && method->klass->image) {
6921 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6922 MonoException *ex = icall ? mono_get_exception_security () :
6923 mono_get_exception_method_access ();
6924 emit_throw_exception (cfg, ex);
6931 CHECK_CFG_EXCEPTION;
6933 if (header->code_size == 0)
6936 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6941 if (cfg->method == method)
6942 mono_debug_init_method (cfg, bblock, breakpoint_id);
6944 for (n = 0; n < header->num_locals; ++n) {
6945 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6950 /* We force the vtable variable here for all shared methods
6951 for the possibility that they might show up in a stack
6952 trace where their exact instantiation is needed. */
6953 if (cfg->generic_sharing_context && method == cfg->method) {
6954 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6955 mini_method_get_context (method)->method_inst ||
6956 method->klass->valuetype) {
6957 mono_get_vtable_var (cfg);
6959 /* FIXME: Is there a better way to do this?
6960 We need the variable live for the duration
6961 of the whole method. */
6962 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6966 /* add a check for this != NULL to inlined methods */
6967 if (is_virtual_call) {
6970 NEW_ARGLOAD (cfg, arg_ins, 0);
6971 MONO_ADD_INS (cfg->cbb, arg_ins);
6972 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6975 skip_dead_blocks = !dont_verify;
6976 if (skip_dead_blocks) {
6977 original_bb = bb = mono_basic_block_split (method, &error);
6978 if (!mono_error_ok (&error)) {
6979 mono_error_cleanup (&error);
6985 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6986 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6989 start_new_bblock = 0;
6992 if (cfg->method == method)
6993 cfg->real_offset = ip - header->code;
6995 cfg->real_offset = inline_offset;
7000 if (start_new_bblock) {
7001 bblock->cil_length = ip - bblock->cil_code;
7002 if (start_new_bblock == 2) {
7003 g_assert (ip == tblock->cil_code);
7005 GET_BBLOCK (cfg, tblock, ip);
7007 bblock->next_bb = tblock;
7010 start_new_bblock = 0;
7011 for (i = 0; i < bblock->in_scount; ++i) {
7012 if (cfg->verbose_level > 3)
7013 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7014 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7018 g_slist_free (class_inits);
7021 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7022 link_bblock (cfg, bblock, tblock);
7023 if (sp != stack_start) {
7024 handle_stack_args (cfg, stack_start, sp - stack_start);
7026 CHECK_UNVERIFIABLE (cfg);
7028 bblock->next_bb = tblock;
7031 for (i = 0; i < bblock->in_scount; ++i) {
7032 if (cfg->verbose_level > 3)
7033 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7034 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7037 g_slist_free (class_inits);
7042 if (skip_dead_blocks) {
7043 int ip_offset = ip - header->code;
7045 if (ip_offset == bb->end)
7049 int op_size = mono_opcode_size (ip, end);
7050 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7052 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7054 if (ip_offset + op_size == bb->end) {
7055 MONO_INST_NEW (cfg, ins, OP_NOP);
7056 MONO_ADD_INS (bblock, ins);
7057 start_new_bblock = 1;
7065 * Sequence points are points where the debugger can place a breakpoint.
7066 * Currently, we generate these automatically at points where the IL
7069 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7071 * Make methods interruptable at the beginning, and at the targets of
7072 * backward branches.
7073 * Also, do this at the start of every bblock in methods with clauses too,
7074 * to be able to handle instructions with imprecise control flow like
7076 * Backward branches are handled at the end of method-to-ir ().
7078 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7080 /* Avoid sequence points on empty IL like .volatile */
7081 // FIXME: Enable this
7082 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7083 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7084 if (sp != stack_start)
7085 ins->flags |= MONO_INST_NONEMPTY_STACK;
7086 MONO_ADD_INS (cfg->cbb, ins);
7089 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7092 bblock->real_offset = cfg->real_offset;
7094 if ((cfg->method == method) && cfg->coverage_info) {
7095 guint32 cil_offset = ip - header->code;
7096 cfg->coverage_info->data [cil_offset].cil_code = ip;
7098 /* TODO: Use an increment here */
7099 #if defined(TARGET_X86)
7100 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7101 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7103 MONO_ADD_INS (cfg->cbb, ins);
7105 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7106 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7110 if (cfg->verbose_level > 3)
7111 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7115 if (seq_points && !sym_seq_points && sp != stack_start) {
7117 * The C# compiler uses these nops to notify the JIT that it should
7118 * insert seq points.
7120 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7121 MONO_ADD_INS (cfg->cbb, ins);
7123 if (cfg->keep_cil_nops)
7124 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7126 MONO_INST_NEW (cfg, ins, OP_NOP);
7128 MONO_ADD_INS (bblock, ins);
7131 if (should_insert_brekpoint (cfg->method)) {
7132 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7134 MONO_INST_NEW (cfg, ins, OP_NOP);
7137 MONO_ADD_INS (bblock, ins);
7143 CHECK_STACK_OVF (1);
7144 n = (*ip)-CEE_LDARG_0;
7146 EMIT_NEW_ARGLOAD (cfg, ins, n);
7154 CHECK_STACK_OVF (1);
7155 n = (*ip)-CEE_LDLOC_0;
7157 EMIT_NEW_LOCLOAD (cfg, ins, n);
7166 n = (*ip)-CEE_STLOC_0;
7169 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7171 emit_stloc_ir (cfg, sp, header, n);
7178 CHECK_STACK_OVF (1);
7181 EMIT_NEW_ARGLOAD (cfg, ins, n);
7187 CHECK_STACK_OVF (1);
7190 NEW_ARGLOADA (cfg, ins, n);
7191 MONO_ADD_INS (cfg->cbb, ins);
7201 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7203 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7208 CHECK_STACK_OVF (1);
7211 EMIT_NEW_LOCLOAD (cfg, ins, n);
7215 case CEE_LDLOCA_S: {
7216 unsigned char *tmp_ip;
7218 CHECK_STACK_OVF (1);
7219 CHECK_LOCAL (ip [1]);
7221 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7227 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7236 CHECK_LOCAL (ip [1]);
7237 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7239 emit_stloc_ir (cfg, sp, header, ip [1]);
7244 CHECK_STACK_OVF (1);
7245 EMIT_NEW_PCONST (cfg, ins, NULL);
7246 ins->type = STACK_OBJ;
7251 CHECK_STACK_OVF (1);
7252 EMIT_NEW_ICONST (cfg, ins, -1);
7265 CHECK_STACK_OVF (1);
7266 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7272 CHECK_STACK_OVF (1);
7274 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7280 CHECK_STACK_OVF (1);
7281 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7287 CHECK_STACK_OVF (1);
7288 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7289 ins->type = STACK_I8;
7290 ins->dreg = alloc_dreg (cfg, STACK_I8);
7292 ins->inst_l = (gint64)read64 (ip);
7293 MONO_ADD_INS (bblock, ins);
7299 gboolean use_aotconst = FALSE;
7301 #ifdef TARGET_POWERPC
7302 /* FIXME: Clean this up */
7303 if (cfg->compile_aot)
7304 use_aotconst = TRUE;
7307 /* FIXME: we should really allocate this only late in the compilation process */
7308 f = mono_domain_alloc (cfg->domain, sizeof (float));
7310 CHECK_STACK_OVF (1);
7316 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7318 dreg = alloc_freg (cfg);
7319 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7320 ins->type = STACK_R8;
7322 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7323 ins->type = STACK_R8;
7324 ins->dreg = alloc_dreg (cfg, STACK_R8);
7326 MONO_ADD_INS (bblock, ins);
7336 gboolean use_aotconst = FALSE;
7338 #ifdef TARGET_POWERPC
7339 /* FIXME: Clean this up */
7340 if (cfg->compile_aot)
7341 use_aotconst = TRUE;
7344 /* FIXME: we should really allocate this only late in the compilation process */
7345 d = mono_domain_alloc (cfg->domain, sizeof (double));
7347 CHECK_STACK_OVF (1);
7353 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7355 dreg = alloc_freg (cfg);
7356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7357 ins->type = STACK_R8;
7359 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7360 ins->type = STACK_R8;
7361 ins->dreg = alloc_dreg (cfg, STACK_R8);
7363 MONO_ADD_INS (bblock, ins);
7372 MonoInst *temp, *store;
7374 CHECK_STACK_OVF (1);
7378 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7379 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7381 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7384 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7397 if (sp [0]->type == STACK_R8)
7398 /* we need to pop the value from the x86 FP stack */
7399 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7405 INLINE_FAILURE ("jmp");
7406 GSHAREDVT_FAILURE (*ip);
7409 if (stack_start != sp)
7411 token = read32 (ip + 1);
7412 /* FIXME: check the signature matches */
7413 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7415 if (!cmethod || mono_loader_get_last_error ())
7418 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7419 GENERIC_SHARING_FAILURE (CEE_JMP);
7421 if (mono_security_cas_enabled ())
7422 CHECK_CFG_EXCEPTION;
7424 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7426 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7429 /* Handle tail calls similarly to calls */
7430 n = fsig->param_count + fsig->hasthis;
7432 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7433 call->method = cmethod;
7434 call->tail_call = TRUE;
7435 call->signature = mono_method_signature (cmethod);
7436 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7437 call->inst.inst_p0 = cmethod;
7438 for (i = 0; i < n; ++i)
7439 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7441 mono_arch_emit_call (cfg, call);
7442 MONO_ADD_INS (bblock, (MonoInst*)call);
7445 for (i = 0; i < num_args; ++i)
7446 /* Prevent arguments from being optimized away */
7447 arg_array [i]->flags |= MONO_INST_VOLATILE;
7449 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7450 ins = (MonoInst*)call;
7451 ins->inst_p0 = cmethod;
7452 MONO_ADD_INS (bblock, ins);
7456 start_new_bblock = 1;
7461 case CEE_CALLVIRT: {
7462 MonoInst *addr = NULL;
7463 MonoMethodSignature *fsig = NULL;
7465 int virtual = *ip == CEE_CALLVIRT;
7466 int calli = *ip == CEE_CALLI;
7467 gboolean pass_imt_from_rgctx = FALSE;
7468 MonoInst *imt_arg = NULL;
7469 MonoInst *keep_this_alive = NULL;
7470 gboolean pass_vtable = FALSE;
7471 gboolean pass_mrgctx = FALSE;
7472 MonoInst *vtable_arg = NULL;
7473 gboolean check_this = FALSE;
7474 gboolean supported_tail_call = FALSE;
7475 gboolean tail_call = FALSE;
7476 gboolean need_seq_point = FALSE;
7477 guint32 call_opcode = *ip;
7478 gboolean emit_widen = TRUE;
7479 gboolean push_res = TRUE;
7480 gboolean skip_ret = FALSE;
7481 gboolean delegate_invoke = FALSE;
7484 token = read32 (ip + 1);
7489 //GSHAREDVT_FAILURE (*ip);
7494 fsig = mini_get_signature (method, token, generic_context);
7495 n = fsig->param_count + fsig->hasthis;
7497 if (method->dynamic && fsig->pinvoke) {
7501 * This is a call through a function pointer using a pinvoke
7502 * signature. Have to create a wrapper and call that instead.
7503 * FIXME: This is very slow, need to create a wrapper at JIT time
7504 * instead based on the signature.
7506 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7507 EMIT_NEW_PCONST (cfg, args [1], fsig);
7509 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7512 MonoMethod *cil_method;
7514 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7515 cil_method = cmethod;
7517 if (constrained_call) {
7518 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7519 if (cfg->verbose_level > 2)
7520 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7521 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7522 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7523 cfg->generic_sharing_context)) {
7524 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7527 if (cfg->verbose_level > 2)
7528 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7530 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7532 * This is needed since get_method_constrained can't find
7533 * the method in klass representing a type var.
7534 * The type var is guaranteed to be a reference type in this
7537 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7538 g_assert (!cmethod->klass->valuetype);
7540 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7545 if (!cmethod || mono_loader_get_last_error ())
7547 if (!dont_verify && !cfg->skip_visibility) {
7548 MonoMethod *target_method = cil_method;
7549 if (method->is_inflated) {
7550 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7552 if (!mono_method_can_access_method (method_definition, target_method) &&
7553 !mono_method_can_access_method (method, cil_method))
7554 METHOD_ACCESS_FAILURE;
7557 if (mono_security_core_clr_enabled ())
7558 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7560 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7561 /* MS.NET seems to silently convert this to a callvirt */
7566 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7567 * converts to a callvirt.
7569 * tests/bug-515884.il is an example of this behavior
7571 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7572 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7573 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7577 if (!cmethod->klass->inited)
7578 if (!mono_class_init (cmethod->klass))
7579 TYPE_LOAD_ERROR (cmethod->klass);
7581 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7582 mini_class_is_system_array (cmethod->klass)) {
7583 array_rank = cmethod->klass->rank;
7584 fsig = mono_method_signature (cmethod);
7586 fsig = mono_method_signature (cmethod);
7591 if (fsig->pinvoke) {
7592 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7593 check_for_pending_exc, cfg->compile_aot);
7594 fsig = mono_method_signature (wrapper);
7595 } else if (constrained_call) {
7596 fsig = mono_method_signature (cmethod);
7598 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7602 mono_save_token_info (cfg, image, token, cil_method);
7604 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7606 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7607 * foo (bar (), baz ())
7608 * works correctly. MS does this also:
7609 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7610 * The problem with this approach is that the debugger will stop after all calls returning a value,
7611 * even for simple cases, like:
7614 /* Special case a few common successor opcodes */
7615 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7616 need_seq_point = TRUE;
7619 n = fsig->param_count + fsig->hasthis;
7621 /* Don't support calls made using type arguments for now */
7623 if (cfg->gsharedvt) {
7624 if (mini_is_gsharedvt_signature (cfg, fsig))
7625 GSHAREDVT_FAILURE (*ip);
7629 if (mono_security_cas_enabled ()) {
7630 if (check_linkdemand (cfg, method, cmethod))
7631 INLINE_FAILURE ("linkdemand");
7632 CHECK_CFG_EXCEPTION;
7635 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7636 g_assert_not_reached ();
7639 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7642 if (!cfg->generic_sharing_context && cmethod)
7643 g_assert (!mono_method_check_context_used (cmethod));
7647 //g_assert (!virtual || fsig->hasthis);
7651 if (constrained_call) {
7652 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7654 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7656 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7657 /* The 'Own method' case below */
7658 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7659 /* 'The type parameter is instantiated as a reference type' case below. */
7660 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7661 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7662 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7663 MonoInst *args [16];
7666 * This case handles calls to
7667 * - object:ToString()/Equals()/GetHashCode(),
7668 * - System.IComparable<T>:CompareTo()
7669 * - System.IEquatable<T>:Equals ()
7670 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7674 if (mono_method_check_context_used (cmethod))
7675 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7677 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7678 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7680 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7681 if (fsig->hasthis && fsig->param_count) {
7682 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7683 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7684 ins->dreg = alloc_preg (cfg);
7685 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7686 MONO_ADD_INS (cfg->cbb, ins);
7689 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7692 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7694 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7695 addr_reg = ins->dreg;
7696 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7698 EMIT_NEW_ICONST (cfg, args [3], 0);
7699 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7702 EMIT_NEW_ICONST (cfg, args [3], 0);
7703 EMIT_NEW_ICONST (cfg, args [4], 0);
7705 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7708 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7709 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7710 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7714 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7715 MONO_ADD_INS (cfg->cbb, add);
7717 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7718 MONO_ADD_INS (cfg->cbb, ins);
7719 /* ins represents the call result */
7724 GSHAREDVT_FAILURE (*ip);
7728 * We have the `constrained.' prefix opcode.
7730 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7732 * The type parameter is instantiated as a valuetype,
7733 * but that type doesn't override the method we're
7734 * calling, so we need to box `this'.
7736 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7737 ins->klass = constrained_call;
7738 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7739 CHECK_CFG_EXCEPTION;
7740 } else if (!constrained_call->valuetype) {
7741 int dreg = alloc_ireg_ref (cfg);
7744 * The type parameter is instantiated as a reference
7745 * type. We have a managed pointer on the stack, so
7746 * we need to dereference it here.
7748 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7749 ins->type = STACK_OBJ;
7752 if (cmethod->klass->valuetype) {
7755 /* Interface method */
7758 mono_class_setup_vtable (constrained_call);
7759 CHECK_TYPELOAD (constrained_call);
7760 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7762 TYPE_LOAD_ERROR (constrained_call);
7763 slot = mono_method_get_vtable_slot (cmethod);
7765 TYPE_LOAD_ERROR (cmethod->klass);
7766 cmethod = constrained_call->vtable [ioffset + slot];
7768 if (cmethod->klass == mono_defaults.enum_class) {
7769 /* Enum implements some interfaces, so treat this as the first case */
7770 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7771 ins->klass = constrained_call;
7772 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7773 CHECK_CFG_EXCEPTION;
7778 constrained_call = NULL;
7781 if (!calli && check_call_signature (cfg, fsig, sp))
7784 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7785 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7786 delegate_invoke = TRUE;
7789 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7791 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7792 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7800 * If the callee is a shared method, then its static cctor
7801 * might not get called after the call was patched.
7803 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7804 emit_generic_class_init (cfg, cmethod->klass);
7805 CHECK_TYPELOAD (cmethod->klass);
7809 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7811 if (cfg->generic_sharing_context && cmethod) {
7812 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7814 context_used = mini_method_check_context_used (cfg, cmethod);
7816 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7817 /* Generic method interface
7818 calls are resolved via a
7819 helper function and don't
7821 if (!cmethod_context || !cmethod_context->method_inst)
7822 pass_imt_from_rgctx = TRUE;
7826 * If a shared method calls another
7827 * shared method then the caller must
7828 * have a generic sharing context
7829 * because the magic trampoline
7830 * requires it. FIXME: We shouldn't
7831 * have to force the vtable/mrgctx
7832 * variable here. Instead there
7833 * should be a flag in the cfg to
7834 * request a generic sharing context.
7837 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7838 mono_get_vtable_var (cfg);
7843 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7845 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7847 CHECK_TYPELOAD (cmethod->klass);
7848 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7853 g_assert (!vtable_arg);
7855 if (!cfg->compile_aot) {
7857 * emit_get_rgctx_method () calls mono_class_vtable () so check
7858 * for type load errors before.
7860 mono_class_setup_vtable (cmethod->klass);
7861 CHECK_TYPELOAD (cmethod->klass);
7864 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7866 /* !marshalbyref is needed to properly handle generic methods + remoting */
7867 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7868 MONO_METHOD_IS_FINAL (cmethod)) &&
7869 !mono_class_is_marshalbyref (cmethod->klass)) {
7876 if (pass_imt_from_rgctx) {
7877 g_assert (!pass_vtable);
7880 imt_arg = emit_get_rgctx_method (cfg, context_used,
7881 cmethod, MONO_RGCTX_INFO_METHOD);
7885 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7887 /* Calling virtual generic methods */
7888 if (cmethod && virtual &&
7889 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7890 !(MONO_METHOD_IS_FINAL (cmethod) &&
7891 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7892 fsig->generic_param_count &&
7893 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7894 MonoInst *this_temp, *this_arg_temp, *store;
7895 MonoInst *iargs [4];
7896 gboolean use_imt = FALSE;
7898 g_assert (fsig->is_inflated);
7900 /* Prevent inlining of methods that contain indirect calls */
7901 INLINE_FAILURE ("virtual generic call");
7903 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7904 GSHAREDVT_FAILURE (*ip);
7906 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7907 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7912 g_assert (!imt_arg);
7914 g_assert (cmethod->is_inflated);
7915 imt_arg = emit_get_rgctx_method (cfg, context_used,
7916 cmethod, MONO_RGCTX_INFO_METHOD);
7917 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7919 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7920 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7921 MONO_ADD_INS (bblock, store);
7923 /* FIXME: This should be a managed pointer */
7924 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7926 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7927 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7928 cmethod, MONO_RGCTX_INFO_METHOD);
7929 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7930 addr = mono_emit_jit_icall (cfg,
7931 mono_helper_compile_generic_method, iargs);
7933 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7935 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7942 * Implement a workaround for the inherent races involved in locking:
7948 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7949 * try block, the Exit () won't be executed, see:
7950 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7951 * To work around this, we extend such try blocks to include the last x bytes
7952 * of the Monitor.Enter () call.
7954 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7955 MonoBasicBlock *tbb;
7957 GET_BBLOCK (cfg, tbb, ip + 5);
7959 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7960 * from Monitor.Enter like ArgumentNullException.
7962 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7963 /* Mark this bblock as needing to be extended */
7964 tbb->extend_try_block = TRUE;
7968 /* Conversion to a JIT intrinsic */
7969 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7971 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7972 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7979 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7980 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7981 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7982 !g_list_find (dont_inline, cmethod)) {
7984 gboolean always = FALSE;
7986 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7987 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7988 /* Prevent inlining of methods that call wrappers */
7989 INLINE_FAILURE ("wrapper call");
7990 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7994 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7996 cfg->real_offset += 5;
7999 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8000 /* *sp is already set by inline_method */
8005 inline_costs += costs;
8011 /* Tail recursion elimination */
8012 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8013 gboolean has_vtargs = FALSE;
8016 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8017 INLINE_FAILURE ("tail call");
8019 /* keep it simple */
8020 for (i = fsig->param_count - 1; i >= 0; i--) {
8021 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8026 for (i = 0; i < n; ++i)
8027 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8028 MONO_INST_NEW (cfg, ins, OP_BR);
8029 MONO_ADD_INS (bblock, ins);
8030 tblock = start_bblock->out_bb [0];
8031 link_bblock (cfg, bblock, tblock);
8032 ins->inst_target_bb = tblock;
8033 start_new_bblock = 1;
8035 /* skip the CEE_RET, too */
8036 if (ip_in_bb (cfg, bblock, ip + 5))
8043 inline_costs += 10 * num_calls++;
8046 * Making generic calls out of gsharedvt methods.
8048 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8049 MonoRgctxInfoType info_type;
8052 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8053 //GSHAREDVT_FAILURE (*ip);
8054 // disable for possible remoting calls
8055 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8056 GSHAREDVT_FAILURE (*ip);
8057 if (fsig->generic_param_count) {
8058 /* virtual generic call */
8059 g_assert (mono_use_imt);
8060 g_assert (!imt_arg);
8061 /* Same as the virtual generic case above */
8062 imt_arg = emit_get_rgctx_method (cfg, context_used,
8063 cmethod, MONO_RGCTX_INFO_METHOD);
8064 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8069 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8070 /* test_0_multi_dim_arrays () in gshared.cs */
8071 GSHAREDVT_FAILURE (*ip);
8073 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8074 keep_this_alive = sp [0];
8076 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8077 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8079 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8080 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8082 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8084 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8086 * We pass the address to the gsharedvt trampoline in the rgctx reg
8088 MonoInst *callee = addr;
8090 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8092 GSHAREDVT_FAILURE (*ip);
8094 addr = emit_get_rgctx_sig (cfg, context_used,
8095 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8096 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8100 /* Generic sharing */
8101 /* FIXME: only do this for generic methods if
8102 they are not shared! */
8103 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8104 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8105 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8106 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8107 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8108 INLINE_FAILURE ("gshared");
8110 g_assert (cfg->generic_sharing_context && cmethod);
8114 * We are compiling a call to a
8115 * generic method from shared code,
8116 * which means that we have to look up
8117 * the method in the rgctx and do an
8121 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8123 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8124 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8128 /* Indirect calls */
8130 if (call_opcode == CEE_CALL)
8131 g_assert (context_used);
8132 else if (call_opcode == CEE_CALLI)
8133 g_assert (!vtable_arg);
8135 /* FIXME: what the hell is this??? */
8136 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8137 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8139 /* Prevent inlining of methods with indirect calls */
8140 INLINE_FAILURE ("indirect call");
8142 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8147 * Instead of emitting an indirect call, emit a direct call
8148 * with the contents of the aotconst as the patch info.
8150 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8151 info_type = addr->inst_c1;
8152 info_data = addr->inst_p0;
8154 info_type = addr->inst_right->inst_c1;
8155 info_data = addr->inst_right->inst_left;
8158 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8159 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8164 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8172 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8173 MonoInst *val = sp [fsig->param_count];
8175 if (val->type == STACK_OBJ) {
8176 MonoInst *iargs [2];
8181 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8184 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8185 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8186 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8187 emit_write_barrier (cfg, addr, val);
8188 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8189 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8191 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8192 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8193 if (!cmethod->klass->element_class->valuetype && !readonly)
8194 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8195 CHECK_TYPELOAD (cmethod->klass);
8198 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8201 g_assert_not_reached ();
8208 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8212 /* Tail prefix / tail call optimization */
8214 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8215 /* FIXME: runtime generic context pointer for jumps? */
8216 /* FIXME: handle this for generic sharing eventually */
8217 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8218 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8219 supported_tail_call = TRUE;
8220 if (supported_tail_call) {
8221 if (call_opcode != CEE_CALL)
8222 supported_tail_call = FALSE;
8225 if (supported_tail_call) {
8228 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8229 INLINE_FAILURE ("tail call");
8231 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8233 if (ARCH_USE_OP_TAIL_CALL) {
8234 /* Handle tail calls similarly to normal calls */
8237 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8238 call->tail_call = TRUE;
8239 call->method = cmethod;
8240 call->signature = mono_method_signature (cmethod);
8243 * We implement tail calls by storing the actual arguments into the
8244 * argument variables, then emitting a CEE_JMP.
8246 for (i = 0; i < n; ++i) {
8247 /* Prevent argument from being register allocated */
8248 arg_array [i]->flags |= MONO_INST_VOLATILE;
8249 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8251 ins = (MonoInst*)call;
8252 ins->inst_p0 = cmethod;
8253 ins->inst_p1 = arg_array [0];
8254 MONO_ADD_INS (bblock, ins);
8255 link_bblock (cfg, bblock, end_bblock);
8256 start_new_bblock = 1;
8258 // FIXME: Eliminate unreachable epilogs
8261 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8262 * only reachable from this call.
8264 GET_BBLOCK (cfg, tblock, ip + 5);
8265 if (tblock == bblock || tblock->in_count == 0)
8274 * Synchronized wrappers.
8275 * It's hard to determine where to replace a method with its synchronized
8276 * wrapper without causing an infinite recursion. The current solution is
8277 * to add the synchronized wrapper in the trampolines, and to
8278 * change the called method to a dummy wrapper, and resolve that wrapper
8279 * to the real method in mono_jit_compile_method ().
8281 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8282 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8283 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8284 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8288 INLINE_FAILURE ("call");
8289 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8290 imt_arg, vtable_arg);
8293 link_bblock (cfg, bblock, end_bblock);
8294 start_new_bblock = 1;
8296 // FIXME: Eliminate unreachable epilogs
8299 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8300 * only reachable from this call.
8302 GET_BBLOCK (cfg, tblock, ip + 5);
8303 if (tblock == bblock || tblock->in_count == 0)
8310 /* End of call, INS should contain the result of the call, if any */
8312 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8315 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8320 if (keep_this_alive) {
8321 MonoInst *dummy_use;
8323 /* See mono_emit_method_call_full () */
8324 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8327 CHECK_CFG_EXCEPTION;
8331 g_assert (*ip == CEE_RET);
8335 constrained_call = NULL;
8337 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8341 if (cfg->method != method) {
8342 /* return from inlined method */
8344 * If in_count == 0, that means the ret is unreachable due to
8345 * being preceded by a throw. In that case, inline_method () will
8346 * handle setting the return value
8347 * (test case: test_0_inline_throw ()).
8349 if (return_var && cfg->cbb->in_count) {
8350 MonoType *ret_type = mono_method_signature (method)->ret;
8356 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8359 //g_assert (returnvar != -1);
8360 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8361 cfg->ret_var_set = TRUE;
8365 MonoType *ret_type = mono_method_signature (method)->ret;
8367 if (seq_points && !sym_seq_points) {
8369 * Place a seq point here too even through the IL stack is not
8370 * empty, so a step over on
8373 * will work correctly.
8375 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8376 MONO_ADD_INS (cfg->cbb, ins);
8379 g_assert (!return_var);
8383 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8386 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8389 if (!cfg->vret_addr) {
8392 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8394 EMIT_NEW_RETLOADA (cfg, ret_addr);
8396 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8397 ins->klass = mono_class_from_mono_type (ret_type);
8400 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8401 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8402 MonoInst *iargs [1];
8406 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8407 mono_arch_emit_setret (cfg, method, conv);
8409 mono_arch_emit_setret (cfg, method, *sp);
8412 mono_arch_emit_setret (cfg, method, *sp);
8417 if (sp != stack_start)
8419 MONO_INST_NEW (cfg, ins, OP_BR);
8421 ins->inst_target_bb = end_bblock;
8422 MONO_ADD_INS (bblock, ins);
8423 link_bblock (cfg, bblock, end_bblock);
8424 start_new_bblock = 1;
8428 MONO_INST_NEW (cfg, ins, OP_BR);
8430 target = ip + 1 + (signed char)(*ip);
8432 GET_BBLOCK (cfg, tblock, target);
8433 link_bblock (cfg, bblock, tblock);
8434 ins->inst_target_bb = tblock;
8435 if (sp != stack_start) {
8436 handle_stack_args (cfg, stack_start, sp - stack_start);
8438 CHECK_UNVERIFIABLE (cfg);
8440 MONO_ADD_INS (bblock, ins);
8441 start_new_bblock = 1;
8442 inline_costs += BRANCH_COST;
8456 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8458 target = ip + 1 + *(signed char*)ip;
8464 inline_costs += BRANCH_COST;
8468 MONO_INST_NEW (cfg, ins, OP_BR);
8471 target = ip + 4 + (gint32)read32(ip);
8473 GET_BBLOCK (cfg, tblock, target);
8474 link_bblock (cfg, bblock, tblock);
8475 ins->inst_target_bb = tblock;
8476 if (sp != stack_start) {
8477 handle_stack_args (cfg, stack_start, sp - stack_start);
8479 CHECK_UNVERIFIABLE (cfg);
8482 MONO_ADD_INS (bblock, ins);
8484 start_new_bblock = 1;
8485 inline_costs += BRANCH_COST;
8492 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8493 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8494 guint32 opsize = is_short ? 1 : 4;
8496 CHECK_OPSIZE (opsize);
8498 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8501 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8506 GET_BBLOCK (cfg, tblock, target);
8507 link_bblock (cfg, bblock, tblock);
8508 GET_BBLOCK (cfg, tblock, ip);
8509 link_bblock (cfg, bblock, tblock);
8511 if (sp != stack_start) {
8512 handle_stack_args (cfg, stack_start, sp - stack_start);
8513 CHECK_UNVERIFIABLE (cfg);
8516 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8517 cmp->sreg1 = sp [0]->dreg;
8518 type_from_op (cmp, sp [0], NULL);
8521 #if SIZEOF_REGISTER == 4
8522 if (cmp->opcode == OP_LCOMPARE_IMM) {
8523 /* Convert it to OP_LCOMPARE */
8524 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8525 ins->type = STACK_I8;
8526 ins->dreg = alloc_dreg (cfg, STACK_I8);
8528 MONO_ADD_INS (bblock, ins);
8529 cmp->opcode = OP_LCOMPARE;
8530 cmp->sreg2 = ins->dreg;
8533 MONO_ADD_INS (bblock, cmp);
8535 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8536 type_from_op (ins, sp [0], NULL);
8537 MONO_ADD_INS (bblock, ins);
8538 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8539 GET_BBLOCK (cfg, tblock, target);
8540 ins->inst_true_bb = tblock;
8541 GET_BBLOCK (cfg, tblock, ip);
8542 ins->inst_false_bb = tblock;
8543 start_new_bblock = 2;
8546 inline_costs += BRANCH_COST;
8561 MONO_INST_NEW (cfg, ins, *ip);
8563 target = ip + 4 + (gint32)read32(ip);
8569 inline_costs += BRANCH_COST;
8573 MonoBasicBlock **targets;
8574 MonoBasicBlock *default_bblock;
8575 MonoJumpInfoBBTable *table;
8576 int offset_reg = alloc_preg (cfg);
8577 int target_reg = alloc_preg (cfg);
8578 int table_reg = alloc_preg (cfg);
8579 int sum_reg = alloc_preg (cfg);
8580 gboolean use_op_switch;
8584 n = read32 (ip + 1);
8587 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8591 CHECK_OPSIZE (n * sizeof (guint32));
8592 target = ip + n * sizeof (guint32);
8594 GET_BBLOCK (cfg, default_bblock, target);
8595 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8597 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8598 for (i = 0; i < n; ++i) {
8599 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8600 targets [i] = tblock;
8601 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8605 if (sp != stack_start) {
8607 * Link the current bb with the targets as well, so handle_stack_args
8608 * will set their in_stack correctly.
8610 link_bblock (cfg, bblock, default_bblock);
8611 for (i = 0; i < n; ++i)
8612 link_bblock (cfg, bblock, targets [i]);
8614 handle_stack_args (cfg, stack_start, sp - stack_start);
8616 CHECK_UNVERIFIABLE (cfg);
8619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8620 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8623 for (i = 0; i < n; ++i)
8624 link_bblock (cfg, bblock, targets [i]);
8626 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8627 table->table = targets;
8628 table->table_size = n;
8630 use_op_switch = FALSE;
8632 /* ARM implements SWITCH statements differently */
8633 /* FIXME: Make it use the generic implementation */
8634 if (!cfg->compile_aot)
8635 use_op_switch = TRUE;
8638 if (COMPILE_LLVM (cfg))
8639 use_op_switch = TRUE;
8641 cfg->cbb->has_jump_table = 1;
8643 if (use_op_switch) {
8644 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8645 ins->sreg1 = src1->dreg;
8646 ins->inst_p0 = table;
8647 ins->inst_many_bb = targets;
8648 ins->klass = GUINT_TO_POINTER (n);
8649 MONO_ADD_INS (cfg->cbb, ins);
8651 if (sizeof (gpointer) == 8)
8652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8656 #if SIZEOF_REGISTER == 8
8657 /* The upper word might not be zero, and we add it to a 64 bit address later */
8658 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8661 if (cfg->compile_aot) {
8662 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8664 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8665 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8666 ins->inst_p0 = table;
8667 ins->dreg = table_reg;
8668 MONO_ADD_INS (cfg->cbb, ins);
8671 /* FIXME: Use load_memindex */
8672 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8674 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8676 start_new_bblock = 1;
8677 inline_costs += (BRANCH_COST * 2);
8697 dreg = alloc_freg (cfg);
8700 dreg = alloc_lreg (cfg);
8703 dreg = alloc_ireg_ref (cfg);
8706 dreg = alloc_preg (cfg);
8709 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8710 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8711 ins->flags |= ins_flag;
8713 MONO_ADD_INS (bblock, ins);
8715 if (ins->flags & MONO_INST_VOLATILE) {
8716 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8717 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8718 emit_memory_barrier (cfg, FullBarrier);
8733 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8734 ins->flags |= ins_flag;
8737 if (ins->flags & MONO_INST_VOLATILE) {
8738 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8739 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8740 emit_memory_barrier (cfg, FullBarrier);
8743 MONO_ADD_INS (bblock, ins);
8745 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8746 emit_write_barrier (cfg, sp [0], sp [1]);
8755 MONO_INST_NEW (cfg, ins, (*ip));
8757 ins->sreg1 = sp [0]->dreg;
8758 ins->sreg2 = sp [1]->dreg;
8759 type_from_op (ins, sp [0], sp [1]);
8761 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8763 /* Use the immediate opcodes if possible */
8764 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8765 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8766 if (imm_opcode != -1) {
8767 ins->opcode = imm_opcode;
8768 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8771 sp [1]->opcode = OP_NOP;
8775 MONO_ADD_INS ((cfg)->cbb, (ins));
8777 *sp++ = mono_decompose_opcode (cfg, ins);
8794 MONO_INST_NEW (cfg, ins, (*ip));
8796 ins->sreg1 = sp [0]->dreg;
8797 ins->sreg2 = sp [1]->dreg;
8798 type_from_op (ins, sp [0], sp [1]);
8800 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8801 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8803 /* FIXME: Pass opcode to is_inst_imm */
8805 /* Use the immediate opcodes if possible */
8806 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8809 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8810 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8811 /* Keep emulated opcodes which are optimized away later */
8812 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8813 imm_opcode = mono_op_to_op_imm (ins->opcode);
8816 if (imm_opcode != -1) {
8817 ins->opcode = imm_opcode;
8818 if (sp [1]->opcode == OP_I8CONST) {
8819 #if SIZEOF_REGISTER == 8
8820 ins->inst_imm = sp [1]->inst_l;
8822 ins->inst_ls_word = sp [1]->inst_ls_word;
8823 ins->inst_ms_word = sp [1]->inst_ms_word;
8827 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8830 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8831 if (sp [1]->next == NULL)
8832 sp [1]->opcode = OP_NOP;
8835 MONO_ADD_INS ((cfg)->cbb, (ins));
8837 *sp++ = mono_decompose_opcode (cfg, ins);
8850 case CEE_CONV_OVF_I8:
8851 case CEE_CONV_OVF_U8:
8855 /* Special case this earlier so we have long constants in the IR */
8856 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8857 int data = sp [-1]->inst_c0;
8858 sp [-1]->opcode = OP_I8CONST;
8859 sp [-1]->type = STACK_I8;
8860 #if SIZEOF_REGISTER == 8
8861 if ((*ip) == CEE_CONV_U8)
8862 sp [-1]->inst_c0 = (guint32)data;
8864 sp [-1]->inst_c0 = data;
8866 sp [-1]->inst_ls_word = data;
8867 if ((*ip) == CEE_CONV_U8)
8868 sp [-1]->inst_ms_word = 0;
8870 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8872 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8879 case CEE_CONV_OVF_I4:
8880 case CEE_CONV_OVF_I1:
8881 case CEE_CONV_OVF_I2:
8882 case CEE_CONV_OVF_I:
8883 case CEE_CONV_OVF_U:
8886 if (sp [-1]->type == STACK_R8) {
8887 ADD_UNOP (CEE_CONV_OVF_I8);
8894 case CEE_CONV_OVF_U1:
8895 case CEE_CONV_OVF_U2:
8896 case CEE_CONV_OVF_U4:
8899 if (sp [-1]->type == STACK_R8) {
8900 ADD_UNOP (CEE_CONV_OVF_U8);
8907 case CEE_CONV_OVF_I1_UN:
8908 case CEE_CONV_OVF_I2_UN:
8909 case CEE_CONV_OVF_I4_UN:
8910 case CEE_CONV_OVF_I8_UN:
8911 case CEE_CONV_OVF_U1_UN:
8912 case CEE_CONV_OVF_U2_UN:
8913 case CEE_CONV_OVF_U4_UN:
8914 case CEE_CONV_OVF_U8_UN:
8915 case CEE_CONV_OVF_I_UN:
8916 case CEE_CONV_OVF_U_UN:
8923 CHECK_CFG_EXCEPTION;
8927 case CEE_ADD_OVF_UN:
8929 case CEE_MUL_OVF_UN:
8931 case CEE_SUB_OVF_UN:
8937 GSHAREDVT_FAILURE (*ip);
8940 token = read32 (ip + 1);
8941 klass = mini_get_class (method, token, generic_context);
8942 CHECK_TYPELOAD (klass);
8944 if (generic_class_is_reference_type (cfg, klass)) {
8945 MonoInst *store, *load;
8946 int dreg = alloc_ireg_ref (cfg);
8948 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8949 load->flags |= ins_flag;
8950 MONO_ADD_INS (cfg->cbb, load);
8952 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8953 store->flags |= ins_flag;
8954 MONO_ADD_INS (cfg->cbb, store);
8956 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8957 emit_write_barrier (cfg, sp [0], sp [1]);
8959 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8971 token = read32 (ip + 1);
8972 klass = mini_get_class (method, token, generic_context);
8973 CHECK_TYPELOAD (klass);
8975 /* Optimize the common ldobj+stloc combination */
8985 loc_index = ip [5] - CEE_STLOC_0;
8992 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8993 CHECK_LOCAL (loc_index);
8995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8996 ins->dreg = cfg->locals [loc_index]->dreg;
9002 /* Optimize the ldobj+stobj combination */
9003 /* The reference case ends up being a load+store anyway */
9004 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9009 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9016 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9025 CHECK_STACK_OVF (1);
9027 n = read32 (ip + 1);
9029 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9030 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9031 ins->type = STACK_OBJ;
9034 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9035 MonoInst *iargs [1];
9037 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9038 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9040 if (cfg->opt & MONO_OPT_SHARED) {
9041 MonoInst *iargs [3];
9043 if (cfg->compile_aot) {
9044 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9046 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9047 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9048 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9049 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9050 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9052 if (bblock->out_of_line) {
9053 MonoInst *iargs [2];
9055 if (image == mono_defaults.corlib) {
9057 * Avoid relocations in AOT and save some space by using a
9058 * version of helper_ldstr specialized to mscorlib.
9060 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9061 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9063 /* Avoid creating the string object */
9064 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9065 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9066 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9070 if (cfg->compile_aot) {
9071 NEW_LDSTRCONST (cfg, ins, image, n);
9073 MONO_ADD_INS (bblock, ins);
9076 NEW_PCONST (cfg, ins, NULL);
9077 ins->type = STACK_OBJ;
9078 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9080 OUT_OF_MEMORY_FAILURE;
9083 MONO_ADD_INS (bblock, ins);
9092 MonoInst *iargs [2];
9093 MonoMethodSignature *fsig;
9096 MonoInst *vtable_arg = NULL;
9099 token = read32 (ip + 1);
9100 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9101 if (!cmethod || mono_loader_get_last_error ())
9103 fsig = mono_method_get_signature (cmethod, image, token);
9107 mono_save_token_info (cfg, image, token, cmethod);
9109 if (!mono_class_init (cmethod->klass))
9110 TYPE_LOAD_ERROR (cmethod->klass);
9112 context_used = mini_method_check_context_used (cfg, cmethod);
9114 if (mono_security_cas_enabled ()) {
9115 if (check_linkdemand (cfg, method, cmethod))
9116 INLINE_FAILURE ("linkdemand");
9117 CHECK_CFG_EXCEPTION;
9118 } else if (mono_security_core_clr_enabled ()) {
9119 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9122 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9123 emit_generic_class_init (cfg, cmethod->klass);
9124 CHECK_TYPELOAD (cmethod->klass);
9128 if (cfg->gsharedvt) {
9129 if (mini_is_gsharedvt_variable_signature (sig))
9130 GSHAREDVT_FAILURE (*ip);
9134 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9135 mono_method_is_generic_sharable (cmethod, TRUE)) {
9136 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9137 mono_class_vtable (cfg->domain, cmethod->klass);
9138 CHECK_TYPELOAD (cmethod->klass);
9140 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9141 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9144 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9145 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9147 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9149 CHECK_TYPELOAD (cmethod->klass);
9150 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9155 n = fsig->param_count;
9159 * Generate smaller code for the common newobj <exception> instruction in
9160 * argument checking code.
9162 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9163 is_exception_class (cmethod->klass) && n <= 2 &&
9164 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9165 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9166 MonoInst *iargs [3];
9168 g_assert (!vtable_arg);
9172 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9175 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9179 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9184 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9187 g_assert_not_reached ();
9195 /* move the args to allow room for 'this' in the first position */
9201 /* check_call_signature () requires sp[0] to be set */
9202 this_ins.type = STACK_OBJ;
9204 if (check_call_signature (cfg, fsig, sp))
9209 if (mini_class_is_system_array (cmethod->klass)) {
9210 g_assert (!vtable_arg);
9212 *sp = emit_get_rgctx_method (cfg, context_used,
9213 cmethod, MONO_RGCTX_INFO_METHOD);
9215 /* Avoid varargs in the common case */
9216 if (fsig->param_count == 1)
9217 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9218 else if (fsig->param_count == 2)
9219 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9220 else if (fsig->param_count == 3)
9221 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9222 else if (fsig->param_count == 4)
9223 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9225 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9226 } else if (cmethod->string_ctor) {
9227 g_assert (!context_used);
9228 g_assert (!vtable_arg);
9229 /* we simply pass a null pointer */
9230 EMIT_NEW_PCONST (cfg, *sp, NULL);
9231 /* now call the string ctor */
9232 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9234 MonoInst* callvirt_this_arg = NULL;
9236 if (cmethod->klass->valuetype) {
9237 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9238 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9239 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9244 * The code generated by mini_emit_virtual_call () expects
9245 * iargs [0] to be a boxed instance, but luckily the vcall
9246 * will be transformed into a normal call there.
9248 } else if (context_used) {
9249 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9252 MonoVTable *vtable = NULL;
9254 if (!cfg->compile_aot)
9255 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9256 CHECK_TYPELOAD (cmethod->klass);
9259 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9260 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9261 * As a workaround, we call class cctors before allocating objects.
9263 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9264 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9265 if (cfg->verbose_level > 2)
9266 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9267 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9270 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9273 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9276 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9278 /* Now call the actual ctor */
9279 /* Avoid virtual calls to ctors if possible */
9280 if (mono_class_is_marshalbyref (cmethod->klass))
9281 callvirt_this_arg = sp [0];
9284 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9285 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9286 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9291 CHECK_CFG_EXCEPTION;
9292 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9293 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9294 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9295 !g_list_find (dont_inline, cmethod)) {
9298 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9299 cfg->real_offset += 5;
9302 inline_costs += costs - 5;
9304 INLINE_FAILURE ("inline failure");
9305 // FIXME-VT: Clean this up
9306 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9307 GSHAREDVT_FAILURE(*ip);
9308 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9310 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9313 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9314 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9315 } else if (context_used &&
9316 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9317 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9318 MonoInst *cmethod_addr;
9320 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9321 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9323 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9325 INLINE_FAILURE ("ctor call");
9326 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9327 callvirt_this_arg, NULL, vtable_arg);
9331 if (alloc == NULL) {
9333 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9334 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9348 token = read32 (ip + 1);
9349 klass = mini_get_class (method, token, generic_context);
9350 CHECK_TYPELOAD (klass);
9351 if (sp [0]->type != STACK_OBJ)
9354 context_used = mini_class_check_context_used (cfg, klass);
9356 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9357 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9364 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9367 if (cfg->compile_aot)
9368 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9370 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9372 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9374 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9375 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9376 reset_cast_details (cfg);
9379 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9380 MonoMethod *mono_castclass;
9381 MonoInst *iargs [1];
9384 mono_castclass = mono_marshal_get_castclass (klass);
9387 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9388 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9389 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9390 reset_cast_details (cfg);
9391 CHECK_CFG_EXCEPTION;
9392 g_assert (costs > 0);
9395 cfg->real_offset += 5;
9400 inline_costs += costs;
9403 ins = handle_castclass (cfg, klass, *sp, context_used);
9404 CHECK_CFG_EXCEPTION;
9414 token = read32 (ip + 1);
9415 klass = mini_get_class (method, token, generic_context);
9416 CHECK_TYPELOAD (klass);
9417 if (sp [0]->type != STACK_OBJ)
9420 context_used = mini_class_check_context_used (cfg, klass);
9422 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9423 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9430 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9433 if (cfg->compile_aot)
9434 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9436 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9438 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9441 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9442 MonoMethod *mono_isinst;
9443 MonoInst *iargs [1];
9446 mono_isinst = mono_marshal_get_isinst (klass);
9449 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9450 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9451 CHECK_CFG_EXCEPTION;
9452 g_assert (costs > 0);
9455 cfg->real_offset += 5;
9460 inline_costs += costs;
9463 ins = handle_isinst (cfg, klass, *sp, context_used);
9464 CHECK_CFG_EXCEPTION;
9471 case CEE_UNBOX_ANY: {
9475 token = read32 (ip + 1);
9476 klass = mini_get_class (method, token, generic_context);
9477 CHECK_TYPELOAD (klass);
9479 mono_save_token_info (cfg, image, token, klass);
9481 context_used = mini_class_check_context_used (cfg, klass);
9483 if (mini_is_gsharedvt_klass (cfg, klass)) {
9484 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9492 if (generic_class_is_reference_type (cfg, klass)) {
9493 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9494 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9495 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9502 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9505 /*FIXME AOT support*/
9506 if (cfg->compile_aot)
9507 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9509 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9511 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9512 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9515 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9516 MonoMethod *mono_castclass;
9517 MonoInst *iargs [1];
9520 mono_castclass = mono_marshal_get_castclass (klass);
9523 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9524 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9525 CHECK_CFG_EXCEPTION;
9526 g_assert (costs > 0);
9529 cfg->real_offset += 5;
9533 inline_costs += costs;
9535 ins = handle_castclass (cfg, klass, *sp, context_used);
9536 CHECK_CFG_EXCEPTION;
9544 if (mono_class_is_nullable (klass)) {
9545 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9552 ins = handle_unbox (cfg, klass, sp, context_used);
9558 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9571 token = read32 (ip + 1);
9572 klass = mini_get_class (method, token, generic_context);
9573 CHECK_TYPELOAD (klass);
9575 mono_save_token_info (cfg, image, token, klass);
9577 context_used = mini_class_check_context_used (cfg, klass);
9579 if (generic_class_is_reference_type (cfg, klass)) {
9585 if (klass == mono_defaults.void_class)
9587 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9589 /* frequent check in generic code: box (struct), brtrue */
9591 // FIXME: LLVM can't handle the inconsistent bb linking
9592 if (!mono_class_is_nullable (klass) &&
9593 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9594 (ip [5] == CEE_BRTRUE ||
9595 ip [5] == CEE_BRTRUE_S ||
9596 ip [5] == CEE_BRFALSE ||
9597 ip [5] == CEE_BRFALSE_S)) {
9598 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9600 MonoBasicBlock *true_bb, *false_bb;
9604 if (cfg->verbose_level > 3) {
9605 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9606 printf ("<box+brtrue opt>\n");
9614 target = ip + 1 + (signed char)(*ip);
9621 target = ip + 4 + (gint)(read32 (ip));
9625 g_assert_not_reached ();
9629 * We need to link both bblocks, since it is needed for handling stack
9630 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9631 * Branching to only one of them would lead to inconsistencies, so
9632 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9634 GET_BBLOCK (cfg, true_bb, target);
9635 GET_BBLOCK (cfg, false_bb, ip);
9637 mono_link_bblock (cfg, cfg->cbb, true_bb);
9638 mono_link_bblock (cfg, cfg->cbb, false_bb);
9640 if (sp != stack_start) {
9641 handle_stack_args (cfg, stack_start, sp - stack_start);
9643 CHECK_UNVERIFIABLE (cfg);
9646 if (COMPILE_LLVM (cfg)) {
9647 dreg = alloc_ireg (cfg);
9648 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9651 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9653 /* The JIT can't eliminate the iconst+compare */
9654 MONO_INST_NEW (cfg, ins, OP_BR);
9655 ins->inst_target_bb = is_true ? true_bb : false_bb;
9656 MONO_ADD_INS (cfg->cbb, ins);
9659 start_new_bblock = 1;
9663 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9665 CHECK_CFG_EXCEPTION;
9674 token = read32 (ip + 1);
9675 klass = mini_get_class (method, token, generic_context);
9676 CHECK_TYPELOAD (klass);
9678 mono_save_token_info (cfg, image, token, klass);
9680 context_used = mini_class_check_context_used (cfg, klass);
9682 if (mono_class_is_nullable (klass)) {
9685 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9686 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9690 ins = handle_unbox (cfg, klass, sp, context_used);
9703 MonoClassField *field;
9704 #ifndef DISABLE_REMOTING
9708 gboolean is_instance;
9710 gpointer addr = NULL;
9711 gboolean is_special_static;
9713 MonoInst *store_val = NULL;
9716 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9718 if (op == CEE_STFLD) {
9726 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9728 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9731 if (op == CEE_STSFLD) {
9739 token = read32 (ip + 1);
9740 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9741 field = mono_method_get_wrapper_data (method, token);
9742 klass = field->parent;
9745 field = mono_field_from_token (image, token, &klass, generic_context);
9749 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9750 FIELD_ACCESS_FAILURE;
9751 mono_class_init (klass);
9753 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9756 /* if the class is Critical then transparent code cannot access it's fields */
9757 if (!is_instance && mono_security_core_clr_enabled ())
9758 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9760 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9761 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9762 if (mono_security_core_clr_enabled ())
9763 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9767 * LDFLD etc. is usable on static fields as well, so convert those cases to
9770 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9782 g_assert_not_reached ();
9784 is_instance = FALSE;
9787 context_used = mini_class_check_context_used (cfg, klass);
9791 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9792 if (op == CEE_STFLD) {
9793 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9795 #ifndef DISABLE_REMOTING
9796 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9797 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9798 MonoInst *iargs [5];
9800 GSHAREDVT_FAILURE (op);
9803 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9804 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9805 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9809 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9810 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9811 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9812 CHECK_CFG_EXCEPTION;
9813 g_assert (costs > 0);
9815 cfg->real_offset += 5;
9818 inline_costs += costs;
9820 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9827 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9829 if (mini_is_gsharedvt_klass (cfg, klass)) {
9830 MonoInst *offset_ins;
9832 context_used = mini_class_check_context_used (cfg, klass);
9834 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9835 dreg = alloc_ireg_mp (cfg);
9836 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9837 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9838 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9840 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9842 if (sp [0]->opcode != OP_LDADDR)
9843 store->flags |= MONO_INST_FAULT;
9845 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9846 /* insert call to write barrier */
9850 dreg = alloc_ireg_mp (cfg);
9851 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9852 emit_write_barrier (cfg, ptr, sp [1]);
9855 store->flags |= ins_flag;
9862 #ifndef DISABLE_REMOTING
9863 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9864 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9865 MonoInst *iargs [4];
9867 GSHAREDVT_FAILURE (op);
9870 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9871 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9872 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9873 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9874 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9875 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9876 CHECK_CFG_EXCEPTION;
9878 g_assert (costs > 0);
9880 cfg->real_offset += 5;
9884 inline_costs += costs;
9886 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9892 if (sp [0]->type == STACK_VTYPE) {
9895 /* Have to compute the address of the variable */
9897 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9899 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9901 g_assert (var->klass == klass);
9903 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9907 if (op == CEE_LDFLDA) {
9908 if (is_magic_tls_access (field)) {
9909 GSHAREDVT_FAILURE (*ip);
9911 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9913 if (sp [0]->type == STACK_OBJ) {
9914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9915 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9918 dreg = alloc_ireg_mp (cfg);
9920 if (mini_is_gsharedvt_klass (cfg, klass)) {
9921 MonoInst *offset_ins;
9923 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9924 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9926 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9928 ins->klass = mono_class_from_mono_type (field->type);
9929 ins->type = STACK_MP;
9935 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9937 if (mini_is_gsharedvt_klass (cfg, klass)) {
9938 MonoInst *offset_ins;
9940 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9941 dreg = alloc_ireg_mp (cfg);
9942 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9943 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9947 load->flags |= ins_flag;
9948 if (sp [0]->opcode != OP_LDADDR)
9949 load->flags |= MONO_INST_FAULT;
9963 * We can only support shared generic static
9964 * field access on architectures where the
9965 * trampoline code has been extended to handle
9966 * the generic class init.
9968 #ifndef MONO_ARCH_VTABLE_REG
9969 GENERIC_SHARING_FAILURE (op);
9972 context_used = mini_class_check_context_used (cfg, klass);
9974 ftype = mono_field_get_type (field);
9976 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9979 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9980 * to be called here.
9982 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9983 mono_class_vtable (cfg->domain, klass);
9984 CHECK_TYPELOAD (klass);
9986 mono_domain_lock (cfg->domain);
9987 if (cfg->domain->special_static_fields)
9988 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9989 mono_domain_unlock (cfg->domain);
9991 is_special_static = mono_class_field_is_special_static (field);
9993 /* Generate IR to compute the field address */
9994 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9996 * Fast access to TLS data
9997 * Inline version of get_thread_static_data () in
10001 int idx, static_data_reg, array_reg, dreg;
10002 MonoInst *thread_ins;
10004 GSHAREDVT_FAILURE (op);
10006 // offset &= 0x7fffffff;
10007 // idx = (offset >> 24) - 1;
10008 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10010 thread_ins = mono_get_thread_intrinsic (cfg);
10011 MONO_ADD_INS (cfg->cbb, thread_ins);
10012 static_data_reg = alloc_ireg (cfg);
10013 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10015 if (cfg->compile_aot) {
10016 int offset_reg, offset2_reg, idx_reg;
10018 /* For TLS variables, this will return the TLS offset */
10019 EMIT_NEW_SFLDACONST (cfg, ins, field);
10020 offset_reg = ins->dreg;
10021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10022 idx_reg = alloc_ireg (cfg);
10023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10026 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10027 array_reg = alloc_ireg (cfg);
10028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10029 offset2_reg = alloc_ireg (cfg);
10030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10031 dreg = alloc_ireg (cfg);
10032 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10034 offset = (gsize)addr & 0x7fffffff;
10035 idx = (offset >> 24) - 1;
10037 array_reg = alloc_ireg (cfg);
10038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10039 dreg = alloc_ireg (cfg);
10040 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10042 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10043 (cfg->compile_aot && is_special_static) ||
10044 (context_used && is_special_static)) {
10045 MonoInst *iargs [2];
10047 g_assert (field->parent);
10048 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10049 if (context_used) {
10050 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10051 field, MONO_RGCTX_INFO_CLASS_FIELD);
10053 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10055 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10056 } else if (context_used) {
10057 MonoInst *static_data;
10060 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10061 method->klass->name_space, method->klass->name, method->name,
10062 depth, field->offset);
10065 if (mono_class_needs_cctor_run (klass, method))
10066 emit_generic_class_init (cfg, klass);
10069 * The pointer we're computing here is
10071 * super_info.static_data + field->offset
10073 static_data = emit_get_rgctx_klass (cfg, context_used,
10074 klass, MONO_RGCTX_INFO_STATIC_DATA);
10076 if (mini_is_gsharedvt_klass (cfg, klass)) {
10077 MonoInst *offset_ins;
10079 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10080 dreg = alloc_ireg_mp (cfg);
10081 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10082 } else if (field->offset == 0) {
10085 int addr_reg = mono_alloc_preg (cfg);
10086 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10088 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10089 MonoInst *iargs [2];
10091 g_assert (field->parent);
10092 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10093 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10094 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10096 MonoVTable *vtable = NULL;
10098 if (!cfg->compile_aot)
10099 vtable = mono_class_vtable (cfg->domain, klass);
10100 CHECK_TYPELOAD (klass);
10103 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10104 if (!(g_slist_find (class_inits, klass))) {
10105 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10106 if (cfg->verbose_level > 2)
10107 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10108 class_inits = g_slist_prepend (class_inits, klass);
10111 if (cfg->run_cctors) {
10113 /* This makes so that inline cannot trigger */
10114 /* .cctors: too many apps depend on them */
10115 /* running with a specific order... */
10117 if (! vtable->initialized)
10118 INLINE_FAILURE ("class init");
10119 ex = mono_runtime_class_init_full (vtable, FALSE);
10121 set_exception_object (cfg, ex);
10122 goto exception_exit;
10126 if (cfg->compile_aot)
10127 EMIT_NEW_SFLDACONST (cfg, ins, field);
10130 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10131 EMIT_NEW_PCONST (cfg, ins, addr);
10134 MonoInst *iargs [1];
10135 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10136 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10140 /* Generate IR to do the actual load/store operation */
10142 if (op == CEE_LDSFLDA) {
10143 ins->klass = mono_class_from_mono_type (ftype);
10144 ins->type = STACK_PTR;
10146 } else if (op == CEE_STSFLD) {
10149 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10150 store->flags |= ins_flag;
10152 gboolean is_const = FALSE;
10153 MonoVTable *vtable = NULL;
10154 gpointer addr = NULL;
10156 if (!context_used) {
10157 vtable = mono_class_vtable (cfg->domain, klass);
10158 CHECK_TYPELOAD (klass);
10160 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10161 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10162 int ro_type = ftype->type;
10164 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10165 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10166 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10169 GSHAREDVT_FAILURE (op);
10171 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10174 case MONO_TYPE_BOOLEAN:
10176 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10180 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10183 case MONO_TYPE_CHAR:
10185 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10189 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10194 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10198 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10203 case MONO_TYPE_PTR:
10204 case MONO_TYPE_FNPTR:
10205 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10206 type_to_eval_stack_type ((cfg), field->type, *sp);
10209 case MONO_TYPE_STRING:
10210 case MONO_TYPE_OBJECT:
10211 case MONO_TYPE_CLASS:
10212 case MONO_TYPE_SZARRAY:
10213 case MONO_TYPE_ARRAY:
10214 if (!mono_gc_is_moving ()) {
10215 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10216 type_to_eval_stack_type ((cfg), field->type, *sp);
10224 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10229 case MONO_TYPE_VALUETYPE:
10239 CHECK_STACK_OVF (1);
10241 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10242 load->flags |= ins_flag;
10255 token = read32 (ip + 1);
10256 klass = mini_get_class (method, token, generic_context);
10257 CHECK_TYPELOAD (klass);
10258 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10259 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10260 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10261 generic_class_is_reference_type (cfg, klass)) {
10262 /* insert call to write barrier */
10263 emit_write_barrier (cfg, sp [0], sp [1]);
10275 const char *data_ptr;
10277 guint32 field_token;
10283 token = read32 (ip + 1);
10285 klass = mini_get_class (method, token, generic_context);
10286 CHECK_TYPELOAD (klass);
10288 context_used = mini_class_check_context_used (cfg, klass);
10290 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10291 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10292 ins->sreg1 = sp [0]->dreg;
10293 ins->type = STACK_I4;
10294 ins->dreg = alloc_ireg (cfg);
10295 MONO_ADD_INS (cfg->cbb, ins);
10296 *sp = mono_decompose_opcode (cfg, ins);
10299 if (context_used) {
10300 MonoInst *args [3];
10301 MonoClass *array_class = mono_array_class_get (klass, 1);
10302 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10304 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10307 args [0] = emit_get_rgctx_klass (cfg, context_used,
10308 array_class, MONO_RGCTX_INFO_VTABLE);
10313 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10315 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10317 if (cfg->opt & MONO_OPT_SHARED) {
10318 /* Decompose now to avoid problems with references to the domainvar */
10319 MonoInst *iargs [3];
10321 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10322 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10323 iargs [2] = sp [0];
10325 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10327 /* Decompose later since it is needed by abcrem */
10328 MonoClass *array_type = mono_array_class_get (klass, 1);
10329 mono_class_vtable (cfg->domain, array_type);
10330 CHECK_TYPELOAD (array_type);
10332 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10333 ins->dreg = alloc_ireg_ref (cfg);
10334 ins->sreg1 = sp [0]->dreg;
10335 ins->inst_newa_class = klass;
10336 ins->type = STACK_OBJ;
10337 ins->klass = array_type;
10338 MONO_ADD_INS (cfg->cbb, ins);
10339 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10340 cfg->cbb->has_array_access = TRUE;
10342 /* Needed so mono_emit_load_get_addr () gets called */
10343 mono_get_got_var (cfg);
10353 * we inline/optimize the initialization sequence if possible.
10354 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10355 * for small sizes open code the memcpy
10356 * ensure the rva field is big enough
10358 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10359 MonoMethod *memcpy_method = get_memcpy_method ();
10360 MonoInst *iargs [3];
10361 int add_reg = alloc_ireg_mp (cfg);
10363 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10364 if (cfg->compile_aot) {
10365 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10367 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10369 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10370 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10379 if (sp [0]->type != STACK_OBJ)
10382 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10383 ins->dreg = alloc_preg (cfg);
10384 ins->sreg1 = sp [0]->dreg;
10385 ins->type = STACK_I4;
10386 /* This flag will be inherited by the decomposition */
10387 ins->flags |= MONO_INST_FAULT;
10388 MONO_ADD_INS (cfg->cbb, ins);
10389 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10390 cfg->cbb->has_array_access = TRUE;
10398 if (sp [0]->type != STACK_OBJ)
10401 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10403 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10404 CHECK_TYPELOAD (klass);
10405 /* we need to make sure that this array is exactly the type it needs
10406 * to be for correctness. the wrappers are lax with their usage
10407 * so we need to ignore them here
10409 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10410 MonoClass *array_class = mono_array_class_get (klass, 1);
10411 mini_emit_check_array_type (cfg, sp [0], array_class);
10412 CHECK_TYPELOAD (array_class);
10416 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10421 case CEE_LDELEM_I1:
10422 case CEE_LDELEM_U1:
10423 case CEE_LDELEM_I2:
10424 case CEE_LDELEM_U2:
10425 case CEE_LDELEM_I4:
10426 case CEE_LDELEM_U4:
10427 case CEE_LDELEM_I8:
10429 case CEE_LDELEM_R4:
10430 case CEE_LDELEM_R8:
10431 case CEE_LDELEM_REF: {
10437 if (*ip == CEE_LDELEM) {
10439 token = read32 (ip + 1);
10440 klass = mini_get_class (method, token, generic_context);
10441 CHECK_TYPELOAD (klass);
10442 mono_class_init (klass);
10445 klass = array_access_to_klass (*ip);
10447 if (sp [0]->type != STACK_OBJ)
10450 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10452 if (mini_is_gsharedvt_klass (cfg, klass)) {
10453 // FIXME-VT: OP_ICONST optimization
10454 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10455 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10456 ins->opcode = OP_LOADV_MEMBASE;
10457 } else if (sp [1]->opcode == OP_ICONST) {
10458 int array_reg = sp [0]->dreg;
10459 int index_reg = sp [1]->dreg;
10460 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10462 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10463 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10465 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10466 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10469 if (*ip == CEE_LDELEM)
10476 case CEE_STELEM_I1:
10477 case CEE_STELEM_I2:
10478 case CEE_STELEM_I4:
10479 case CEE_STELEM_I8:
10480 case CEE_STELEM_R4:
10481 case CEE_STELEM_R8:
10482 case CEE_STELEM_REF:
10487 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10489 if (*ip == CEE_STELEM) {
10491 token = read32 (ip + 1);
10492 klass = mini_get_class (method, token, generic_context);
10493 CHECK_TYPELOAD (klass);
10494 mono_class_init (klass);
10497 klass = array_access_to_klass (*ip);
10499 if (sp [0]->type != STACK_OBJ)
10502 emit_array_store (cfg, klass, sp, TRUE);
10504 if (*ip == CEE_STELEM)
10511 case CEE_CKFINITE: {
10515 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10516 ins->sreg1 = sp [0]->dreg;
10517 ins->dreg = alloc_freg (cfg);
10518 ins->type = STACK_R8;
10519 MONO_ADD_INS (bblock, ins);
10521 *sp++ = mono_decompose_opcode (cfg, ins);
10526 case CEE_REFANYVAL: {
10527 MonoInst *src_var, *src;
10529 int klass_reg = alloc_preg (cfg);
10530 int dreg = alloc_preg (cfg);
10532 GSHAREDVT_FAILURE (*ip);
10535 MONO_INST_NEW (cfg, ins, *ip);
10538 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10539 CHECK_TYPELOAD (klass);
10540 mono_class_init (klass);
10542 context_used = mini_class_check_context_used (cfg, klass);
10545 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10547 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10548 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10551 if (context_used) {
10552 MonoInst *klass_ins;
10554 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10555 klass, MONO_RGCTX_INFO_KLASS);
10558 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10559 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10561 mini_emit_class_check (cfg, klass_reg, klass);
10563 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10564 ins->type = STACK_MP;
10569 case CEE_MKREFANY: {
10570 MonoInst *loc, *addr;
10572 GSHAREDVT_FAILURE (*ip);
10575 MONO_INST_NEW (cfg, ins, *ip);
10578 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10579 CHECK_TYPELOAD (klass);
10580 mono_class_init (klass);
10582 context_used = mini_class_check_context_used (cfg, klass);
10584 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10585 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10587 if (context_used) {
10588 MonoInst *const_ins;
10589 int type_reg = alloc_preg (cfg);
10591 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10592 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10594 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10595 } else if (cfg->compile_aot) {
10596 int const_reg = alloc_preg (cfg);
10597 int type_reg = alloc_preg (cfg);
10599 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10600 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10602 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10605 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10607 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10609 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10610 ins->type = STACK_VTYPE;
10611 ins->klass = mono_defaults.typed_reference_class;
10616 case CEE_LDTOKEN: {
10618 MonoClass *handle_class;
10620 CHECK_STACK_OVF (1);
10623 n = read32 (ip + 1);
10625 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10626 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10627 handle = mono_method_get_wrapper_data (method, n);
10628 handle_class = mono_method_get_wrapper_data (method, n + 1);
10629 if (handle_class == mono_defaults.typehandle_class)
10630 handle = &((MonoClass*)handle)->byval_arg;
10633 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10637 mono_class_init (handle_class);
10638 if (cfg->generic_sharing_context) {
10639 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10640 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10641 /* This case handles ldtoken
10642 of an open type, like for
10645 } else if (handle_class == mono_defaults.typehandle_class) {
10646 /* If we get a MONO_TYPE_CLASS
10647 then we need to provide the
10649 instantiation of it. */
10650 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10653 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10654 } else if (handle_class == mono_defaults.fieldhandle_class)
10655 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10656 else if (handle_class == mono_defaults.methodhandle_class)
10657 context_used = mini_method_check_context_used (cfg, handle);
10659 g_assert_not_reached ();
10662 if ((cfg->opt & MONO_OPT_SHARED) &&
10663 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10664 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10665 MonoInst *addr, *vtvar, *iargs [3];
10666 int method_context_used;
10668 method_context_used = mini_method_check_context_used (cfg, method);
10670 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10672 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10673 EMIT_NEW_ICONST (cfg, iargs [1], n);
10674 if (method_context_used) {
10675 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10676 method, MONO_RGCTX_INFO_METHOD);
10677 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10679 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10680 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10682 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10686 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10688 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10689 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10690 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10691 (cmethod->klass == mono_defaults.systemtype_class) &&
10692 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10693 MonoClass *tclass = mono_class_from_mono_type (handle);
10695 mono_class_init (tclass);
10696 if (context_used) {
10697 ins = emit_get_rgctx_klass (cfg, context_used,
10698 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10699 } else if (cfg->compile_aot) {
10700 if (method->wrapper_type) {
10701 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10702 /* Special case for static synchronized wrappers */
10703 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10705 /* FIXME: n is not a normal token */
10706 cfg->disable_aot = TRUE;
10707 EMIT_NEW_PCONST (cfg, ins, NULL);
10710 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10713 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10715 ins->type = STACK_OBJ;
10716 ins->klass = cmethod->klass;
10719 MonoInst *addr, *vtvar;
10721 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10723 if (context_used) {
10724 if (handle_class == mono_defaults.typehandle_class) {
10725 ins = emit_get_rgctx_klass (cfg, context_used,
10726 mono_class_from_mono_type (handle),
10727 MONO_RGCTX_INFO_TYPE);
10728 } else if (handle_class == mono_defaults.methodhandle_class) {
10729 ins = emit_get_rgctx_method (cfg, context_used,
10730 handle, MONO_RGCTX_INFO_METHOD);
10731 } else if (handle_class == mono_defaults.fieldhandle_class) {
10732 ins = emit_get_rgctx_field (cfg, context_used,
10733 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10735 g_assert_not_reached ();
10737 } else if (cfg->compile_aot) {
10738 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10740 EMIT_NEW_PCONST (cfg, ins, handle);
10742 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10744 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10754 MONO_INST_NEW (cfg, ins, OP_THROW);
10756 ins->sreg1 = sp [0]->dreg;
10758 bblock->out_of_line = TRUE;
10759 MONO_ADD_INS (bblock, ins);
10760 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10761 MONO_ADD_INS (bblock, ins);
10764 link_bblock (cfg, bblock, end_bblock);
10765 start_new_bblock = 1;
10767 case CEE_ENDFINALLY:
10768 /* mono_save_seq_point_info () depends on this */
10769 if (sp != stack_start)
10770 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10771 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10772 MONO_ADD_INS (bblock, ins);
10774 start_new_bblock = 1;
10777 * Control will leave the method so empty the stack, otherwise
10778 * the next basic block will start with a nonempty stack.
10780 while (sp != stack_start) {
10785 case CEE_LEAVE_S: {
10788 if (*ip == CEE_LEAVE) {
10790 target = ip + 5 + (gint32)read32(ip + 1);
10793 target = ip + 2 + (signed char)(ip [1]);
10796 /* empty the stack */
10797 while (sp != stack_start) {
10802 * If this leave statement is in a catch block, check for a
10803 * pending exception, and rethrow it if necessary.
10804 * We avoid doing this in runtime invoke wrappers, since those are called
10805 * by native code which expects the wrapper to catch all exceptions.
10807 for (i = 0; i < header->num_clauses; ++i) {
10808 MonoExceptionClause *clause = &header->clauses [i];
10811 * Use <= in the final comparison to handle clauses with multiple
10812 * leave statements, like in bug #78024.
10813 * The ordering of the exception clauses guarantees that we find the
10814 * innermost clause.
10816 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10818 MonoBasicBlock *dont_throw;
10823 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10826 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10828 NEW_BBLOCK (cfg, dont_throw);
10831 * Currently, we always rethrow the abort exception, despite the
10832 * fact that this is not correct. See thread6.cs for an example.
10833 * But propagating the abort exception is more important than
10834 * getting the semantics right.
10836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10837 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10838 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10840 MONO_START_BB (cfg, dont_throw);
10845 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10847 MonoExceptionClause *clause;
10849 for (tmp = handlers; tmp; tmp = tmp->next) {
10850 clause = tmp->data;
10851 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10853 link_bblock (cfg, bblock, tblock);
10854 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10855 ins->inst_target_bb = tblock;
10856 ins->inst_eh_block = clause;
10857 MONO_ADD_INS (bblock, ins);
10858 bblock->has_call_handler = 1;
10859 if (COMPILE_LLVM (cfg)) {
10860 MonoBasicBlock *target_bb;
10863 * Link the finally bblock with the target, since it will
10864 * conceptually branch there.
10865 * FIXME: Have to link the bblock containing the endfinally.
10867 GET_BBLOCK (cfg, target_bb, target);
10868 link_bblock (cfg, tblock, target_bb);
10871 g_list_free (handlers);
10874 MONO_INST_NEW (cfg, ins, OP_BR);
10875 MONO_ADD_INS (bblock, ins);
10876 GET_BBLOCK (cfg, tblock, target);
10877 link_bblock (cfg, bblock, tblock);
10878 ins->inst_target_bb = tblock;
10879 start_new_bblock = 1;
10881 if (*ip == CEE_LEAVE)
10890 * Mono specific opcodes
10892 case MONO_CUSTOM_PREFIX: {
10894 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10898 case CEE_MONO_ICALL: {
10900 MonoJitICallInfo *info;
10902 token = read32 (ip + 2);
10903 func = mono_method_get_wrapper_data (method, token);
10904 info = mono_find_jit_icall_by_addr (func);
10906 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10909 CHECK_STACK (info->sig->param_count);
10910 sp -= info->sig->param_count;
10912 ins = mono_emit_jit_icall (cfg, info->func, sp);
10913 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10917 inline_costs += 10 * num_calls++;
10921 case CEE_MONO_LDPTR: {
10924 CHECK_STACK_OVF (1);
10926 token = read32 (ip + 2);
10928 ptr = mono_method_get_wrapper_data (method, token);
10929 /* FIXME: Generalize this */
10930 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10931 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10936 EMIT_NEW_PCONST (cfg, ins, ptr);
10939 inline_costs += 10 * num_calls++;
10940 /* Can't embed random pointers into AOT code */
10941 cfg->disable_aot = 1;
10944 case CEE_MONO_JIT_ICALL_ADDR: {
10945 MonoJitICallInfo *callinfo;
10948 CHECK_STACK_OVF (1);
10950 token = read32 (ip + 2);
10952 ptr = mono_method_get_wrapper_data (method, token);
10953 callinfo = mono_find_jit_icall_by_addr (ptr);
10954 g_assert (callinfo);
10955 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10958 inline_costs += 10 * num_calls++;
10961 case CEE_MONO_ICALL_ADDR: {
10962 MonoMethod *cmethod;
10965 CHECK_STACK_OVF (1);
10967 token = read32 (ip + 2);
10969 cmethod = mono_method_get_wrapper_data (method, token);
10971 if (cfg->compile_aot) {
10972 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10974 ptr = mono_lookup_internal_call (cmethod);
10976 EMIT_NEW_PCONST (cfg, ins, ptr);
10982 case CEE_MONO_VTADDR: {
10983 MonoInst *src_var, *src;
10989 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10990 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10995 case CEE_MONO_NEWOBJ: {
10996 MonoInst *iargs [2];
10998 CHECK_STACK_OVF (1);
11000 token = read32 (ip + 2);
11001 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11002 mono_class_init (klass);
11003 NEW_DOMAINCONST (cfg, iargs [0]);
11004 MONO_ADD_INS (cfg->cbb, iargs [0]);
11005 NEW_CLASSCONST (cfg, iargs [1], klass);
11006 MONO_ADD_INS (cfg->cbb, iargs [1]);
11007 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11009 inline_costs += 10 * num_calls++;
11012 case CEE_MONO_OBJADDR:
11015 MONO_INST_NEW (cfg, ins, OP_MOVE);
11016 ins->dreg = alloc_ireg_mp (cfg);
11017 ins->sreg1 = sp [0]->dreg;
11018 ins->type = STACK_MP;
11019 MONO_ADD_INS (cfg->cbb, ins);
11023 case CEE_MONO_LDNATIVEOBJ:
11025 * Similar to LDOBJ, but instead load the unmanaged
11026 * representation of the vtype to the stack.
11031 token = read32 (ip + 2);
11032 klass = mono_method_get_wrapper_data (method, token);
11033 g_assert (klass->valuetype);
11034 mono_class_init (klass);
11037 MonoInst *src, *dest, *temp;
11040 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11041 temp->backend.is_pinvoke = 1;
11042 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11043 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11045 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11046 dest->type = STACK_VTYPE;
11047 dest->klass = klass;
11053 case CEE_MONO_RETOBJ: {
11055 * Same as RET, but return the native representation of a vtype
11058 g_assert (cfg->ret);
11059 g_assert (mono_method_signature (method)->pinvoke);
11064 token = read32 (ip + 2);
11065 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11067 if (!cfg->vret_addr) {
11068 g_assert (cfg->ret_var_is_local);
11070 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11072 EMIT_NEW_RETLOADA (cfg, ins);
11074 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11076 if (sp != stack_start)
11079 MONO_INST_NEW (cfg, ins, OP_BR);
11080 ins->inst_target_bb = end_bblock;
11081 MONO_ADD_INS (bblock, ins);
11082 link_bblock (cfg, bblock, end_bblock);
11083 start_new_bblock = 1;
11087 case CEE_MONO_CISINST:
11088 case CEE_MONO_CCASTCLASS: {
11093 token = read32 (ip + 2);
11094 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11095 if (ip [1] == CEE_MONO_CISINST)
11096 ins = handle_cisinst (cfg, klass, sp [0]);
11098 ins = handle_ccastclass (cfg, klass, sp [0]);
11104 case CEE_MONO_SAVE_LMF:
11105 case CEE_MONO_RESTORE_LMF:
11106 #ifdef MONO_ARCH_HAVE_LMF_OPS
11107 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11108 MONO_ADD_INS (bblock, ins);
11109 cfg->need_lmf_area = TRUE;
11113 case CEE_MONO_CLASSCONST:
11114 CHECK_STACK_OVF (1);
11116 token = read32 (ip + 2);
11117 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11120 inline_costs += 10 * num_calls++;
11122 case CEE_MONO_NOT_TAKEN:
11123 bblock->out_of_line = TRUE;
11127 CHECK_STACK_OVF (1);
11129 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11130 ins->dreg = alloc_preg (cfg);
11131 ins->inst_offset = (gint32)read32 (ip + 2);
11132 ins->type = STACK_PTR;
11133 MONO_ADD_INS (bblock, ins);
11137 case CEE_MONO_DYN_CALL: {
11138 MonoCallInst *call;
11140 /* It would be easier to call a trampoline, but that would put an
11141 * extra frame on the stack, confusing exception handling. So
11142 * implement it inline using an opcode for now.
11145 if (!cfg->dyn_call_var) {
11146 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11147 /* prevent it from being register allocated */
11148 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11151 /* Has to use a call inst since the local regalloc expects it */
11152 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11153 ins = (MonoInst*)call;
11155 ins->sreg1 = sp [0]->dreg;
11156 ins->sreg2 = sp [1]->dreg;
11157 MONO_ADD_INS (bblock, ins);
11159 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11160 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11164 inline_costs += 10 * num_calls++;
11168 case CEE_MONO_MEMORY_BARRIER: {
11170 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11174 case CEE_MONO_JIT_ATTACH: {
11175 MonoInst *args [16];
11176 MonoInst *ad_ins, *lmf_ins;
11177 MonoBasicBlock *next_bb = NULL;
11179 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11181 EMIT_NEW_PCONST (cfg, ins, NULL);
11182 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11188 ad_ins = mono_get_domain_intrinsic (cfg);
11189 lmf_ins = mono_get_lmf_intrinsic (cfg);
11192 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11193 NEW_BBLOCK (cfg, next_bb);
11195 MONO_ADD_INS (cfg->cbb, ad_ins);
11196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11199 MONO_ADD_INS (cfg->cbb, lmf_ins);
11200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11201 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11204 if (cfg->compile_aot) {
11205 /* AOT code is only used in the root domain */
11206 EMIT_NEW_PCONST (cfg, args [0], NULL);
11208 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11210 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11211 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11214 MONO_START_BB (cfg, next_bb);
11220 case CEE_MONO_JIT_DETACH: {
11221 MonoInst *args [16];
11223 /* Restore the original domain */
11224 dreg = alloc_ireg (cfg);
11225 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11226 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11231 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11237 case CEE_PREFIX1: {
11240 case CEE_ARGLIST: {
11241 /* somewhat similar to LDTOKEN */
11242 MonoInst *addr, *vtvar;
11243 CHECK_STACK_OVF (1);
11244 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11246 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11247 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11249 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11250 ins->type = STACK_VTYPE;
11251 ins->klass = mono_defaults.argumenthandle_class;
11264 * The following transforms:
11265 * CEE_CEQ into OP_CEQ
11266 * CEE_CGT into OP_CGT
11267 * CEE_CGT_UN into OP_CGT_UN
11268 * CEE_CLT into OP_CLT
11269 * CEE_CLT_UN into OP_CLT_UN
11271 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11273 MONO_INST_NEW (cfg, ins, cmp->opcode);
11275 cmp->sreg1 = sp [0]->dreg;
11276 cmp->sreg2 = sp [1]->dreg;
11277 type_from_op (cmp, sp [0], sp [1]);
11279 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11280 cmp->opcode = OP_LCOMPARE;
11281 else if (sp [0]->type == STACK_R8)
11282 cmp->opcode = OP_FCOMPARE;
11284 cmp->opcode = OP_ICOMPARE;
11285 MONO_ADD_INS (bblock, cmp);
11286 ins->type = STACK_I4;
11287 ins->dreg = alloc_dreg (cfg, ins->type);
11288 type_from_op (ins, sp [0], sp [1]);
11290 if (cmp->opcode == OP_FCOMPARE) {
11292 * The backends expect the fceq opcodes to do the
11295 cmp->opcode = OP_NOP;
11296 ins->sreg1 = cmp->sreg1;
11297 ins->sreg2 = cmp->sreg2;
11299 MONO_ADD_INS (bblock, ins);
11305 MonoInst *argconst;
11306 MonoMethod *cil_method;
11308 CHECK_STACK_OVF (1);
11310 n = read32 (ip + 2);
11311 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11312 if (!cmethod || mono_loader_get_last_error ())
11314 mono_class_init (cmethod->klass);
11316 mono_save_token_info (cfg, image, n, cmethod);
11318 context_used = mini_method_check_context_used (cfg, cmethod);
11320 cil_method = cmethod;
11321 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11322 METHOD_ACCESS_FAILURE;
11324 if (mono_security_cas_enabled ()) {
11325 if (check_linkdemand (cfg, method, cmethod))
11326 INLINE_FAILURE ("linkdemand");
11327 CHECK_CFG_EXCEPTION;
11328 } else if (mono_security_core_clr_enabled ()) {
11329 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11333 * Optimize the common case of ldftn+delegate creation
11335 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11336 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11337 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11338 MonoInst *target_ins;
11339 MonoMethod *invoke;
11340 int invoke_context_used;
11342 invoke = mono_get_delegate_invoke (ctor_method->klass);
11343 if (!invoke || !mono_method_signature (invoke))
11346 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11348 target_ins = sp [-1];
11350 if (mono_security_core_clr_enabled ())
11351 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11353 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11354 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11355 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11357 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11361 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11362 /* FIXME: SGEN support */
11363 if (invoke_context_used == 0) {
11365 if (cfg->verbose_level > 3)
11366 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11368 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11369 CHECK_CFG_EXCEPTION;
11378 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11379 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11383 inline_costs += 10 * num_calls++;
11386 case CEE_LDVIRTFTN: {
11387 MonoInst *args [2];
11391 n = read32 (ip + 2);
11392 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11393 if (!cmethod || mono_loader_get_last_error ())
11395 mono_class_init (cmethod->klass);
11397 context_used = mini_method_check_context_used (cfg, cmethod);
11399 if (mono_security_cas_enabled ()) {
11400 if (check_linkdemand (cfg, method, cmethod))
11401 INLINE_FAILURE ("linkdemand");
11402 CHECK_CFG_EXCEPTION;
11403 } else if (mono_security_core_clr_enabled ()) {
11404 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11410 args [1] = emit_get_rgctx_method (cfg, context_used,
11411 cmethod, MONO_RGCTX_INFO_METHOD);
11414 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11416 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11419 inline_costs += 10 * num_calls++;
11423 CHECK_STACK_OVF (1);
11425 n = read16 (ip + 2);
11427 EMIT_NEW_ARGLOAD (cfg, ins, n);
11432 CHECK_STACK_OVF (1);
11434 n = read16 (ip + 2);
11436 NEW_ARGLOADA (cfg, ins, n);
11437 MONO_ADD_INS (cfg->cbb, ins);
11445 n = read16 (ip + 2);
11447 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11449 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11453 CHECK_STACK_OVF (1);
11455 n = read16 (ip + 2);
11457 EMIT_NEW_LOCLOAD (cfg, ins, n);
11462 unsigned char *tmp_ip;
11463 CHECK_STACK_OVF (1);
11465 n = read16 (ip + 2);
11468 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11474 EMIT_NEW_LOCLOADA (cfg, ins, n);
11483 n = read16 (ip + 2);
11485 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11487 emit_stloc_ir (cfg, sp, header, n);
11494 if (sp != stack_start)
11496 if (cfg->method != method)
11498 * Inlining this into a loop in a parent could lead to
11499 * stack overflows which is different behavior than the
11500 * non-inlined case, thus disable inlining in this case.
11502 goto inline_failure;
11504 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11505 ins->dreg = alloc_preg (cfg);
11506 ins->sreg1 = sp [0]->dreg;
11507 ins->type = STACK_PTR;
11508 MONO_ADD_INS (cfg->cbb, ins);
11510 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11512 ins->flags |= MONO_INST_INIT;
11517 case CEE_ENDFILTER: {
11518 MonoExceptionClause *clause, *nearest;
11519 int cc, nearest_num;
11523 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11525 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11526 ins->sreg1 = (*sp)->dreg;
11527 MONO_ADD_INS (bblock, ins);
11528 start_new_bblock = 1;
11533 for (cc = 0; cc < header->num_clauses; ++cc) {
11534 clause = &header->clauses [cc];
11535 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11536 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11537 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11542 g_assert (nearest);
11543 if ((ip - header->code) != nearest->handler_offset)
11548 case CEE_UNALIGNED_:
11549 ins_flag |= MONO_INST_UNALIGNED;
11550 /* FIXME: record alignment? we can assume 1 for now */
11554 case CEE_VOLATILE_:
11555 ins_flag |= MONO_INST_VOLATILE;
11559 ins_flag |= MONO_INST_TAILCALL;
11560 cfg->flags |= MONO_CFG_HAS_TAIL;
11561 /* Can't inline tail calls at this time */
11562 inline_costs += 100000;
11569 token = read32 (ip + 2);
11570 klass = mini_get_class (method, token, generic_context);
11571 CHECK_TYPELOAD (klass);
11572 if (generic_class_is_reference_type (cfg, klass))
11573 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11575 mini_emit_initobj (cfg, *sp, NULL, klass);
11579 case CEE_CONSTRAINED_:
11581 token = read32 (ip + 2);
11582 constrained_call = mini_get_class (method, token, generic_context);
11583 CHECK_TYPELOAD (constrained_call);
11587 case CEE_INITBLK: {
11588 MonoInst *iargs [3];
11592 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11593 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11594 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11595 /* emit_memset only works when val == 0 */
11596 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11598 iargs [0] = sp [0];
11599 iargs [1] = sp [1];
11600 iargs [2] = sp [2];
11601 if (ip [1] == CEE_CPBLK) {
11602 MonoMethod *memcpy_method = get_memcpy_method ();
11603 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11605 MonoMethod *memset_method = get_memset_method ();
11606 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11616 ins_flag |= MONO_INST_NOTYPECHECK;
11618 ins_flag |= MONO_INST_NORANGECHECK;
11619 /* we ignore the no-nullcheck for now since we
11620 * really do it explicitly only when doing callvirt->call
11624 case CEE_RETHROW: {
11626 int handler_offset = -1;
11628 for (i = 0; i < header->num_clauses; ++i) {
11629 MonoExceptionClause *clause = &header->clauses [i];
11630 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11631 handler_offset = clause->handler_offset;
11636 bblock->flags |= BB_EXCEPTION_UNSAFE;
11638 g_assert (handler_offset != -1);
11640 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11641 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11642 ins->sreg1 = load->dreg;
11643 MONO_ADD_INS (bblock, ins);
11645 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11646 MONO_ADD_INS (bblock, ins);
11649 link_bblock (cfg, bblock, end_bblock);
11650 start_new_bblock = 1;
11658 GSHAREDVT_FAILURE (*ip);
11660 CHECK_STACK_OVF (1);
11662 token = read32 (ip + 2);
11663 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11664 MonoType *type = mono_type_create_from_typespec (image, token);
11665 val = mono_type_size (type, &ialign);
11667 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11668 CHECK_TYPELOAD (klass);
11669 mono_class_init (klass);
11670 val = mono_type_size (&klass->byval_arg, &ialign);
11672 EMIT_NEW_ICONST (cfg, ins, val);
11677 case CEE_REFANYTYPE: {
11678 MonoInst *src_var, *src;
11680 GSHAREDVT_FAILURE (*ip);
11686 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11688 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11689 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11690 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11695 case CEE_READONLY_:
11708 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11718 g_warning ("opcode 0x%02x not handled", *ip);
11722 if (start_new_bblock != 1)
11725 bblock->cil_length = ip - bblock->cil_code;
11726 if (bblock->next_bb) {
11727 /* This could already be set because of inlining, #693905 */
11728 MonoBasicBlock *bb = bblock;
11730 while (bb->next_bb)
11732 bb->next_bb = end_bblock;
11734 bblock->next_bb = end_bblock;
11737 if (cfg->method == method && cfg->domainvar) {
11739 MonoInst *get_domain;
11741 cfg->cbb = init_localsbb;
11743 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11744 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11747 get_domain->dreg = alloc_preg (cfg);
11748 MONO_ADD_INS (cfg->cbb, get_domain);
11750 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11751 MONO_ADD_INS (cfg->cbb, store);
11754 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11755 if (cfg->compile_aot)
11756 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11757 mono_get_got_var (cfg);
11760 if (cfg->method == method && cfg->got_var)
11761 mono_emit_load_got_addr (cfg);
11766 cfg->cbb = init_localsbb;
11768 for (i = 0; i < header->num_locals; ++i) {
11769 MonoType *ptype = header->locals [i];
11770 int t = ptype->type;
11771 dreg = cfg->locals [i]->dreg;
11773 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11774 t = mono_class_enum_basetype (ptype->data.klass)->type;
11775 if (ptype->byref) {
11776 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11777 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11778 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11779 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11780 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11781 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11782 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11783 ins->type = STACK_R8;
11784 ins->inst_p0 = (void*)&r8_0;
11785 ins->dreg = alloc_dreg (cfg, STACK_R8);
11786 MONO_ADD_INS (init_localsbb, ins);
11787 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11788 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11789 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11790 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11791 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11792 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11794 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11799 if (cfg->init_ref_vars && cfg->method == method) {
11800 /* Emit initialization for ref vars */
11801 // FIXME: Avoid duplication initialization for IL locals.
11802 for (i = 0; i < cfg->num_varinfo; ++i) {
11803 MonoInst *ins = cfg->varinfo [i];
11805 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11806 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11811 MonoBasicBlock *bb;
11814 * Make seq points at backward branch targets interruptable.
11816 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11817 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11818 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11821 /* Add a sequence point for method entry/exit events */
11823 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11824 MONO_ADD_INS (init_localsbb, ins);
11825 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11826 MONO_ADD_INS (cfg->bb_exit, ins);
11830 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11831 * the code they refer to was dead (#11880).
11833 if (sym_seq_points) {
11834 for (i = 0; i < header->code_size; ++i) {
11835 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11838 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11839 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11846 if (cfg->method == method) {
11847 MonoBasicBlock *bb;
11848 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11849 bb->region = mono_find_block_region (cfg, bb->real_offset);
11851 mono_create_spvar_for_region (cfg, bb->region);
11852 if (cfg->verbose_level > 2)
11853 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11857 g_slist_free (class_inits);
11858 dont_inline = g_list_remove (dont_inline, method);
11860 if (inline_costs < 0) {
11863 /* Method is too large */
11864 mname = mono_method_full_name (method, TRUE);
11865 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11866 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11868 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11869 mono_basic_block_free (original_bb);
11873 if ((cfg->verbose_level > 2) && (cfg->method == method))
11874 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11876 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11877 mono_basic_block_free (original_bb);
11878 return inline_costs;
11881 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11888 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11892 set_exception_type_from_invalid_il (cfg, method, ip);
11896 g_slist_free (class_inits);
11897 mono_basic_block_free (original_bb);
11898 dont_inline = g_list_remove (dont_inline, method);
11899 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11904 store_membase_reg_to_store_membase_imm (int opcode)
11907 case OP_STORE_MEMBASE_REG:
11908 return OP_STORE_MEMBASE_IMM;
11909 case OP_STOREI1_MEMBASE_REG:
11910 return OP_STOREI1_MEMBASE_IMM;
11911 case OP_STOREI2_MEMBASE_REG:
11912 return OP_STOREI2_MEMBASE_IMM;
11913 case OP_STOREI4_MEMBASE_REG:
11914 return OP_STOREI4_MEMBASE_IMM;
11915 case OP_STOREI8_MEMBASE_REG:
11916 return OP_STOREI8_MEMBASE_IMM;
11918 g_assert_not_reached ();
11925 mono_op_to_op_imm (int opcode)
11929 return OP_IADD_IMM;
11931 return OP_ISUB_IMM;
11933 return OP_IDIV_IMM;
11935 return OP_IDIV_UN_IMM;
11937 return OP_IREM_IMM;
11939 return OP_IREM_UN_IMM;
11941 return OP_IMUL_IMM;
11943 return OP_IAND_IMM;
11947 return OP_IXOR_IMM;
11949 return OP_ISHL_IMM;
11951 return OP_ISHR_IMM;
11953 return OP_ISHR_UN_IMM;
11956 return OP_LADD_IMM;
11958 return OP_LSUB_IMM;
11960 return OP_LAND_IMM;
11964 return OP_LXOR_IMM;
11966 return OP_LSHL_IMM;
11968 return OP_LSHR_IMM;
11970 return OP_LSHR_UN_IMM;
11973 return OP_COMPARE_IMM;
11975 return OP_ICOMPARE_IMM;
11977 return OP_LCOMPARE_IMM;
11979 case OP_STORE_MEMBASE_REG:
11980 return OP_STORE_MEMBASE_IMM;
11981 case OP_STOREI1_MEMBASE_REG:
11982 return OP_STOREI1_MEMBASE_IMM;
11983 case OP_STOREI2_MEMBASE_REG:
11984 return OP_STOREI2_MEMBASE_IMM;
11985 case OP_STOREI4_MEMBASE_REG:
11986 return OP_STOREI4_MEMBASE_IMM;
11988 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11990 return OP_X86_PUSH_IMM;
11991 case OP_X86_COMPARE_MEMBASE_REG:
11992 return OP_X86_COMPARE_MEMBASE_IMM;
11994 #if defined(TARGET_AMD64)
11995 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11996 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11998 case OP_VOIDCALL_REG:
11999 return OP_VOIDCALL;
12007 return OP_LOCALLOC_IMM;
12014 ldind_to_load_membase (int opcode)
12018 return OP_LOADI1_MEMBASE;
12020 return OP_LOADU1_MEMBASE;
12022 return OP_LOADI2_MEMBASE;
12024 return OP_LOADU2_MEMBASE;
12026 return OP_LOADI4_MEMBASE;
12028 return OP_LOADU4_MEMBASE;
12030 return OP_LOAD_MEMBASE;
12031 case CEE_LDIND_REF:
12032 return OP_LOAD_MEMBASE;
12034 return OP_LOADI8_MEMBASE;
12036 return OP_LOADR4_MEMBASE;
12038 return OP_LOADR8_MEMBASE;
12040 g_assert_not_reached ();
12047 stind_to_store_membase (int opcode)
12051 return OP_STOREI1_MEMBASE_REG;
12053 return OP_STOREI2_MEMBASE_REG;
12055 return OP_STOREI4_MEMBASE_REG;
12057 case CEE_STIND_REF:
12058 return OP_STORE_MEMBASE_REG;
12060 return OP_STOREI8_MEMBASE_REG;
12062 return OP_STORER4_MEMBASE_REG;
12064 return OP_STORER8_MEMBASE_REG;
12066 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * variant, or -1 when the target architecture has no such form (only
 * x86/amd64 here; OP_LOADI8_MEM only on 64-bit).
 * NOTE(review): garbled extraction — switch structure and #endif lines
 * reconstructed; verify against upstream.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   When an instruction's destination is immediately stored back with
 * STORE_OPCODE, return a read-modify-write _MEMBASE form of OPCODE that
 * operates directly on memory, or -1 if no such form exists. OP_MOVE
 * folds away entirely (OP_NOP). Only x86/amd64 provide these forms.
 * NOTE(review): garbled extraction — case labels reconstructed from the
 * returned opcodes and the one-vs-two-label line gaps; verify upstream.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only plain pointer/int32 stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase (fragment):
 *
 *   Fuses a compare-result opcode with a following byte store into a
 * direct setcc-to-memory form (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE)
 * on x86/amd64, requiring an OP_STOREI1_MEMBASE_REG store.
 * NOTE(review): lossy extraction — the function's signature line, braces,
 * the 'switch (opcode)' line and BOTH case labels (original lines 12199
 * and 12202) are missing here, so the opcode mapped to SETNE cannot be
 * recovered from this text; left byte-identical — restore from upstream.
 */
12195 op_to_op_store_membase (int store_opcode, int opcode)
12197 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12200 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12201 return OP_X86_SETEQ_MEMBASE;
12203 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12204 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source of OPCODE was just loaded with LOAD_OPCODE,
 * return a fused _MEMBASE form reading the operand from memory directly,
 * or -1 when fusing is not possible. x86 only fuses pointer/int32 loads;
 * amd64 additionally handles 64-bit loads (with __mono_ilp32__ special
 * cases where pointers are 32 bits).
 * NOTE(review): garbled extraction — comment delimiters, break statements
 * and case labels reconstructed; verify against upstream method-to-ir.c.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   When the second source of OPCODE was just loaded with LOAD_OPCODE,
 * return a fused REG_MEMBASE form reading that operand from memory, or -1
 * if fusing is not possible. On amd64 the 32-bit and 64-bit load classes
 * select int vs long variants; __mono_ilp32__ moves the pointer-sized
 * OP_LOAD_MEMBASE between the two classes.
 * NOTE(review): garbled extraction — case labels, braces and #else arms
 * reconstructed; verify against upstream method-to-ir.c.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse (-1) to convert opcodes whose
 * immediate forms would be software-emulated on this architecture
 * (long shifts on 32-bit without native support, and mul/div/rem where
 * they are emulated), since emulation defeats the point of the _IMM form.
 * NOTE(review): garbled extraction — the case groups inside each #if were
 * missing; reconstructed (sizes match the line-number gaps exactly);
 * verify against upstream.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
/*
 * NOTE(review): this whole span is a lossy extraction — roughly half of the
 * original lines (local declarations such as 'vreg'/'prev_bb'/'pos', the
 * 'vreg = ins->dreg/sreg1/sreg2/sreg3' assignments, 'switch (regtype)'
 * labels, braces) are missing, and each surviving line carries a stray
 * original-line-number prefix. Code text is left byte-identical; only
 * review comments are added. Restore the full function from upstream.
 *
 * Purpose (from the surviving header comment): make vregs used in more
 * than one bblock 'global' (backed by a variable), demote single-bblock
 * variables to local vregs, then compact the varinfo/vars tables.
 */
12380 * mono_handle_global_vregs:
12382 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12386 mono_handle_global_vregs (MonoCompile *cfg)
12388 gint32 *vreg_to_bb;
12389 MonoBasicBlock *bb;
/* NOTE(review): precedence — 'sizeof (gint32*) * cfg->next_vreg + 1' adds one
 * BYTE, not one element, and uses sizeof of the pointer type; presumably
 * 'sizeof (gint32) * (cfg->next_vreg + 1)' was intended. Benign on LP64
 * (over-allocates) but worth confirming against upstream. */
12392 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12394 #ifdef MONO_ARCH_SIMD_INTRINSICS
12395 if (cfg->uses_simd_intrinsics)
12396 mono_simd_simplify_indirection (cfg);
12399 /* Find local vregs used in more than one bb */
12400 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12401 MonoInst *ins = bb->code;
12402 int block_num = bb->block_num;
12404 if (cfg->verbose_level > 2)
12405 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12408 for (; ins; ins = ins->next) {
12409 const char *spec = INS_INFO (ins->opcode);
12410 int regtype = 0, regindex;
12413 if (G_UNLIKELY (cfg->verbose_level > 2))
12414 mono_print_ins (ins);
/* At this stage only machine IR opcodes remain; CIL opcodes are gone. */
12416 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dest + up to three source registers of the instruction.
 * NOTE(review): the 'vreg = ins->dreg/sreg1/sreg2/sreg3;' assignment lines
 * are among those dropped from this extraction. */
12418 for (regindex = 0; regindex < 4; regindex ++) {
12421 if (regindex == 0) {
12422 regtype = spec [MONO_INST_DEST];
12423 if (regtype == ' ')
12426 } else if (regindex == 1) {
12427 regtype = spec [MONO_INST_SRC1];
12428 if (regtype == ' ')
12431 } else if (regindex == 2) {
12432 regtype = spec [MONO_INST_SRC2];
12433 if (regtype == ' ')
12436 } else if (regindex == 3) {
12437 regtype = spec [MONO_INST_SRC3];
12438 if (regtype == ' ')
12443 #if SIZEOF_REGISTER == 4
12444 /* In the LLVM case, the long opcodes are not decomposed */
12445 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12447 * Since some instructions reference the original long vreg,
12448 * and some reference the two component vregs, it is quite hard
12449 * to determine when it needs to be global. So be conservative.
12451 if (!get_vreg_to_inst (cfg, vreg)) {
12452 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12454 if (cfg->verbose_level > 2)
12455 printf ("LONG VREG R%d made global.\n", vreg);
12459 * Make the component vregs volatile since the optimizations can
12460 * get confused otherwise.
12462 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12463 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12467 g_assert (vreg != -1);
/* vreg_to_bb: 0 = unseen, N+1 = seen only in block N, -1 = multi-block. */
12469 prev_bb = vreg_to_bb [vreg];
12470 if (prev_bb == 0) {
12471 /* 0 is a valid block num */
12472 vreg_to_bb [vreg] = block_num + 1;
12473 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers don't need a backing variable. */
12474 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12477 if (!get_vreg_to_inst (cfg, vreg)) {
12478 if (G_UNLIKELY (cfg->verbose_level > 2))
12479 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* NOTE(review): the 'switch (regtype)' and its case labels ('i','l','f','v')
 * are missing from this extraction; the calls below are the case bodies. */
12483 if (vreg_is_ref (cfg, vreg))
12484 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12486 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12489 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12492 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12495 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12498 g_assert_not_reached ();
12502 /* Flag as having been used in more than one bb */
12503 vreg_to_bb [vreg] = -1;
12509 /* If a variable is used in only one bblock, convert it into a local vreg */
12510 for (i = 0; i < cfg->num_varinfo; i++) {
12511 MonoInst *var = cfg->varinfo [i];
12512 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12514 switch (var->type) {
12520 #if SIZEOF_REGISTER == 8
12523 #if !defined(TARGET_X86)
12524 /* Enabling this screws up the fp stack on x86 */
12527 if (mono_arch_is_soft_float ())
12530 /* Arguments are implicitly global */
12531 /* Putting R4 vars into registers doesn't work currently */
12532 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12533 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12535 * Make that the variable's liveness interval doesn't contain a call, since
12536 * that would cause the lvreg to be spilled, making the whole optimization
12539 /* This is too slow for JIT compilation */
12541 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12543 int def_index, call_index, ins_index;
12544 gboolean spilled = FALSE;
12549 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12550 const char *spec = INS_INFO (ins->opcode);
12552 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12553 def_index = ins_index;
/* NOTE(review): the second disjunct below re-checks SRC1/sreg1 — it is
 * identical to the first. Upstream checks spec[MONO_INST_SRC2] and
 * ins->sreg2 here; this looks like a copy-paste bug that makes sreg2
 * uses of the variable miss the spill check. TODO confirm vs upstream. */
12555 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12556 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12557 if (call_index > def_index) {
12563 if (MONO_IS_CALL (ins))
12564 call_index = ins_index;
12574 if (G_UNLIKELY (cfg->verbose_level > 2))
12575 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: the variable becomes a plain local vreg. */
12576 var->flags |= MONO_INST_IS_DEAD;
12577 cfg->vreg_to_inst [var->dreg] = NULL;
12584 * Compress the varinfo and vars tables so the liveness computation is faster and
12585 * takes up less space.
12588 for (i = 0; i < cfg->num_varinfo; ++i) {
12589 MonoInst *var = cfg->varinfo [i];
12590 if (pos < i && cfg->locals_start == i)
12591 cfg->locals_start = pos;
12592 if (!(var->flags & MONO_INST_IS_DEAD)) {
12594 cfg->varinfo [pos] = cfg->varinfo [i];
12595 cfg->varinfo [pos]->inst_c0 = pos;
12596 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12597 cfg->vars [pos].idx = pos;
12598 #if SIZEOF_REGISTER == 4
12599 if (cfg->varinfo [pos]->type == STACK_I8) {
12600 /* Modify the two component vars too */
12603 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12604 var1->inst_c0 = pos;
12605 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12606 var1->inst_c0 = pos;
12613 cfg->num_varinfo = pos;
12614 if (cfg->locals_start > cfg->num_varinfo)
12615 cfg->locals_start = cfg->num_varinfo;
12619 * mono_spill_global_vars:
12621 * Generate spill code for variables which are not allocated to registers,
12622 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12623 * code is generated which could be optimized by the local optimization passes.
12626 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12628 MonoBasicBlock *bb;
12630 int orig_next_vreg;
12631 guint32 *vreg_to_lvreg;
12633 guint32 i, lvregs_len;
12634 gboolean dest_has_lvreg = FALSE;
12635 guint32 stacktypes [128];
12636 MonoInst **live_range_start, **live_range_end;
12637 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12638 int *gsharedvt_vreg_to_idx = NULL;
12640 *need_local_opts = FALSE;
12642 memset (spec2, 0, sizeof (spec2));
12644 /* FIXME: Move this function to mini.c */
12645 stacktypes ['i'] = STACK_PTR;
12646 stacktypes ['l'] = STACK_I8;
12647 stacktypes ['f'] = STACK_R8;
12648 #ifdef MONO_ARCH_SIMD_INTRINSICS
12649 stacktypes ['x'] = STACK_VTYPE;
12652 #if SIZEOF_REGISTER == 4
12653 /* Create MonoInsts for longs */
12654 for (i = 0; i < cfg->num_varinfo; i++) {
12655 MonoInst *ins = cfg->varinfo [i];
12657 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12658 switch (ins->type) {
12663 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12666 g_assert (ins->opcode == OP_REGOFFSET);
12668 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12670 tree->opcode = OP_REGOFFSET;
12671 tree->inst_basereg = ins->inst_basereg;
12672 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12674 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12676 tree->opcode = OP_REGOFFSET;
12677 tree->inst_basereg = ins->inst_basereg;
12678 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12688 if (cfg->compute_gc_maps) {
12689 /* registers need liveness info even for !non refs */
12690 for (i = 0; i < cfg->num_varinfo; i++) {
12691 MonoInst *ins = cfg->varinfo [i];
12693 if (ins->opcode == OP_REGVAR)
12694 ins->flags |= MONO_INST_GC_TRACK;
12698 if (cfg->gsharedvt) {
12699 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12701 for (i = 0; i < cfg->num_varinfo; ++i) {
12702 MonoInst *ins = cfg->varinfo [i];
12705 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12706 if (i >= cfg->locals_start) {
12708 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12709 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12710 ins->opcode = OP_GSHAREDVT_LOCAL;
12711 ins->inst_imm = idx;
12714 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12715 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12721 /* FIXME: widening and truncation */
12724 * As an optimization, when a variable allocated to the stack is first loaded into
12725 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12726 * the variable again.
12728 orig_next_vreg = cfg->next_vreg;
12729 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12730 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12734 * These arrays contain the first and last instructions accessing a given
12736 * Since we emit bblocks in the same order we process them here, and we
12737 * don't split live ranges, these will precisely describe the live range of
12738 * the variable, i.e. the instruction range where a valid value can be found
12739 * in the variables location.
12740 * The live range is computed using the liveness info computed by the liveness pass.
12741 * We can't use vmv->range, since that is an abstract live range, and we need
12742 * one which is instruction precise.
12743 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12745 /* FIXME: Only do this if debugging info is requested */
12746 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12747 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12748 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12749 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12751 /* Add spill loads/stores */
12752 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12755 if (cfg->verbose_level > 2)
12756 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12758 /* Clear vreg_to_lvreg array */
12759 for (i = 0; i < lvregs_len; i++)
12760 vreg_to_lvreg [lvregs [i]] = 0;
12764 MONO_BB_FOR_EACH_INS (bb, ins) {
12765 const char *spec = INS_INFO (ins->opcode);
12766 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12767 gboolean store, no_lvreg;
12768 int sregs [MONO_MAX_SRC_REGS];
12770 if (G_UNLIKELY (cfg->verbose_level > 2))
12771 mono_print_ins (ins);
12773 if (ins->opcode == OP_NOP)
12777 * We handle LDADDR here as well, since it can only be decomposed
12778 * when variable addresses are known.
12780 if (ins->opcode == OP_LDADDR) {
12781 MonoInst *var = ins->inst_p0;
12783 if (var->opcode == OP_VTARG_ADDR) {
12784 /* Happens on SPARC/S390 where vtypes are passed by reference */
12785 MonoInst *vtaddr = var->inst_left;
12786 if (vtaddr->opcode == OP_REGVAR) {
12787 ins->opcode = OP_MOVE;
12788 ins->sreg1 = vtaddr->dreg;
12790 else if (var->inst_left->opcode == OP_REGOFFSET) {
12791 ins->opcode = OP_LOAD_MEMBASE;
12792 ins->inst_basereg = vtaddr->inst_basereg;
12793 ins->inst_offset = vtaddr->inst_offset;
12796 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12797 /* gsharedvt arg passed by ref */
12798 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12800 ins->opcode = OP_LOAD_MEMBASE;
12801 ins->inst_basereg = var->inst_basereg;
12802 ins->inst_offset = var->inst_offset;
12803 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12804 MonoInst *load, *load2, *load3;
12805 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12806 int reg1, reg2, reg3;
12807 MonoInst *info_var = cfg->gsharedvt_info_var;
12808 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12812 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12815 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12817 g_assert (info_var);
12818 g_assert (locals_var);
12820 /* Mark the instruction used to compute the locals var as used */
12821 cfg->gsharedvt_locals_var_ins = NULL;
12823 /* Load the offset */
12824 if (info_var->opcode == OP_REGOFFSET) {
12825 reg1 = alloc_ireg (cfg);
12826 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12827 } else if (info_var->opcode == OP_REGVAR) {
12829 reg1 = info_var->dreg;
12831 g_assert_not_reached ();
12833 reg2 = alloc_ireg (cfg);
12834 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12835 /* Load the locals area address */
12836 reg3 = alloc_ireg (cfg);
12837 if (locals_var->opcode == OP_REGOFFSET) {
12838 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12839 } else if (locals_var->opcode == OP_REGVAR) {
12840 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12842 g_assert_not_reached ();
12844 /* Compute the address */
12845 ins->opcode = OP_PADD;
12849 mono_bblock_insert_before_ins (bb, ins, load3);
12850 mono_bblock_insert_before_ins (bb, load3, load2);
12852 mono_bblock_insert_before_ins (bb, load2, load);
12854 g_assert (var->opcode == OP_REGOFFSET);
12856 ins->opcode = OP_ADD_IMM;
12857 ins->sreg1 = var->inst_basereg;
12858 ins->inst_imm = var->inst_offset;
12861 *need_local_opts = TRUE;
12862 spec = INS_INFO (ins->opcode);
12865 if (ins->opcode < MONO_CEE_LAST) {
12866 mono_print_ins (ins);
12867 g_assert_not_reached ();
12871 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12875 if (MONO_IS_STORE_MEMBASE (ins)) {
12876 tmp_reg = ins->dreg;
12877 ins->dreg = ins->sreg2;
12878 ins->sreg2 = tmp_reg;
12881 spec2 [MONO_INST_DEST] = ' ';
12882 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12883 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12884 spec2 [MONO_INST_SRC3] = ' ';
12886 } else if (MONO_IS_STORE_MEMINDEX (ins))
12887 g_assert_not_reached ();
12892 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12893 printf ("\t %.3s %d", spec, ins->dreg);
12894 num_sregs = mono_inst_get_src_registers (ins, sregs);
12895 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12896 printf (" %d", sregs [srcindex]);
12903 regtype = spec [MONO_INST_DEST];
12904 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12907 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12908 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12909 MonoInst *store_ins;
12911 MonoInst *def_ins = ins;
12912 int dreg = ins->dreg; /* The original vreg */
12914 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12916 if (var->opcode == OP_REGVAR) {
12917 ins->dreg = var->dreg;
12918 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12920 * Instead of emitting a load+store, use a _membase opcode.
12922 g_assert (var->opcode == OP_REGOFFSET);
12923 if (ins->opcode == OP_MOVE) {
12927 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12928 ins->inst_basereg = var->inst_basereg;
12929 ins->inst_offset = var->inst_offset;
12932 spec = INS_INFO (ins->opcode);
12936 g_assert (var->opcode == OP_REGOFFSET);
12938 prev_dreg = ins->dreg;
12940 /* Invalidate any previous lvreg for this vreg */
12941 vreg_to_lvreg [ins->dreg] = 0;
12945 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12947 store_opcode = OP_STOREI8_MEMBASE_REG;
12950 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12952 #if SIZEOF_REGISTER != 8
12953 if (regtype == 'l') {
12954 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12955 mono_bblock_insert_after_ins (bb, ins, store_ins);
12956 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12957 mono_bblock_insert_after_ins (bb, ins, store_ins);
12958 def_ins = store_ins;
12963 g_assert (store_opcode != OP_STOREV_MEMBASE);
12965 /* Try to fuse the store into the instruction itself */
12966 /* FIXME: Add more instructions */
12967 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12968 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12969 ins->inst_imm = ins->inst_c0;
12970 ins->inst_destbasereg = var->inst_basereg;
12971 ins->inst_offset = var->inst_offset;
12972 spec = INS_INFO (ins->opcode);
12973 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12974 ins->opcode = store_opcode;
12975 ins->inst_destbasereg = var->inst_basereg;
12976 ins->inst_offset = var->inst_offset;
12980 tmp_reg = ins->dreg;
12981 ins->dreg = ins->sreg2;
12982 ins->sreg2 = tmp_reg;
12985 spec2 [MONO_INST_DEST] = ' ';
12986 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12987 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12988 spec2 [MONO_INST_SRC3] = ' ';
12990 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12991 // FIXME: The backends expect the base reg to be in inst_basereg
12992 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12994 ins->inst_basereg = var->inst_basereg;
12995 ins->inst_offset = var->inst_offset;
12996 spec = INS_INFO (ins->opcode);
12998 /* printf ("INS: "); mono_print_ins (ins); */
12999 /* Create a store instruction */
13000 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13002 /* Insert it after the instruction */
13003 mono_bblock_insert_after_ins (bb, ins, store_ins);
13005 def_ins = store_ins;
13008 * We can't assign ins->dreg to var->dreg here, since the
13009 * sregs could use it. So set a flag, and do it after
13012 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13013 dest_has_lvreg = TRUE;
13018 if (def_ins && !live_range_start [dreg]) {
13019 live_range_start [dreg] = def_ins;
13020 live_range_start_bb [dreg] = bb;
13023 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13026 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13027 tmp->inst_c1 = dreg;
13028 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13035 num_sregs = mono_inst_get_src_registers (ins, sregs);
13036 for (srcindex = 0; srcindex < 3; ++srcindex) {
13037 regtype = spec [MONO_INST_SRC1 + srcindex];
13038 sreg = sregs [srcindex];
13040 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13041 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13042 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13043 MonoInst *use_ins = ins;
13044 MonoInst *load_ins;
13045 guint32 load_opcode;
13047 if (var->opcode == OP_REGVAR) {
13048 sregs [srcindex] = var->dreg;
13049 //mono_inst_set_src_registers (ins, sregs);
13050 live_range_end [sreg] = use_ins;
13051 live_range_end_bb [sreg] = bb;
13053 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13056 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13057 /* var->dreg is a hreg */
13058 tmp->inst_c1 = sreg;
13059 mono_bblock_insert_after_ins (bb, ins, tmp);
13065 g_assert (var->opcode == OP_REGOFFSET);
13067 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13069 g_assert (load_opcode != OP_LOADV_MEMBASE);
13071 if (vreg_to_lvreg [sreg]) {
13072 g_assert (vreg_to_lvreg [sreg] != -1);
13074 /* The variable is already loaded to an lvreg */
13075 if (G_UNLIKELY (cfg->verbose_level > 2))
13076 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13077 sregs [srcindex] = vreg_to_lvreg [sreg];
13078 //mono_inst_set_src_registers (ins, sregs);
13082 /* Try to fuse the load into the instruction */
13083 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13084 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13085 sregs [0] = var->inst_basereg;
13086 //mono_inst_set_src_registers (ins, sregs);
13087 ins->inst_offset = var->inst_offset;
13088 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13089 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13090 sregs [1] = var->inst_basereg;
13091 //mono_inst_set_src_registers (ins, sregs);
13092 ins->inst_offset = var->inst_offset;
13094 if (MONO_IS_REAL_MOVE (ins)) {
13095 ins->opcode = OP_NOP;
13098 //printf ("%d ", srcindex); mono_print_ins (ins);
13100 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13102 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13103 if (var->dreg == prev_dreg) {
13105 * sreg refers to the value loaded by the load
13106 * emitted below, but we need to use ins->dreg
13107 * since it refers to the store emitted earlier.
13111 g_assert (sreg != -1);
13112 vreg_to_lvreg [var->dreg] = sreg;
13113 g_assert (lvregs_len < 1024);
13114 lvregs [lvregs_len ++] = var->dreg;
13118 sregs [srcindex] = sreg;
13119 //mono_inst_set_src_registers (ins, sregs);
13121 #if SIZEOF_REGISTER != 8
13122 if (regtype == 'l') {
13123 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13124 mono_bblock_insert_before_ins (bb, ins, load_ins);
13125 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13126 mono_bblock_insert_before_ins (bb, ins, load_ins);
13127 use_ins = load_ins;
13132 #if SIZEOF_REGISTER == 4
13133 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13135 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13136 mono_bblock_insert_before_ins (bb, ins, load_ins);
13137 use_ins = load_ins;
13141 if (var->dreg < orig_next_vreg) {
13142 live_range_end [var->dreg] = use_ins;
13143 live_range_end_bb [var->dreg] = bb;
13146 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13149 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13150 tmp->inst_c1 = var->dreg;
13151 mono_bblock_insert_after_ins (bb, ins, tmp);
13155 mono_inst_set_src_registers (ins, sregs);
13157 if (dest_has_lvreg) {
13158 g_assert (ins->dreg != -1);
13159 vreg_to_lvreg [prev_dreg] = ins->dreg;
13160 g_assert (lvregs_len < 1024);
13161 lvregs [lvregs_len ++] = prev_dreg;
13162 dest_has_lvreg = FALSE;
13166 tmp_reg = ins->dreg;
13167 ins->dreg = ins->sreg2;
13168 ins->sreg2 = tmp_reg;
13171 if (MONO_IS_CALL (ins)) {
13172 /* Clear vreg_to_lvreg array */
13173 for (i = 0; i < lvregs_len; i++)
13174 vreg_to_lvreg [lvregs [i]] = 0;
13176 } else if (ins->opcode == OP_NOP) {
13178 MONO_INST_NULLIFY_SREGS (ins);
13181 if (cfg->verbose_level > 2)
13182 mono_print_ins_index (1, ins);
13185 /* Extend the live range based on the liveness info */
13186 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13187 for (i = 0; i < cfg->num_varinfo; i ++) {
13188 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13190 if (vreg_is_volatile (cfg, vi->vreg))
13191 /* The liveness info is incomplete */
13194 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13195 /* Live from at least the first ins of this bb */
13196 live_range_start [vi->vreg] = bb->code;
13197 live_range_start_bb [vi->vreg] = bb;
13200 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13201 /* Live at least until the last ins of this bb */
13202 live_range_end [vi->vreg] = bb->last_ins;
13203 live_range_end_bb [vi->vreg] = bb;
13209 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13211 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13212 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13214 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13215 for (i = 0; i < cfg->num_varinfo; ++i) {
13216 int vreg = MONO_VARINFO (cfg, i)->vreg;
13219 if (live_range_start [vreg]) {
13220 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13222 ins->inst_c1 = vreg;
13223 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13225 if (live_range_end [vreg]) {
13226 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13228 ins->inst_c1 = vreg;
13229 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13230 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13232 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13238 if (cfg->gsharedvt_locals_var_ins) {
13239 /* Nullify if unused */
13240 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13241 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13244 g_free (live_range_start);
13245 g_free (live_range_end);
13246 g_free (live_range_start_bb);
13247 g_free (live_range_end_bb);
13252 * - use 'iadd' instead of 'int_add'
13253 * - handling ovf opcodes: decompose in method_to_ir.
13254 * - unify iregs/fregs
13255 * -> partly done, the missing parts are:
13256 * - a more complete unification would involve unifying the hregs as well, so
13257 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13258 * would no longer map to the machine hregs, so the code generators would need to
13259 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13260 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13261 * fp/non-fp branches speeds it up by about 15%.
13262 * - use sext/zext opcodes instead of shifts
13264 * - get rid of TEMPLOADs if possible and use vregs instead
13265 * - clean up usage of OP_P/OP_ opcodes
13266 * - cleanup usage of DUMMY_USE
13267 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13269 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13270 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13271 * - make sure handle_stack_args () is called before the branch is emitted
13272 * - when the new IR is done, get rid of all unused stuff
13273 * - COMPARE/BEQ as separate instructions or unify them ?
13274 * - keeping them separate allows specialized compare instructions like
13275 * compare_imm, compare_membase
13276 * - most back ends unify fp compare+branch, fp compare+ceq
13277 * - integrate mono_save_args into inline_method
13278 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13279 * - handle long shift opts on 32 bit platforms somehow: they require
13280 * 3 sregs (2 for arg1 and 1 for arg2)
13281 * - make byref a 'normal' type.
13282 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13283 * variable if needed.
13284 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13285 * like inline_method.
13286 * - remove inlining restrictions
13287 * - fix LNEG and enable cfold of INEG
13288 * - generalize x86 optimizations like ldelema as a peephole optimization
13289 * - add store_mem_imm for amd64
13290 * - optimize the loading of the interruption flag in the managed->native wrappers
13291 * - avoid special handling of OP_NOP in passes
13292 * - move code inserting instructions into one function/macro.
13293 * - try a coalescing phase after liveness analysis
13294 * - add float -> vreg conversion + local optimizations on !x86
13295 * - figure out how to handle decomposed branches during optimizations, ie.
13296 * compare+branch, op_jump_table+op_br etc.
13297 * - promote RuntimeXHandles to vregs
13298 * - vtype cleanups:
13299 * - add a NEW_VARLOADA_VREG macro
13300 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13301 * accessing vtype fields.
13302 * - get rid of I8CONST on 64 bit platforms
13303 * - dealing with the increase in code size due to branches created during opcode
13305 * - use extended basic blocks
13306 * - all parts of the JIT
13307 * - handle_global_vregs () && local regalloc
13308 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13309 * - sources of increase in code size:
13312 * - isinst and castclass
13313 * - lvregs not allocated to global registers even if used multiple times
13314 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13316 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13317 * - add all micro optimizations from the old JIT
13318 * - put tree optimizations into the deadce pass
13319 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13320 * specific function.
13321 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13322 * fcompare + branchCC.
13323 * - create a helper function for allocating a stack slot, taking into account
13324 * MONO_CFG_HAS_SPILLUP.
13326 * - merge the ia64 switch changes.
13327 * - optimize mono_regstate2_alloc_int/float.
13328 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13329 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13330 * parts of the tree could be separated by other instructions, killing the tree
13331 * arguments, or stores killing loads etc. Also, should we fold loads into other
13332 * instructions if the result of the load is used multiple times ?
13333 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13334 * - LAST MERGE: 108395.
13335 * - when returning vtypes in registers, generate IR and append it to the end of the
13336 * last bb instead of doing it in the epilog.
13337 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13345 - When to decompose opcodes:
13346 - earlier: this makes some optimizations hard to implement, since the low level IR
13347 no longer contains the necessary information. But it is easier to do.
13348 - later: harder to implement, enables more optimizations.
13349 - Branches inside bblocks:
13350 - created when decomposing complex opcodes.
13351 - branches to another bblock: harmless, but not tracked by the branch
13352 optimizations, so need to branch to a label at the start of the bblock.
13353 - branches to inside the same bblock: very problematic, trips up the local
13354 reg allocator. Can be fixed by splitting the current bblock, but that is a
13355 complex operation, since some local vregs can become global vregs etc.
13356 - Local/global vregs:
13357 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13358 local register allocator.
13359 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13360 structure, created by mono_create_var (). Assigned to hregs or the stack by
13361 the global register allocator.
13362 - When to do optimizations like alu->alu_imm:
13363 - earlier -> saves work later on since the IR will be smaller/simpler
13364 - later -> can work on more instructions
13365 - Handling of valuetypes:
13366 - When a vtype is pushed on the stack, a new temporary is created, an
13367 instruction computing its address (LDADDR) is emitted and pushed on
13368 the stack. Need to optimize cases when the vtype is used immediately as in
13369 argument passing, stloc etc.
13370 - Instead of the to_end stuff in the old JIT, simply call the function handling
13371 the values on the stack before emitting the last instruction of the bb.
13374 #endif /* DISABLE_JIT */