2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Branch cost unit used by the inlining heuristics. */
72 #define BRANCH_COST 10
/* Maximum IL size (in bytes) of a method considered for inlining. */
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { /* log the reason (at -v -v) and abort inlining */ \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do { /* stop if an exception is already flagged on the cfg */ \
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { /* flag MONO_EXCEPTION_METHOD_ACCESS and jump to exception_exit */ \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { /* flag MONO_EXCEPTION_FIELD_ACCESS and jump to exception_exit */ \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { /* opcode not supported under generic sharing: flag and bail */ \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { /* opcode not supported under gsharedvt: record message, flag and bail */ \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { /* flag OOM on the cfg and jump to exception_exit */ \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode conversion helpers defined later in this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* Cached icall signatures for the JIT helper trampolines; filled in by mono_create_helper_signatures (). */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/*
 * Create and cache the signatures used when emitting calls to the JIT
 * helper trampolines (see the helper_sig_* globals above).  The string
 * passed to mono_create_icall_signature () is "<ret> [<arg> ...]".
 * NOTE(review): the function's braces are not visible in this chunk.
 */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* Nothing to do if the edge is already present in from's out list. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
/* Grow from's out_bb array by one; arrays are allocated from the cfg mempool (never freed individually). */
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Symmetrically record the edge in to's in_bb list, unless already present. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
/* Region token layout: (clause_index + 1) << 8 | MONO_REGION_* | clause->flags. */
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* A filter region runs from filter_offset up to (but excluding) handler_offset. */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offset is inside the clause's protected (try) range but not its handler. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the exception clauses of kind 'type' whose protected range
 * contains 'ip' but not 'target', i.e. the handlers that a branch from
 * 'ip' to 'target' would leave (used e.g. to run finally blocks on a
 * CIL 'leave').  Matching clauses are appended to a GList.
 * NOTE(review): the start/end of this function are not visible in this chunk.
 */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/*
 * Lazily create the stack-pointer save variable for an EH region,
 * caching it in cfg->spvars keyed by the region token.
 */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Lazily create the exception-object variable for an IL offset,
 * caching it in cfg->exvars keyed by the offset.
 */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick (or create) the out_stack variables. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing the in_stack of a successor that already has one. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables for each slot. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the stack depth/variables to every non-handler successor. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
/* Depth mismatch at a join point: flag the method as unverifiable. */
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Store each stack item into its variable and replace it on the stack. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* This successor uses a different variable set: store there too. */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load into INTF_REG the interface offset entry for KLASS from the
 *   table that precedes the vtable pointed to by VTABLE_REG.  Under AOT
 *   the (adjusted) interface id is patched in at load time.
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
/* the AOT const already encodes -((iid + 1) * sizeof (void*)) — "adjusted" iid */
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: the offset is a compile-time constant before the vtable */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit code that leaves a nonzero value in INTF_BIT_REG iff the interface
 *   bitmap found at [BASE_REG + OFFSET] has the bit for KLASS's interface id
 *   set.  With COMPRESSED_INTERFACE_BITMAP the test is done via the
 *   mono_class_interface_match icall; otherwise the byte holding the bit is
 *   loaded and masked inline (AOT needs extra code since the iid is only
 *   known at load time).
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* iid only known at load time: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: runs the interface-bitmap check against MonoClass::interface_bitmap. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: runs the interface-bitmap check against MonoVTable::interface_bitmap. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
/*
 * On failure: branches to FALSE_TARGET when one is given, otherwise emits a
 * conditional InvalidCastException throw.
 */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
/* iid resolved at load time under AOT */
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable::max_interface_id and delegates to mini_emit_max_iid_check. */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass::max_interface_id and delegates to mini_emit_max_iid_check. */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "is instance of KLASS" test using the supertypes table of the
 *   runtime class in KLASS_REG: compare supertypes[idepth-1] against KLASS
 *   (as an inst dreg, an AOT class const, or an immediate), branching to
 *   TRUE_TARGET on match and to FALSE_TARGET when the depth check fails.
 *   NOTE(review): listing is incomplete; some guard/branch lines are absent.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
/* only need a runtime idepth check when the fixed-size supertable can't cover it */
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* three ways to materialize the expected class: inst, AOT const, or immediate */
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no precomputed class instruction. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast/test against the vtable in VTABLE_REG: bounds-check
 *   the interface id, load the bitmap bit, then either branch to TRUE_TARGET
 *   on success or throw InvalidCastException (when no targets are given).
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
/* Same structure as mini_emit_iface_cast but reads MonoClass fields. */
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of KLASS_REG against KLASS (taken from
 *   KLASS_INST's dreg when supplied, else an AOT class const, else an
 *   immediate); throws InvalidCastException on mismatch.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no precomputed class instruction. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP;
 *   no exception is thrown here (the caller decides what a mismatch means).
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the runtime class in KLASS_REG against KLASS,
 *   throwing InvalidCastException on failure.  Arrays are handled by rank +
 *   element-class checks (with special cases for object/enum element types
 *   and an SZARRAY bounds==NULL "is a vector" check); non-arrays walk the
 *   supertypes table.  OBJ_REG == -1 skips the vector check (used for
 *   arrays of arrays).
 *   NOTE(review): listing is incomplete; branch/brace lines are absent
 *   between the numbered statements below.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* array path: rank must match exactly */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* element-class dispatch on the compile-time cast_class */
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: supertype-table walk (mirrors mini_emit_isninst_cast_inst) */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no precomputed class instruction. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline code that sets SIZE bytes at [DESTREG + OFFSET] to VAL
 *   (asserted to be 0).  Small aligned sizes use single immediate stores;
 *   otherwise a value register is materialized and stores are emitted in
 *   the widest chunks the alignment / NO_UNALIGNED_ACCESS setting allows,
 *   finishing with 2- and 1-byte tails.
 *   NOTE(review): listing is incomplete; loop/brace lines are absent.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
/* fast path: one immediate store when the size fits the alignment */
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: put the value in a register and store in chunks */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
/* 2-byte then 1-byte tail */
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline code copying SIZE bytes from [SRCREG + SOFFSET] to
 *   [DESTREG + DOFFSET].  Copies in 8/4-byte chunks where alignment and
 *   NO_UNALIGNED_ACCESS permit, then 2- and 1-byte tails; unaligned input
 *   falls back to byte-by-byte copies.
 *   NOTE(review): listing is incomplete; loop/offset-advance lines are absent.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* unaligned: byte-wise load/store pairs */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets */
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte then 1-byte tail */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return TYPE to the matching call opcode, choosing among
 *   the CALL / CALL_REG (calli) / CALLVIRT (virt) families and the
 *   void/int/long/float/vtype variants.  Enum valuetypes and generic
 *   instances are reduced to their underlying type first (loops back via
 *   the switch; the goto/continue lines are absent from this listing).
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: plain CALL family */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
/* enums are handled as their underlying integral type */
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 case MONO_TYPE_MVAR:
/* gsharedvt type variables are treated as vtypes here */
1922 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1924 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1930 * target_type_is_incompatible:
1931 * @cfg: MonoCompile context
1933 * Check that the item @arg on the evaluation stack can be stored
1934 * in the target type (can be a local, or field, etc).
1935 * The cfg arg can be used to check if we need verification or just
1938 * Returns: non-0 value if arg can't be stored on a target.
1941 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1943 MonoType *simple_type;
/* byref targets accept managed pointers (matching class) and native pointers */
1946 if (target->byref) {
1947 /* FIXME: check that the pointed to types match */
1948 if (arg->type == STACK_MP)
1949 return arg->klass != mono_class_from_mono_type (target);
1950 if (arg->type == STACK_PTR)
1955 simple_type = mono_type_get_underlying_type (target);
1956 switch (simple_type->type) {
1957 case MONO_TYPE_VOID:
1961 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_CHAR:
/* small ints accept I4 or native-int stack entries */
1967 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1971 /* STACK_MP is needed when setting pinned locals */
1972 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1977 case MONO_TYPE_FNPTR:
1979 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1980 * in native int. (#688008).
1982 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 if (arg->type != STACK_I8)
2001 if (arg->type != STACK_R8)
/* valuetypes must match both the stack kind and the exact class */
2004 case MONO_TYPE_VALUETYPE:
2005 if (arg->type != STACK_VTYPE)
2007 klass = mono_class_from_mono_type (simple_type);
2008 if (klass != arg->klass)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (arg->type != STACK_VTYPE)
2014 klass = mono_class_from_mono_type (simple_type);
2015 if (klass != arg->klass)
2018 case MONO_TYPE_GENERICINST:
2019 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2020 if (arg->type != STACK_VTYPE)
2022 klass = mono_class_from_mono_type (simple_type);
2023 if (klass != arg->klass)
2027 if (arg->type != STACK_OBJ)
2029 /* FIXME: check type compatibility */
2033 case MONO_TYPE_MVAR:
/* type variables only appear here under generic sharing */
2034 g_assert (cfg->generic_sharing_context);
2035 if (mini_type_var_is_vt (cfg, simple_type)) {
2036 if (arg->type != STACK_VTYPE)
2039 if (arg->type != STACK_OBJ)
2044 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2050 * Prepare arguments for passing to a function call.
2051 * Return a non-zero value if the arguments can't be passed to the given
2053 * The type checks are not yet complete and some conversions may need
2054 * casts on 32 or 64 bit architectures.
2056 * FIXME: implement this using target_type_is_incompatible ()
2059 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2061 MonoType *simple_type;
/* the implicit 'this' must be an object/managed/native pointer */
2065 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* check each declared parameter against the matching stack entry */
2069 for (i = 0; i < sig->param_count; ++i) {
2070 if (sig->params [i]->byref) {
2071 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2075 simple_type = sig->params [i];
2076 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2078 switch (simple_type->type) {
2079 case MONO_TYPE_VOID:
2084 case MONO_TYPE_BOOLEAN:
2087 case MONO_TYPE_CHAR:
2090 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2096 case MONO_TYPE_FNPTR:
2097 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2100 case MONO_TYPE_CLASS:
2101 case MONO_TYPE_STRING:
2102 case MONO_TYPE_OBJECT:
2103 case MONO_TYPE_SZARRAY:
2104 case MONO_TYPE_ARRAY:
2105 if (args [i]->type != STACK_OBJ)
2110 if (args [i]->type != STACK_I8)
2115 if (args [i]->type != STACK_R8)
2118 case MONO_TYPE_VALUETYPE:
/* enums reduce to their underlying type and re-enter the switch */
2119 if (simple_type->data.klass->enumtype) {
2120 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_TYPEDBYREF:
2127 if (args [i]->type != STACK_VTYPE)
2130 case MONO_TYPE_GENERICINST:
2131 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
2136 if (args [i]->type != STACK_VTYPE)
2140 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart;
 *   asserts on any other opcode.  (Most cases absent from this listing.)
 */
2148 callvirt_to_call (int opcode)
2153 case OP_VOIDCALLVIRT:
2162 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE
 *   (indirect call through a memory slot); asserts on any other opcode.
 */
2169 callvirt_to_call_membase (int opcode)
2173 return OP_CALL_MEMBASE;
2174 case OP_VOIDCALLVIRT:
2175 return OP_VOIDCALL_MEMBASE;
2177 return OP_FCALL_MEMBASE;
2179 return OP_LCALL_MEMBASE;
2181 return OP_VCALL_MEMBASE;
2183 g_assert_not_reached ();
2189 #ifdef MONO_ARCH_HAVE_IMT
2190 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (an existing IMT_ARG inst, an AOT method
 *   const, or a PCONST of METHOD) into a register and attach it to CALL.
 *   LLVM and non-LLVM paths differ in how the register is bound
 *   (imt_arg_reg vs. MONO_ARCH_IMT_REG / mono_arch_emit_imt_argument).
 */
2192 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2196 if (COMPILE_LLVM (cfg)) {
2197 method_reg = alloc_preg (cfg);
2200 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2201 } else if (cfg->compile_aot) {
2202 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2205 MONO_INST_NEW (cfg, ins, OP_PCONST);
2206 ins->inst_p0 = method;
2207 ins->dreg = method_reg;
2208 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM reads the register from the call itself */
2212 call->imt_arg_reg = method_reg;
2214 #ifdef MONO_ARCH_IMT_REG
2215 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2217 /* Need this to keep the IMT arg alive */
2218 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same constant materialization, bound to the arch IMT register */
2223 #ifdef MONO_ARCH_IMT_REG
2224 method_reg = alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2228 } else if (cfg->compile_aot) {
2229 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2232 MONO_INST_NEW (cfg, ins, OP_PCONST);
2233 ins->inst_p0 = method;
2234 ins->dreg = method_reg;
2235 MONO_ADD_INS (cfg->cbb, ins);
2238 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* no IMT register on this arch: let the backend place the argument */
2240 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2245 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP and fill in ip/type/target.
 *   (Assignments of ip and type are absent from this listing.)
 */
2246 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2248 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2252 ji->data.target = target;
/* Returns KLASS's generic-context usage, but only under generic sharing. */
2258 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2260 if (cfg->generic_sharing_context)
2261 return mono_class_check_context_used (klass);
/* Returns METHOD's generic-context usage, but only under generic sharing. */
2267 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2269 if (cfg->generic_sharing_context)
2270 return mono_method_check_context_used (method);
2276 * check_method_sharing:
2278 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2281 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2283 gboolean pass_vtable = FALSE;
2284 gboolean pass_mrgctx = FALSE;
/* vtable candidate: static or valuetype method on a generic class */
2286 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2287 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2288 gboolean sharable = FALSE;
2290 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2293 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2294 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2295 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2297 sharable = sharing_enabled && context_sharable;
2301 * Pass vtable iff target method might
2302 * be shared, which means that sharing
2303 * is enabled for its class and its
2304 * context is sharable (and it's not a
2307 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx candidate: method with its own generic method instantiation */
2311 if (mini_method_get_context (cmethod) &&
2312 mini_method_get_context (cmethod)->method_inst) {
2313 g_assert (!pass_vtable);
2315 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2319 MonoGenericContext *context = mini_method_get_context (cmethod);
2320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2322 if (sharing_enabled && context_sharable)
2324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* both out-params are optional */
2329 if (out_pass_vtable)
2330 *out_pass_vtable = pass_vtable;
2331 if (out_pass_mrgctx)
2332 *out_pass_mrgctx = pass_mrgctx;
2335 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for SIG and ARGS, picking the opcode from the
 *   calli/virtual/tail flags, wiring up vtype-return handling (vret_var /
 *   OP_OUTARG_VTRETADDR), converting R4 arguments for soft-float targets,
 *   and letting the backend (or LLVM) lower the argument passing.
 *   NOTE(review): listing is incomplete; several lines (declarations,
 *   braces, return) are absent.
 */
2336 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2337 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2340 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2347 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2350 call->signature = sig;
2351 call->rgctx_reg = rgctx;
2353 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* tail-call path reuses the caller's vret_addr for vtype returns */
2356 if (mini_type_is_vtype (cfg, sig->ret)) {
2357 call->vret_var = cfg->vret_addr;
2358 //g_assert_not_reached ();
/* normal vtype return: allocate a temp and pass its address */
2360 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2361 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2364 temp->backend.is_pinvoke = sig->pinvoke;
2367 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2368 * address of return value to increase optimization opportunities.
2369 * Before vtype decomposition, the dreg of the call ins itself represents the
2370 * fact the call modifies the return value. After decomposition, the call will
2371 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2372 * will be transformed into an LDADDR.
2374 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2375 loada->dreg = alloc_preg (cfg);
2376 loada->inst_p0 = temp;
2377 /* We reference the call too since call->dreg could change during optimization */
2378 loada->inst_p1 = call;
2379 MONO_ADD_INS (cfg->cbb, loada);
2381 call->inst.dreg = temp->dreg;
2383 call->vret_var = loada;
2384 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2385 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2387 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2388 if (COMPILE_SOFT_FLOAT (cfg)) {
2390 * If the call has a float argument, we would need to do an r8->r4 conversion using
2391 * an icall, but that cannot be done during the call sequence since it would clobber
2392 * the call registers + the stack. So we do it before emitting the call.
2394 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2396 MonoInst *in = call->args [i];
2398 if (i >= sig->hasthis)
2399 t = sig->params [i - sig->hasthis];
2401 t = &mono_defaults.int_class->byval_arg;
2402 t = mono_type_get_underlying_type (t);
2404 if (!t->byref && t->type == MONO_TYPE_R4) {
2405 MonoInst *iargs [1];
2409 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2411 /* The result will be in an int vreg */
2412 call->args [i] = conv;
2418 call->need_unbox_trampoline = unbox_trampoline;
/* let LLVM or the arch backend lower the argument passing */
2421 if (COMPILE_LLVM (cfg))
2422 mono_llvm_emit_call (cfg, call);
2424 mono_arch_emit_call (cfg, call);
2426 mono_arch_emit_call (cfg, call);
2429 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2430 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument register to CALL — via the dedicated arch
 *   register when MONO_ARCH_RGCTX_REG exists, else via call->rgctx_arg_reg.
 */
2436 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2438 #ifdef MONO_ARCH_RGCTX_REG
2439 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2440 cfg->uses_rgctx_reg = TRUE;
2441 call->rgctx_reg = TRUE;
2443 call->rgctx_arg_reg = rgctx_reg;
2450 inline static MonoInst*
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with optional IMT and rgctx
 *   arguments; the rgctx value is copied to a fresh preg first so it
 *   survives until the call is lowered.
 */
2451 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2457 rgctx_reg = mono_alloc_preg (cfg);
2458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2461 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* the target address goes in sreg1 for *CALL_REG opcodes */
2463 call->inst.sreg1 = addr->dreg;
2466 emit_imt_argument (cfg, call, NULL, imt_arg);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2471 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2473 return (MonoInst*)call;
2477 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2480 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2482 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit IR for a managed method call, choosing among direct call, statically
 *   dispatched virtual call, delegate-invoke fast path, vtable/IMT dispatch,
 *   and remoting wrappers. THIS != NULL selects virtual dispatch.
 *   NOTE(review): sampled excerpt — several declarations, `#else` branches and
 *   closing braces are elided; comments below describe only what is visible.
 */
2485 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2486 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2488 #ifndef DISABLE_REMOTING
2489 gboolean might_be_remote = FALSE;
2491 gboolean virtual = this != NULL;
2492 gboolean enable_for_aot = TRUE;
2496 gboolean need_unbox_trampoline;
/* Presumably a fallback when SIG was passed as NULL — TODO confirm (guard elided). */
2499 sig = mono_method_signature (method);
2502 rgctx_reg = mono_alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared void but actually return the string. */
2506 if (method->string_ctor) {
2507 /* Create the real signature */
2508 /* FIXME: Cache these */
2509 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2510 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2515 context_used = mini_method_check_context_used (cfg, method);
2517 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) 'this' may be a remote proxy. */
2518 might_be_remote = this && sig->hasthis &&
2519 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2520 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2522 if (might_be_remote && context_used) {
2525 g_assert (cfg->generic_sharing_context);
/* Under generic sharing, fetch the remoting-check wrapper address from the rgctx. */
2527 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2529 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2533 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2537 #ifndef DISABLE_REMOTING
2538 if (might_be_remote)
2539 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2542 call->method = method;
2543 call->inst.flags |= MONO_INST_HAS_METHOD;
2544 call->inst.inst_left = this;
2545 call->tail_call = tail;
/* --- virtual dispatch path (enclosing `if (virtual)` elided in excerpt) --- */
2548 int vtable_reg, slot_reg, this_reg;
2551 this_reg = this->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl. */
2553 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2554 MonoInst *dummy_use;
2556 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2558 /* Make a call to delegate->invoke_impl */
2559 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2560 call->inst.inst_basereg = this_reg;
2561 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2562 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2564 /* We must emit a dummy use here because the delegate trampoline will
2565 replace the 'this' argument with the delegate target making this activation
2566 no longer a root for the delegate.
2567 This is an issue for delegates that target collectible code such as dynamic
2568 methods of GC'able assemblies.
2570 For a test case look into #667921.
2572 FIXME: a dummy use is not the best way to do it as the local register allocator
2573 will put it on a caller save register and spil it around the call.
2574 Ideally, we would either put it on a callee save register or only do the store part.
2576 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2578 return (MonoInst*)call;
/* Direct call: non-virtual method (or sealed) that isn't a gshared MBR case. */
2581 if ((!cfg->compile_aot || enable_for_aot) &&
2582 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2583 (MONO_METHOD_IS_FINAL (method) &&
2584 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2585 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2587 * the method is not virtual, we just need to ensure this is not null
2588 * and then we can call the method directly.
2590 #ifndef DISABLE_REMOTING
2591 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2593 * The check above ensures method is not gshared, this is needed since
2594 * gshared methods can't have wrappers.
2596 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors allocate 'this' themselves, so no null check needed. */
2600 if (!method->string_ctor)
2601 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2603 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2604 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2606 * the method is virtual, but we can statically dispatch since either
2607 * it's class or the method itself are sealed.
2608 * But first we need to ensure it's not a null reference.
2610 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2612 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* General case: load the vtable (faulting on null 'this') and dispatch. */
2614 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2616 vtable_reg = alloc_preg (cfg);
2617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2618 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2620 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets from the vtable pointer. */
2622 guint32 imt_slot = mono_method_get_imt_slot (method);
2623 emit_imt_argument (cfg, call, call->method, imt_arg);
2624 slot_reg = vtable_reg;
2625 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch: load the interface slot table. */
2628 if (slot_reg == -1) {
2629 slot_reg = alloc_preg (cfg);
2630 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2631 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual dispatch: index into the vtable's method array. */
2634 slot_reg = vtable_reg;
2635 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2636 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2637 #ifdef MONO_ARCH_HAVE_IMT
/* Presumably only generic virtual methods reach here — TODO confirm (guard elided). */
2639 g_assert (mono_method_signature (method)->generic_param_count);
2640 emit_imt_argument (cfg, call, call->method, imt_arg);
2645 call->inst.sreg1 = slot_reg;
2646 call->inst.inst_offset = offset;
2647 call->virtual = TRUE;
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2654 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2656 return (MonoInst*)call;
/* Convenience wrapper: non-tail call with the method's own signature and no imt/rgctx args. */
2660 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2662 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a call to the native function FUNC with signature SIG.
 *   NOTE(review): sampled excerpt — the parameter list continuation and the
 *   assignment of FUNC into the call (e.g. call->fptr) are elided.
 */
2666 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* Not calli, not virtual, not tail; no rgctx or unbox trampoline. */
2673 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2676 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2678 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for FUNC, going through its
 *   wrapper so the icall's calling convention is respected.
 */
2682 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2684 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2688 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2692 * mono_emit_abs_call:
2694 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2696 inline static MonoInst*
2697 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2698 MonoMethodSignature *sig, MonoInst **args)
2700 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2704 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the backend can resolve the fake address later. */
2707 if (cfg->abs_patches == NULL)
2708 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2709 g_hash_table_insert (cfg->abs_patches, ji, ji);
2710 ins = mono_emit_native_call (cfg, ji, sig, args);
2711 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or LLVM) calls,
 *   since native code may leave the upper bits of the return register
 *   uninitialized. Returns the (possibly widened) result instruction.
 *   NOTE(review): sampled excerpt — `break`s, `default:` and the final return
 *   statements are elided.
 */
2716 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2719 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2723 * Native code might return non register sized integers
2724 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load width. */
2726 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2727 case OP_LOADI1_MEMBASE:
2728 widen_op = OP_ICONV_TO_I1;
2730 case OP_LOADU1_MEMBASE:
2731 widen_op = OP_ICONV_TO_U1;
2733 case OP_LOADI2_MEMBASE:
2734 widen_op = OP_ICONV_TO_I2;
2736 case OP_LOADU2_MEMBASE:
2737 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening. */
2743 if (widen_op != -1) {
2744 int dreg = alloc_preg (cfg);
2747 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2748 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the managed String.memcpy helper from corlib.
 *   Aborts if the running corlib is too old to provide it.
 */
2758 get_memcpy_method (void)
2760 static MonoMethod *memcpy_method = NULL;
2761 if (!memcpy_method) {
2762 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2764 g_error ("Old corlib found. Install a new one");
2766 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Build a bitmap in *WB_BITMAP with one bit per pointer-sized slot of KLASS
 *   (starting at OFFSET) marking which slots hold GC references, recursing
 *   into embedded valuetypes that themselves contain references.
 */
2770 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2772 MonoClassField *field;
2773 gpointer iter = NULL;
2775 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields take no space in instances. */
2778 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2780 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2781 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2782 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2783 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2785 MonoClass *field_class = mono_class_from_mono_type (field->type);
2786 if (field_class->has_references)
2787 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR, choosing among
 *   an arch-specific card-table instruction, inline card-table marking, or a
 *   call to the GC's generic write-barrier method. No-op unless the cfg has
 *   write barriers enabled.
 */
2793 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2795 int card_table_shift_bits;
2796 gpointer card_table_mask;
2798 MonoInst *dummy_use;
2799 int nursery_shift_bits;
2800 size_t nursery_size;
2801 gboolean has_card_table_wb = FALSE;
2803 if (!cfg->gen_write_barriers)
2806 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2808 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2810 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2811 has_card_table_wb = TRUE;
/* Fastest path: a single arch-level card-table barrier instruction. */
2814 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2817 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2818 wbarrier->sreg1 = ptr->dreg;
2819 wbarrier->sreg2 = value->dreg;
2820 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
2821 } else if (card_table) {
2822 int offset_reg = alloc_preg (cfg);
2823 int card_reg = alloc_preg (cfg);
2826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2827 if (card_table_mask)
2828 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2830 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2831 * IMM's larger than 32bits.
2833 if (cfg->compile_aot) {
2834 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2836 MONO_INST_NEW (cfg, ins, OP_PCONST);
2837 ins->inst_p0 = card_table;
2838 ins->dreg = card_reg;
2839 MONO_ADD_INS (cfg->cbb, ins);
2842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC's managed write-barrier method. */
2845 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2846 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator/GC. */
2849 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Emit a write-barrier-aware copy of a valuetype of type KLASS. Small copies
 *   are unrolled inline (with barriers on reference slots); larger ones call
 *   the mono_gc_wbarrier_value_copy_bitmap icall with a reference bitmap.
 *   iargs[0]/iargs[1] hold dest/src addresses.
 *   NOTE(review): sampled excerpt — the return statements (presumably
 *   TRUE/FALSE success flags) and some size-checks are elided.
 */
2853 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2855 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2856 unsigned need_wb = 0;
2861 /*types with references can't have alignment smaller than sizeof(void*) */
2862 if (align < SIZEOF_VOID_P)
2865 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2866 if (size > 32 * SIZEOF_VOID_P)
2869 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2871 /* We don't unroll more than 5 stores to avoid code bloat. */
2872 if (size > 5 * SIZEOF_VOID_P) {
2873 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a pointer-size multiple. */
2874 size += (SIZEOF_VOID_P - 1);
2875 size &= ~(SIZEOF_VOID_P - 1);
2877 EMIT_NEW_ICONST (cfg, iargs [2], size);
2878 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2879 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* --- inline unrolled copy path --- */
2883 destreg = iargs [0]->dreg;
2884 srcreg = iargs [1]->dreg;
2887 dest_ptr_reg = alloc_preg (cfg);
2888 tmp_reg = alloc_preg (cfg);
2891 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy one pointer-sized word per iteration. */
2893 while (size >= SIZEOF_VOID_P) {
2894 MonoInst *load_inst;
2895 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2896 load_inst->dreg = tmp_reg;
2897 load_inst->inst_basereg = srcreg;
2898 load_inst->inst_offset = offset;
2899 MONO_ADD_INS (cfg->cbb, load_inst);
2901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Presumably guarded by the need_wb bit for this slot — TODO confirm (guard elided). */
2904 emit_write_barrier (cfg, iargs [0], load_inst);
2906 offset += SIZEOF_VOID_P;
2907 size -= SIZEOF_VOID_P;
2910 /*tmp += sizeof (void*)*/
2911 if (size >= SIZEOF_VOID_P) {
2912 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2913 MONO_ADD_INS (cfg->cbb, iargs [0]);
2917 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-size tail in 4/2/1-byte chunks (no barriers needed). */
2919 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2920 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2933 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2934 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2943 * Emit code to copy a valuetype of type @klass whose address is stored in
2944 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NOTE(review): sampled excerpt — several `if`/`else` and `return` lines are elided. */
2947 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2949 MonoInst *iargs [4];
2950 int context_used, n;
2952 MonoMethod *memcpy_method;
2953 MonoInst *size_ins = NULL;
2954 MonoInst *memcpy_ins = NULL;
2958 * This check breaks with spilled vars... need to handle it during verification anyway.
2959 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Gsharedvt: the size and memcpy routine are only known at runtime via the rgctx. */
2962 if (mini_is_gsharedvt_klass (cfg, klass)) {
2964 context_used = mini_class_check_context_used (cfg, klass);
2965 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2966 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
2970 n = mono_class_native_size (klass, &align);
2972 n = mono_class_value_size (klass, &align);
2974 /* if native is true there should be no references in the struct */
2975 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2976 /* Avoid barriers when storing to the stack */
2977 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2978 (dest->opcode == OP_LDADDR))) {
2984 context_used = mini_class_check_context_used (cfg, klass);
2986 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2987 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2989 } else if (context_used) {
2990 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2992 if (cfg->compile_aot) {
2993 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2995 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2996 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy icalls: gsharedvt variant takes the runtime size. */
3001 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3003 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small fixed sizes, else call memcpy. */
3008 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3009 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3010 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3015 iargs [2] = size_ins;
3017 EMIT_NEW_ICONST (cfg, iargs [2], n);
3019 memcpy_method = get_memcpy_method ();
3021 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3023 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the managed String.memset helper from corlib.
 *   Aborts if the running corlib is too old to provide it.
 */
3028 get_memset_method (void)
3030 static MonoMethod *memset_method = NULL;
3031 if (!memset_method) {
3032 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3034 g_error ("Old corlib found. Install a new one");
3036 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing the valuetype KLASS at the address in
 *   DEST->dreg (CIL `initobj`). Gsharedvt types use a runtime size and a
 *   bzero helper; small fixed-size types are zeroed inline; otherwise the
 *   managed memset helper is called.
 *   NOTE(review): sampled excerpt — some guards/returns are elided.
 */
3040 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3042 MonoInst *iargs [3];
3043 int n, context_used;
3045 MonoMethod *memset_method;
3046 MonoInst *size_ins = NULL;
3047 MonoInst *bzero_ins = NULL;
3048 static MonoMethod *bzero_method;
3050 /* FIXME: Optimize this for the case when dest is an LDADDR */
3052 mono_class_init (klass);
3053 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size and bzero routine come from the rgctx at runtime. */
3054 context_used = mini_class_check_context_used (cfg, klass);
3055 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3056 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3058 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3059 g_assert (bzero_method);
3061 iargs [1] = size_ins;
3062 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3066 n = mono_class_value_size (klass, &align);
/* Small types: unrolled inline memset of zero. */
3068 if (n <= sizeof (gpointer) * 5) {
3069 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3072 memset_method = get_memset_method ();
3074 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3075 EMIT_NEW_ICONST (cfg, iargs [2], n);
3076 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR loading the runtime generic context for METHOD: from the mrgctx
 *   variable for generic methods, from the vtable variable for static/value
 *   type methods, or from the 'this' argument's vtable otherwise.
 *   NOTE(review): sampled excerpt — several returns and braces are elided.
 */
3081 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3083 MonoInst *this = NULL;
3085 g_assert (cfg->generic_sharing_context);
/* Only load 'this' when it exists and the context doesn't come from the method. */
3087 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3088 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3089 !method->klass->valuetype)
3090 EMIT_NEW_ARGLOAD (cfg, this, 0);
3092 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3093 MonoInst *mrgctx_loc, *mrgctx_var;
3096 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The method rgctx is stashed in the per-cfg vtable variable. */
3098 mrgctx_loc = mono_get_vtable_var (cfg);
3099 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3102 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3103 MonoInst *vtable_loc, *vtable_var;
3107 vtable_loc = mono_get_vtable_var (cfg);
3108 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3110 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3111 MonoInst *mrgctx_var = vtable_var;
/* The var actually holds an mrgctx; load its embedded class vtable. */
3114 vtable_reg = alloc_preg (cfg);
3115 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3116 vtable_var->type = STACK_PTR;
/* Default: read the vtable out of the 'this' object. */
3124 vtable_reg = alloc_preg (cfg);
3125 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) an rgctx-entry patch descriptor wrapping PATCH_DATA of
 *   PATCH_TYPE, to be fetched as INFO_TYPE from METHOD's (m)rgctx.
 */
3130 static MonoJumpInfoRgctxEntry *
3131 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3133 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3134 res->method = method;
3135 res->in_mrgctx = in_mrgctx;
3136 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3137 res->data->type = patch_type;
3138 res->data->data.target = patch_data;
3139 res->info_type = info_type;
/* Emit a call to the lazy rgctx-fetch trampoline resolving ENTRY against RGCTX. */
3144 static inline MonoInst*
3145 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3147 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR loading the RGCTX_TYPE slot for KLASS from the current method's rgctx. */
3151 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3152 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3154 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3155 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3157 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE slot for signature SIG from the current rgctx. */
3161 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3162 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3164 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3165 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3167 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR fetching the gsharedvt call info (SIG + CMETHOD pair) of type
 *   RGCTX_TYPE from the current method's rgctx.
 */
3171 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3172 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3174 MonoJumpInfoGSharedVtCall *call_info;
3175 MonoJumpInfoRgctxEntry *entry;
3178 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3179 call_info->sig = sig;
3180 call_info->method = cmethod;
3182 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3183 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3185 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR fetching CMETHOD's gsharedvt method info structure from the rgctx. */
3190 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3191 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3193 MonoJumpInfoRgctxEntry *entry;
3196 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3197 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3199 return emit_rgctx_fetch (cfg, rgctx, entry);
3203 * emit_get_rgctx_method:
3205 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3206 * normal constants, else emit a load from the rgctx.
3209 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3210 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant. */
3212 if (!context_used) {
3215 switch (rgctx_type) {
3216 case MONO_RGCTX_INFO_METHOD:
3217 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3219 case MONO_RGCTX_INFO_METHOD_RGCTX:
3220 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3223 g_assert_not_reached ();
/* Shared code: fetch the value lazily through the rgctx trampoline. */
3226 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3227 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3229 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE slot for FIELD from the current method's rgctx. */
3234 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3235 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3237 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3238 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3240 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the gsharedvt-info slot for (DATA, RGCTX_TYPE),
 *   reusing an existing template entry when one matches, otherwise appending
 *   a new one.
 *   NOTE(review): sampled excerpt — the `return idx`/loop-exit lines are elided.
 */
3244 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3246 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3247 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing matching entry; LOCAL_OFFSET entries are never shared. */
3252 for (i = 0; i < info->entries->len; ++i) {
3253 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3255 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Not found: append a fresh template. */
3259 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3260 template->info_type = rgctx_type;
3261 template->data = data;
3263 idx = info->entries->len;
3265 g_ptr_array_add (info->entries, template);
3271 * emit_get_gsharedvt_info:
3273 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3276 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3281 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3282 /* Load info->entries [idx] */
3283 dreg = alloc_preg (cfg);
3284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by a class's byval type. */
3290 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3292 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3296 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable either from the rgctx (shared code) or as a constant.
 */
3299 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3301 MonoInst *vtable_arg;
3305 context_used = mini_class_check_context_used (cfg, klass);
3308 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3309 klass, MONO_RGCTX_INFO_VTABLE);
3311 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3315 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant trampoline signature. */
3318 if (COMPILE_LLVM (cfg))
3319 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3321 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3322 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated arch register when available. */
3323 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3324 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IP when sequence points are enabled and
 *   we are compiling METHOD itself (not an inlined callee). INTR_LOC marks an
 *   interruptible location; NONEMPTY_STACK flags a non-empty IL stack.
 */
3331 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3335 if (cfg->gen_seq_points && cfg->method == method) {
3336 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3338 ins->flags |= MONO_INST_NONEMPTY_STACK;
3339 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   When --debug=casts is active, record the source and destination classes of
 *   an upcoming cast in the JIT TLS area so a failing cast can report both
 *   types. Optionally skips the recording when OBJ_REG is null. Updates
 *   *OUT_BBLOCK with the current bblock when requested.
 *   NOTE(review): sampled excerpt — the null_check guard and some braces are elided.
 */
3344 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3346 if (mini_get_debug_options ()->better_cast_details) {
3347 int to_klass_reg = alloc_preg (cfg);
3348 int vtable_reg = alloc_preg (cfg);
3349 int klass_reg = alloc_preg (cfg);
3350 MonoBasicBlock *is_null_bb = NULL;
3354 NEW_BBLOCK (cfg, is_null_bb);
/* Skip detail recording for a null object. */
3356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3360 tls_get = mono_get_jit_tls_intrinsic (cfg);
3362 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3366 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass; class_cast_to = klass. */
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3371 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3372 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3375 MONO_START_BB (cfg, is_null_bb);
3377 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *   Clear the cast-details TLS state recorded by save_cast_details once the
 *   cast has succeeded.
 */
3383 reset_cast_details (MonoCompile *cfg)
3385 /* Reset the variables holding the cast details */
3386 if (mini_get_debug_options ()->better_cast_details) {
3387 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3389 MONO_ADD_INS (cfg->cbb, tls_get);
3390 /* It is enough to reset the from field */
3391 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3396 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that OBJ is an instance of exactly ARRAY_CLASS,
 *   throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 *   Compares the object's vtable (or klass under MONO_OPT_SHARED) against the
 *   expected value, loaded via rgctx/AOT constant/direct pointer as needed.
 */
3399 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3401 int vtable_reg = alloc_preg (cfg);
3404 context_used = mini_class_check_context_used (cfg, array_class);
3406 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load doubles as the null check. */
3408 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3410 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code compares MonoClass pointers instead of vtables. */
3411 int class_reg = alloc_preg (cfg);
3412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3413 if (cfg->compile_aot) {
3414 int klass_reg = alloc_preg (cfg);
3415 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3416 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3420 } else if (context_used) {
/* Generic sharing: the expected vtable comes from the rgctx. */
3421 MonoInst *vtable_ins;
3423 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3424 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3426 if (cfg->compile_aot) {
3430 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3432 vt_reg = alloc_preg (cfg);
3433 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3434 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3437 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3443 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3445 reset_cast_details (cfg);
3449 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3450 * generic code is generated.
3453 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3455 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared-code path: call Nullable<T>.Unbox indirectly via the rgctx. */
3458 MonoInst *rgctx, *addr;
3460 /* FIXME: What if the class is shared? We might not
3461 have to get the address of the method from the
3463 addr = emit_get_rgctx_method (cfg, context_used, method,
3464 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3466 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3468 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable when required. */
3470 gboolean pass_vtable, pass_mrgctx;
3471 MonoInst *rgctx_arg = NULL;
3473 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3474 g_assert (!pass_mrgctx);
3477 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3480 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3483 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit IR for CIL `unbox`: verify the boxed object on the stack (sp[0]) has
 *   element class KLASS (throwing InvalidCastException otherwise) and return
 *   an instruction computing the address of the unboxed value (obj + header).
 *   NOTE(review): sampled excerpt — the non-context_used branch and final
 *   return are partially elided.
 */
3488 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3492 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3493 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3494 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3495 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3497 obj_reg = sp [0]->dreg;
/* Faulting vtable load doubles as the null check. */
3498 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3501 /* FIXME: generics */
3502 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype. */
3505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3506 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
3512 MonoInst *element_class;
3514 /* This assertion is from the unboxcast insn */
3515 g_assert (klass->rank == 0);
3517 element_class = emit_get_rgctx_klass (cfg, context_used,
3518 klass->element_class, MONO_RGCTX_INFO_KLASS);
3520 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3521 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3523 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3524 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3525 reset_cast_details (cfg);
/* Result: address just past the object header (the unboxed payload). */
3528 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3529 MONO_ADD_INS (cfg->cbb, add);
3530 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Emit IR unboxing OBJ when KLASS is a gsharedvt type whose concrete kind
 *   (valuetype / reference / Nullable) is only known at runtime. Branches on
 *   the CLASS_BOX_TYPE rgctx info (1 = reference, 2 = nullable, else vtype)
 *   and yields the unboxed value loaded from a common address register.
 *   Updates *OUT_CBB with the bblock active after the merge point.
 *   NOTE(review): sampled excerpt — some comments/returns are elided.
 */
3537 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3539 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3540 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3544 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime castclass check before unboxing. */
3550 args [1] = klass_inst;
3553 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3555 NEW_BBLOCK (cfg, is_ref_bb);
3556 NEW_BBLOCK (cfg, is_nullable_bb);
3557 NEW_BBLOCK (cfg, end_bb);
3558 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3565 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3566 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Valuetype case: payload sits right after the object header. */
3570 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3571 MONO_ADD_INS (cfg->cbb, addr);
3573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3576 MONO_START_BB (cfg, is_ref_bb);
3578 /* Save the ref to a temporary */
3579 dreg = alloc_ireg (cfg);
3580 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3581 addr->dreg = addr_reg;
3582 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3586 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable unbox helper through the rgctx address. */
3589 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3590 MonoInst *unbox_call;
3591 MonoMethodSignature *unbox_sig;
3594 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Build an ad-hoc (object) -> KLASS signature for the helper call. */
3596 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3597 unbox_sig->ret = &klass->byval_arg;
3598 unbox_sig->param_count = 1;
3599 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3600 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3602 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3603 addr->dreg = addr_reg;
3606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3609 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the unboxed value from addr_reg. */
3612 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3614 *out_cbb = cfg->cbb;
3620 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS. FOR_BOX is passed through to the
 * managed allocator selection (boxing allocations can use a specialized
 * allocator). CONTEXT_USED is non-zero under generic sharing, in which case the
 * class/vtable is fetched from the rgctx instead of being embedded as a constant.
 * Returns the allocation call instruction, or NULL with the cfg exception set
 * (MONO_EXCEPTION_TYPE_LOAD) when the vtable cannot be created.
 * NOTE(review): this excerpt elides some original source lines.
 */
3623 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3625 MonoInst *iargs [2];
/* Generic-sharing path: look the class/vtable up through the rgctx. */
3631 MonoInst *iargs [2];
3633 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* With MONO_OPT_SHARED the domain is passed explicitly, so the slower
 * mono_object_new (domain, klass) entry point is used. */
3635 if (cfg->opt & MONO_OPT_SHARED)
3636 rgctx_info = MONO_RGCTX_INFO_KLASS;
3638 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3639 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3641 if (cfg->opt & MONO_OPT_SHARED) {
3642 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3644 alloc_ftn = mono_object_new;
3647 alloc_ftn = mono_object_new_specific;
3650 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3651 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3653 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics path. */
3656 if (cfg->opt & MONO_OPT_SHARED) {
3657 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3658 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3660 alloc_ftn = mono_object_new;
3661 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3662 /* This happens often in argument checking code, eg. throw new FooException... */
3663 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3664 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3665 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3667 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3668 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException through the cfg. */
3672 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3673 cfg->exception_ptr = klass;
3677 #ifndef MONO_CROSS_COMPILE
3678 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3681 if (managed_alloc) {
3682 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3683 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3685 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in pointer-sized words
 * (pass_lw); round the byte size up and convert. */
3687 guint32 lw = vtable->klass->instance_size;
3688 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3689 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3690 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3693 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3697 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3701 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL, an instance of KLASS. Returns the boxed object
 * reference instruction; *OUT_CBB receives the current basic block, since
 * the gsharedvt path emits branching control flow.
 *
 * Three cases:
 *   - Nullable<T>: call Nullable<T>.Box (through the rgctx when shared).
 *   - gsharedvt KLASS: the box type is only known at run time, so branch on
 *     MONO_RGCTX_INFO_CLASS_BOX_TYPE (1 == reference type, 2 == nullable)
 *     and join the three paths at end_bb.
 *   - plain vtype: allocate and store the value after the object header.
 * NOTE(review): this excerpt elides some original source lines.
 */
3704 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3706 MonoInst *alloc, *ins;
3708 *out_cbb = cfg->cbb;
3710 if (mono_class_is_nullable (klass)) {
3711 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3714 /* FIXME: What if the class is shared? We might not
3715 have to get the method address from the RGCTX. */
3716 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3717 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3718 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3720 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3722 gboolean pass_vtable, pass_mrgctx;
3723 MonoInst *rgctx_arg = NULL;
3725 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3726 g_assert (!pass_mrgctx);
3729 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3732 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3735 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3739 if (mini_is_gsharedvt_klass (cfg, klass)) {
3740 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3741 MonoInst *res, *is_ref, *src_var, *addr;
3744 dreg = alloc_ireg (cfg);
3746 NEW_BBLOCK (cfg, is_ref_bb);
3747 NEW_BBLOCK (cfg, is_nullable_bb);
3748 NEW_BBLOCK (cfg, end_bb);
/* Runtime box-type discriminator: 1 == reference type, 2 == Nullable<T>. */
3749 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate, then copy the value after the object header. */
3757 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3760 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3761 ins->opcode = OP_STOREV_MEMBASE;
3763 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3764 res->type = STACK_OBJ;
3766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3769 MONO_START_BB (cfg, is_ref_bb);
3770 addr_reg = alloc_ireg (cfg);
3772 /* val is a vtype, so has to load the value manually */
3773 src_var = get_vreg_to_inst (cfg, val->dreg);
3775 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3776 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3777 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3781 MONO_START_BB (cfg, is_nullable_bb);
3784 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3785 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3787 MonoMethodSignature *box_sig;
3790 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3791 * construct that method at JIT time, so have to do things by hand.
3793 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3794 box_sig->ret = &mono_defaults.object_class->byval_arg;
3795 box_sig->param_count = 1;
3796 box_sig->params [0] = &klass->byval_arg;
3797 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3798 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3799 res->type = STACK_OBJ;
3803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3805 MONO_START_BB (cfg, end_bb);
3807 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt, non-nullable) vtype: allocate and store. */
3811 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3815 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, when CONTEXT_USED is
 * non-zero, an open generic type) with at least one covariant or
 * contravariant type parameter instantiated with a reference type.
 * Such casts cannot be decided with a simple vtable/class comparison, so
 * callers route them through the cached-cast helpers instead.
 * NOTE(review): this excerpt elides some original source lines.
 */
3822 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3825 MonoGenericContainer *container;
3826 MonoGenericInst *ginst;
3828 if (klass->generic_class) {
3829 container = klass->generic_class->container_class->generic_container;
3830 ginst = klass->generic_class->context.class_inst;
3831 } else if (klass->generic_container && context_used) {
3832 container = klass->generic_container;
3833 ginst = container->context.class_inst;
/* Scan each type argument; only variant parameters matter. */
3838 for (i = 0; i < container->type_argc; ++i) {
3840 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3842 type = ginst->type_argv [i];
3843 if (mini_type_is_reference (cfg, type))
3849 // FIXME: This doesn't work yet (class libs tests fail?)
3850 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3853 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL 'castclass' semantics: cast SRC to KLASS,
 * throwing InvalidCastException on failure, with null passing through
 * unchanged. CONTEXT_USED is non-zero under generic sharing.
 *
 * Complex cases (interfaces, arrays, nullable, variant generics, etc. — see
 * is_complex_isinst) are delegated to the castclass-with-cache managed
 * wrapper; simple cases are open-coded with vtable/class comparisons.
 * NOTE(review): this excerpt elides some original source lines.
 */
3856 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3858 MonoBasicBlock *is_null_bb;
3859 int obj_reg = src->dreg;
3860 int vtable_reg = alloc_preg (cfg);
3861 MonoInst *klass_inst = NULL;
3866 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3867 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3868 MonoInst *cache_ins;
3870 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3875 /* klass - it's the second element of the cache entry*/
3876 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3879 args [2] = cache_ins;
3881 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3884 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* A null reference always casts successfully: short-circuit to is_null_bb. */
3887 NEW_BBLOCK (cfg, is_null_bb);
3889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3892 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
3894 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3896 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3898 int klass_reg = alloc_preg (cfg);
3900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single equality check of the class pointer
 * (or vtable pointer, in the disabled fast path) suffices. */
3902 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3903 /* the remoting code is broken, access the class for now */
3904 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
3905 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3907 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3908 cfg->exception_ptr = klass;
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3916 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3919 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3923 MONO_START_BB (cfg, is_null_bb);
3925 reset_cast_details (cfg);
3931 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' semantics: returns SRC if it is an
 * instance of KLASS (or null), and null otherwise — no exception is thrown.
 * CONTEXT_USED is non-zero under generic sharing.
 *
 * Complex cases are delegated to the isinst-with-cache managed wrapper;
 * otherwise the type test is open-coded per KLASS shape (interface, array,
 * nullable, sealed class, general class hierarchy walk). The result register
 * is pre-loaded with the object and zeroed on the false path.
 * NOTE(review): this excerpt elides some original source lines.
 */
3934 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3937 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3938 int obj_reg = src->dreg;
3939 int vtable_reg = alloc_preg (cfg);
3940 int res_reg = alloc_ireg_ref (cfg);
3941 MonoInst *klass_inst = NULL;
3946 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3947 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3948 MonoInst *cache_ins;
3950 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3955 /* klass - it's the second element of the cache entry*/
3956 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3959 args [2] = cache_ins;
3961 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3964 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3967 NEW_BBLOCK (cfg, is_null_bb);
3968 NEW_BBLOCK (cfg, false_bb);
3969 NEW_BBLOCK (cfg, end_bb);
3971 /* Do the assignment at the beginning, so the other assignment can be if converted */
3972 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3973 ins->type = STACK_OBJ;
/* null is "not an instance" for isinst purposes but the result stays null,
 * which the pre-loaded res_reg already provides. */
3976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3981 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3982 g_assert (!context_used);
3983 /* the is_null_bb target simply copies the input register to the output */
3984 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3986 int klass_reg = alloc_preg (cfg);
/* Array case: check the rank first, then test the element class. */
3989 int rank_reg = alloc_preg (cfg);
3990 int eclass_reg = alloc_preg (cfg);
3992 g_assert (!context_used);
3993 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3994 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3995 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3996 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes involving System.Object and System.Enum,
 * whose array-covariance rules need explicit parent/enum checks. */
3998 if (klass->cast_class == mono_defaults.object_class) {
3999 int parent_reg = alloc_preg (cfg);
4000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4001 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4002 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4003 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4004 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4005 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4006 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4008 } else if (klass->cast_class == mono_defaults.enum_class) {
4009 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4011 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4012 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4014 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4015 /* Check that the object is a vector too */
4016 int bounds_reg = alloc_preg (cfg);
4017 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4019 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4022 /* the is_null_bb target simply copies the input register to the output */
4023 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4025 } else if (mono_class_is_nullable (klass)) {
4026 g_assert (!context_used);
4027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4028 /* the is_null_bb target simply copies the input register to the output */
4029 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single class-pointer equality check suffices. */
4031 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4032 g_assert (!context_used);
4033 /* the remoting code is broken, access the class for now */
4034 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
4035 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4037 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4038 cfg->exception_ptr = klass;
4041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4046 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4047 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4050 /* the is_null_bb target simply copies the input register to the output */
4051 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is null. */
4056 MONO_START_BB (cfg, false_bb);
4058 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4059 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4061 MONO_START_BB (cfg, is_null_bb);
4063 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see the description below):
 * unlike plain isinst, the result is an integer code distinguishing the
 * transparent-proxy "cannot determine" case. Requires remoting support
 * (DISABLE_REMOTING not defined); otherwise aborts with g_error.
 * NOTE(review): this excerpt elides some original source lines.
 */
4069 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4071 /* This opcode takes as input an object reference and a class, and returns:
4072 0) if the object is an instance of the class,
4073 1) if the object is not instance of the class,
4074 2) if the object is a proxy whose type cannot be determined */
4077 #ifndef DISABLE_REMOTING
4078 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4080 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4082 int obj_reg = src->dreg;
4083 int dreg = alloc_ireg (cfg);
4085 #ifndef DISABLE_REMOTING
4086 int klass_reg = alloc_preg (cfg);
4089 NEW_BBLOCK (cfg, true_bb);
4090 NEW_BBLOCK (cfg, false_bb);
4091 NEW_BBLOCK (cfg, end_bb);
4092 #ifndef DISABLE_REMOTING
4093 NEW_BBLOCK (cfg, false2_bb);
4094 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4100 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4101 #ifndef DISABLE_REMOTING
4102 NEW_BBLOCK (cfg, interface_fail_bb);
4105 tmp_reg = alloc_preg (cfg);
4106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4107 #ifndef DISABLE_REMOTING
/* Interface test failed: if the object is a transparent proxy with no
 * custom type info, its type cannot be determined (result 2). */
4108 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4109 MONO_START_BB (cfg, interface_fail_bb);
4110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4112 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4114 tmp_reg = alloc_preg (cfg);
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4117 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4119 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class: transparent proxies are tested against their
 * remote proxy_class; non-proxies take the plain hierarchy test. */
4122 #ifndef DISABLE_REMOTING
4123 tmp_reg = alloc_preg (cfg);
4124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4125 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4127 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4128 tmp_reg = alloc_preg (cfg);
4129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4132 tmp_reg = alloc_preg (cfg);
4133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4137 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4140 MONO_START_BB (cfg, no_proxy_bb);
4142 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4144 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Join points: load the result code (0 / 1 / 2) into dreg. */
4148 MONO_START_BB (cfg, false_bb);
4150 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4151 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4153 #ifndef DISABLE_REMOTING
4154 MONO_START_BB (cfg, false2_bb);
4156 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4160 MONO_START_BB (cfg, true_bb);
4162 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4164 MONO_START_BB (cfg, end_bb);
/* The returned instruction only carries the result vreg/type; the actual
 * value was set by the ICONSTs above. */
4167 MONO_INST_NEW (cfg, ins, OP_ICONST);
4169 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (see the description
 * below): like castclass but returning an integer code so callers can detect
 * the transparent-proxy "cannot determine" case instead of always throwing.
 * Requires remoting support (DISABLE_REMOTING not defined); otherwise aborts
 * with g_error.
 * NOTE(review): this excerpt elides some original source lines.
 */
4175 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4177 /* This opcode takes as input an object reference and a class, and returns:
4178 0) if the object is an instance of the class,
4179 1) if the object is a proxy whose type cannot be determined
4180 an InvalidCastException exception is thrown otherwise*/
4183 #ifndef DISABLE_REMOTING
4184 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4186 MonoBasicBlock *ok_result_bb;
4188 int obj_reg = src->dreg;
4189 int dreg = alloc_ireg (cfg);
4190 int tmp_reg = alloc_preg (cfg);
4192 #ifndef DISABLE_REMOTING
4193 int klass_reg = alloc_preg (cfg);
4194 NEW_BBLOCK (cfg, end_bb);
4197 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4199 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4202 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4204 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4205 #ifndef DISABLE_REMOTING
4206 NEW_BBLOCK (cfg, interface_fail_bb);
4208 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface test failed: only a transparent proxy without custom type
 * info may proceed (result 1); anything else throws. */
4209 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4210 MONO_START_BB (cfg, interface_fail_bb);
4211 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4213 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4215 tmp_reg = alloc_preg (cfg);
4216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4218 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4220 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4221 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4224 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface class: test transparent proxies against their remote
 * proxy_class; non-proxies take the plain castclass check. */
4228 #ifndef DISABLE_REMOTING
4229 NEW_BBLOCK (cfg, no_proxy_bb);
4231 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4233 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4235 tmp_reg = alloc_preg (cfg);
4236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4239 tmp_reg = alloc_preg (cfg);
4240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4244 NEW_BBLOCK (cfg, fail_1_bb);
4246 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4248 MONO_START_BB (cfg, fail_1_bb);
4250 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4251 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4253 MONO_START_BB (cfg, no_proxy_bb);
4255 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4257 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4261 MONO_START_BB (cfg, ok_result_bb);
4263 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4265 #ifndef DISABLE_REMOTING
4266 MONO_START_BB (cfg, end_bb);
/* The returned instruction only carries the result vreg/type; the actual
 * value was set by the ICONSTs above. */
4270 MONO_INST_NEW (cfg, ins, OP_ICONST);
4272 ins->type = STACK_I4;
4278 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of class KLASS bound to METHOD with the
 * given TARGET object, inlining the work normally done by mono_delegate_ctor.
 * CONTEXT_USED is non-zero under generic sharing. Returns the new delegate
 * object instruction.
 * NOTE(review): this excerpt elides some original source lines.
 */
4280 static G_GNUC_UNUSED MonoInst*
4281 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4285 gpointer *trampoline;
4286 MonoInst *obj, *method_ins, *tramp_ins;
4290 obj = handle_alloc (cfg, klass, FALSE, 0);
4294 /* Inline the contents of mono_delegate_ctor */
4296 /* Set target field */
4297 /* Optimize away setting of NULL target */
4298 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The stores into a freshly allocated object still need write barriers
 * when the GC requires them. */
4300 if (cfg->gen_write_barriers) {
4301 dreg = alloc_preg (cfg);
4302 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4303 emit_write_barrier (cfg, ptr, target);
4307 /* Set method field */
4308 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4310 if (cfg->gen_write_barriers) {
4311 dreg = alloc_preg (cfg);
4312 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4313 emit_write_barrier (cfg, ptr, method_ins);
4316 * To avoid looking up the compiled code belonging to the target method
4317 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4318 * store it, and we fill it after the method has been compiled.
4320 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4321 MonoInst *code_slot_ins;
4324 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code slot is cached per (domain, method) under the domain lock so
 * all delegates to the same method share one slot. */
4326 domain = mono_domain_get ();
4327 mono_domain_lock (domain);
4328 if (!domain_jit_info (domain)->method_code_hash)
4329 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4330 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4332 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4333 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4335 mono_domain_unlock (domain);
4337 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4342 /* Set invoke_impl field */
4343 if (cfg->compile_aot) {
4344 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4346 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4347 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4349 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4351 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newarr'/multi-dimensional array creation for
 * the given RANK, consuming the dimension arguments from SP. Uses a vararg
 * icall (mono_array_new_va), which is why the cfg is flagged as having
 * varargs and LLVM codegen is disabled.
 */
4357 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4359 MonoJitICallInfo *info;
4361 /* Need to register the icall so it gets an icall wrapper */
4362 info = mono_get_array_new_va_icall (rank);
4364 cfg->flags |= MONO_CFG_HAS_VARARGS;
4366 /* mono_array_new_va () needs a vararg calling convention */
4367 cfg->disable_llvm = TRUE;
4369 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4370 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Insert an OP_LOAD_GOTADDR instruction at the very start of the entry
 * basic block so cfg->got_var holds the GOT address, plus a dummy use in the
 * exit block to keep the variable alive for the whole method. No-op if there
 * is no got_var or it was already allocated.
 */
4374 mono_emit_load_got_addr (MonoCompile *cfg)
4376 MonoInst *getaddr, *dummy_use;
4378 if (!cfg->got_var || cfg->got_var_allocated)
4381 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4382 getaddr->cil_code = cfg->header->code;
4383 getaddr->dreg = cfg->got_var->dreg;
4385 /* Add it to the start of the first bblock */
4386 if (cfg->bb_entry->code) {
4387 getaddr->next = cfg->bb_entry->code;
4388 cfg->bb_entry->code = getaddr;
4391 MONO_ADD_INS (cfg->bb_entry, getaddr);
4393 cfg->got_var_allocated = TRUE;
4396 * Add a dummy use to keep the got_var alive, since real uses might
4397 * only be generated by the back ends.
4398 * Add it to end_bblock, so the variable's lifetime covers the whole
4400 * It would be better to make the usage of the got var explicit in all
4401 * cases when the backend needs it (i.e. calls, throw etc.), so this
4402 * wouldn't be needed.
4404 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4405 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size limit for inlining, initialized lazily in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, defaulting to INLINE_LENGTH_LIMIT. */
4408 static int inline_limit;
/* TRUE once inline_limit has been computed. */
4409 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Checks, among other things: inline depth, noinlining/synchronized
 * flags, MarshalByRef classes, the code-size limit (overridable via
 * MONO_INLINELIMIT, bypassed by AggressiveInlining), static-constructor
 * constraints, declarative security, and — on soft-float targets — R4
 * parameters/returns.
 * NOTE(review): this excerpt elides some original source lines.
 */
4412 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4414 MonoMethodHeaderSummary header;
4416 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4417 MonoMethodSignature *sig = mono_method_signature (method);
4421 if (cfg->generic_sharing_context)
4424 if (cfg->inline_depth > 10)
4427 #ifdef MONO_ARCH_HAVE_LMF_OPS
4428 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4429 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4430 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4435 if (!mono_method_get_header_summary (method, &header))
4438 /*runtime, icall and pinvoke are checked by summary call*/
4439 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4440 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4441 (mono_class_is_marshalbyref (method->klass)) ||
4445 /* also consider num_locals? */
4446 /* Do the size check early to avoid creating vtables */
4447 if (!inline_limit_inited) {
4448 if (g_getenv ("MONO_INLINELIMIT"))
4449 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4451 inline_limit = INLINE_LENGTH_LIMIT;
4452 inline_limit_inited = TRUE;
4454 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4458 * if we can initialize the class of the method right away, we do,
4459 * otherwise we don't allow inlining if the class needs initialization,
4460 * since it would mean inserting a call to mono_runtime_class_init()
4461 * inside the inlined code
4463 if (!(cfg->opt & MONO_OPT_SHARED)) {
4464 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4465 if (cfg->run_cctors && method->klass->has_cctor) {
4466 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4467 if (!method->klass->runtime_info)
4468 /* No vtable created yet */
4470 vtable = mono_class_vtable (cfg->domain, method->klass);
4473 /* This makes so that inline cannot trigger */
4474 /* .cctors: too many apps depend on them */
4475 /* running with a specific order... */
4476 if (! vtable->initialized)
4478 mono_runtime_class_init (vtable);
4480 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4481 if (!method->klass->runtime_info)
4482 /* No vtable created yet */
4484 vtable = mono_class_vtable (cfg->domain, method->klass);
4487 if (!vtable->initialized)
4492 * If we're compiling for shared code
4493 * the cctor will need to be run at aot method load time, for example,
4494 * or at the end of the compilation of the inlining method.
4496 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4501 * CAS - do not inline methods with declarative security
4502 * Note: this has to be before any possible return TRUE;
4504 if (mono_security_method_has_declsec (method))
4507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float fallback cannot inline methods taking or returning R4. */
4508 if (mono_arch_is_soft_float ()) {
4510 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4512 for (i = 0; i < sig->param_count; ++i)
4513 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD against the class of
 * VTABLE requires emitting a class-initialization (cctor) call. Already
 * initialized vtables (outside AOT), beforefieldinit classes, and instance
 * methods of the class itself (the cctor already ran before the call) do
 * not need one.
 */
4522 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4524 if (vtable->initialized && !cfg->compile_aot)
4527 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4530 if (!mono_class_needs_cctor_run (vtable->klass, method))
4533 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4534 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR that computes the address of element `index` of the
 *   one-dimensional array `arr` whose element class is `klass`.
 *   `bcheck` presumably guards the bounds check below — the guard line
 *   itself is not visible in this extract (NOTE(review): this chunk has
 *   original line numbers fused into the text and some lines missing).
 */
4541 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4545 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt arrays: element size is only known at runtime (handled below via RGCTX) */
4548 if (mini_is_gsharedvt_klass (cfg, klass)) {
4551 mono_class_init (klass);
4552 size = mono_class_array_element_size (klass);
4555 mult_reg = alloc_preg (cfg);
4556 array_reg = arr->dreg;
4557 index_reg = index->dreg;
4559 #if SIZEOF_REGISTER == 8
4560 /* The array reg is 64 bits but the index reg is only 32 */
4561 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself; otherwise sign-extend the index to 64 bits */
4563 index2_reg = index_reg;
4565 index2_reg = alloc_preg (cfg);
4566 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits */
4569 if (index->type == STACK_I8) {
4570 index2_reg = alloc_preg (cfg);
4571 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4573 index2_reg = index_reg;
4578 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4580 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold the scale into a single LEA */
4581 if (size == 1 || size == 2 || size == 4 || size == 8) {
4582 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4584 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4585 ins->klass = mono_class_get_element_class (klass);
4586 ins->type = STACK_MP;
4592 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt path: fetch the element size from the runtime generic context */
4595 MonoInst *rgctx_ins;
4598 g_assert (cfg->generic_sharing_context);
4599 context_used = mini_class_check_context_used (cfg, klass);
4600 g_assert (context_used);
4601 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4602 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index * size + offsetof (MonoArray, vector) */
4606 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4607 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4608 ins->klass = mono_class_get_element_class (klass);
4609 ins->type = STACK_MP;
4610 MONO_ADD_INS (cfg->cbb, ins);
4615 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [index1, index2] of a
 *   two-dimensional array, including per-dimension lower-bound
 *   adjustment and range checks against MonoArrayBounds.
 *   Compiled out when the arch emulates mul/div (depends on OP_LMUL).
 *   NOTE(review): extract has original line numbers fused in and some
 *   lines missing (braces, a few declarations).
 */
4617 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4619 int bounds_reg = alloc_preg (cfg);
4620 int add_reg = alloc_ireg_mp (cfg);
4621 int mult_reg = alloc_preg (cfg);
4622 int mult2_reg = alloc_preg (cfg);
4623 int low1_reg = alloc_preg (cfg);
4624 int low2_reg = alloc_preg (cfg);
4625 int high1_reg = alloc_preg (cfg);
4626 int high2_reg = alloc_preg (cfg);
4627 int realidx1_reg = alloc_preg (cfg);
4628 int realidx2_reg = alloc_preg (cfg);
4629 int sum_reg = alloc_preg (cfg);
4630 int index1, index2, tmpreg;
4634 mono_class_init (klass);
4635 size = mono_class_array_element_size (klass);
4637 index1 = index_ins1->dreg;
4638 index2 = index_ins2->dreg;
4640 #if SIZEOF_REGISTER == 8
4641 /* The array reg is 64 bits but the index reg is only 32 */
4642 if (COMPILE_LLVM (cfg)) {
/* non-LLVM: sign-extend both 32-bit indexes to pointer width */
4645 tmpreg = alloc_preg (cfg);
4646 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4648 tmpreg = alloc_preg (cfg);
4649 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4653 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4657 /* range checking */
4658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4659 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* dimension 0: realidx1 = index1 - lower_bound[0]; throw if >= length[0] */
4661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4662 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4663 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4664 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4665 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4666 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4667 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* dimension 1: second MonoArrayBounds entry follows the first in memory */
4669 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4670 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4671 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4672 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4673 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4675 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length[1] + realidx2) * size) + offsetof (vector) */
4677 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4678 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4680 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4681 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4683 ins->type = STACK_MP;
4685 MONO_ADD_INS (cfg->cbb, ins);
4692 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4696 MonoMethod *addr_method;
4699 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4702 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4704 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4705 /* emit_ldelema_2 depends on OP_LMUL */
4706 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4707 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4711 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4712 addr_method = mono_marshal_get_array_address (rank, element_size);
4713 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4718 static MonoBreakPolicy
4719 always_insert_breakpoint (MonoMethod *method)
4721 return MONO_BREAK_POLICY_ALWAYS;
4724 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4727 * mono_set_break_policy:
4728 * policy_callback: the new callback function
4730 * Allow embedders to decide wherther to actually obey breakpoint instructions
4731 * (both break IL instructions and Debugger.Break () method calls), for example
4732 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4733 * untrusted or semi-trusted code.
4735 * @policy_callback will be called every time a break point instruction needs to
4736 * be inserted with the method argument being the method that calls Debugger.Break()
4737 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4738 * if it wants the breakpoint to not be effective in the given method.
4739 * #MONO_BREAK_POLICY_ALWAYS is the default.
4742 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4744 if (policy_callback)
4745 break_policy_func = policy_callback;
4747 break_policy_func = always_insert_breakpoint;
4751 should_insert_brekpoint (MonoMethod *method) {
4752 switch (break_policy_func (method)) {
4753 case MONO_BREAK_POLICY_ALWAYS:
4755 case MONO_BREAK_POLICY_NEVER:
4757 case MONO_BREAK_POLICY_ON_DBG:
4758 return mono_debug_using_mono_debugger ();
4760 g_warning ("Incorrect value returned from break policy callback");
4765 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4767 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4769 MonoInst *addr, *store, *load;
4770 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4772 /* the bounds check is already done by the callers */
4773 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4775 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4776 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4777 if (mini_type_is_reference (cfg, fsig->params [2]))
4778 emit_write_barrier (cfg, addr, load);
4780 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4781 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4788 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4790 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for `array [index] = value` (sp [0]=array, sp [1]=index,
 *   sp [2]=value).  Reference-element stores with safety checks go
 *   through the virtual stelemref helper; otherwise the address is
 *   computed inline.  NOTE(review): this extract has original line
 *   numbers fused into the text and some lines missing (iargs setup,
 *   casts, closing braces).
 */
4794 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* storing a non-null reference with checks: use the stelemref helper, which
 * performs the array covariance check; null stores skip it (always legal) */
4796 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4797 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4798 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4799 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4800 MonoInst *iargs [3];
4803 mono_class_setup_vtable (obj_array);
4804 g_assert (helper->slot);
4806 if (sp [0]->type != STACK_OBJ)
4808 if (sp [2]->type != STACK_OBJ)
4815 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element type: size unknown at JIT time, use OP_STOREV_MEMBASE */
4819 if (mini_is_gsharedvt_klass (cfg, klass)) {
4822 // FIXME-VT: OP_ICONST optimization
4823 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4824 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4825 ins->opcode = OP_STOREV_MEMBASE;
/* constant index: fold the offset computation at compile time */
4826 } else if (sp [1]->opcode == OP_ICONST) {
4827 int array_reg = sp [0]->dreg;
4828 int index_reg = sp [1]->dreg;
4829 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4832 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* general case: compute the element address, then store through it */
4835 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4836 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4837 if (generic_class_is_reference_type (cfg, klass))
4838 emit_write_barrier (cfg, addr, sp [2]);
4845 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4850 eklass = mono_class_from_mono_type (fsig->params [2]);
4852 eklass = mono_class_from_mono_type (fsig->ret);
4856 return emit_array_store (cfg, eklass, args, FALSE);
4858 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4859 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
4865 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4867 MonoInst *ins = NULL;
4868 #ifdef MONO_ARCH_SIMD_INTRINSICS
4869 if (cfg->opt & MONO_OPT_SIMD) {
4870 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4880 emit_memory_barrier (MonoCompile *cfg, int kind)
4882 MonoInst *ins = NULL;
4883 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4884 MONO_ADD_INS (cfg->cbb, ins);
4885 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics only usable when compiling with the LLVM backend
 *   (Math.Sin/Cos/Sqrt/Abs and, with OPT_CMOV, Min/Max).
 *   NOTE(review): the opcode assignments for Sin/Cos/Sqrt/Abs and the
 *   signed Min/Max cases sit on lines missing from this extract; only
 *   the visible structure is documented here.
 */
4891 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4893 MonoInst *ins = NULL;
4896 /* The LLVM backend supports these intrinsics */
4897 if (cmethod->klass == mono_defaults.math_class) {
4898 if (strcmp (cmethod->name, "Sin") == 0) {
4900 } else if (strcmp (cmethod->name, "Cos") == 0) {
4902 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4904 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: result in a fresh float register */
4909 MONO_INST_NEW (cfg, ins, opcode);
4910 ins->type = STACK_R8;
4911 ins->dreg = mono_alloc_freg (cfg);
4912 ins->sreg1 = args [0]->dreg;
4913 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map onto conditional-move opcodes, picked by operand type */
4917 if (cfg->opt & MONO_OPT_CMOV) {
4918 if (strcmp (cmethod->name, "Min") == 0) {
4919 if (fsig->params [0]->type == MONO_TYPE_I4)
4921 if (fsig->params [0]->type == MONO_TYPE_U4)
4922 opcode = OP_IMIN_UN;
4923 else if (fsig->params [0]->type == MONO_TYPE_I8)
4925 else if (fsig->params [0]->type == MONO_TYPE_U8)
4926 opcode = OP_LMIN_UN;
4927 } else if (strcmp (cmethod->name, "Max") == 0) {
4928 if (fsig->params [0]->type == MONO_TYPE_I4)
4930 if (fsig->params [0]->type == MONO_TYPE_U4)
4931 opcode = OP_IMAX_UN;
4932 else if (fsig->params [0]->type == MONO_TYPE_I8)
4934 else if (fsig->params [0]->type == MONO_TYPE_U8)
4935 opcode = OP_LMAX_UN;
/* binary integer intrinsic */
4940 MONO_INST_NEW (cfg, ins, opcode);
4941 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4942 ins->dreg = mono_alloc_ireg (cfg);
4943 ins->sreg1 = args [0]->dreg;
4944 ins->sreg2 = args [1]->dreg;
4945 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe to apply in shared (generic-sharing) code:
 *   currently Array.UnsafeStore/UnsafeLoad.  The tail of the function
 *   (presumably `return NULL;`) is on lines missing from this extract.
 */
4953 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4955 if (cmethod->klass == mono_defaults.array_class) {
4956 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4957 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4958 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4959 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Replace calls to well-known corlib methods (String, Object, Array,
 *   RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Math, ...)
 *   with inline IR.  Returns the produced instruction, or falls through
 *   to SIMD / LLVM / arch-specific intrinsics at the bottom.
 *
 * NOTE(review): this extract has the original line numbers fused into
 * the text and many lines missing (closing braces, #else/#endif lines,
 * some returns and opcode assignments); comments describe only what is
 * visible.
 */
4966 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4968 MonoInst *ins = NULL;
/* lazily resolve System.Runtime.CompilerServices.RuntimeHelpers once */
4970 static MonoClass *runtime_helpers_class = NULL;
4971 if (! runtime_helpers_class)
4972 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4973 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4975 if (cmethod->klass == mono_defaults.string_class) {
4976 if (strcmp (cmethod->name, "get_Chars") == 0) {
4977 int dreg = alloc_ireg (cfg);
4978 int index_reg = alloc_preg (cfg);
4979 int mult_reg = alloc_preg (cfg);
4980 int add_reg = alloc_preg (cfg);
4982 #if SIZEOF_REGISTER == 8
4983 /* The array reg is 64 bits but the index reg is only 32 */
4984 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4986 index_reg = args [1]->dreg;
4988 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4990 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4991 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4992 add_reg = ins->dreg;
4993 /* Avoid a warning */
4995 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* non-x86 path: address = str + index * 2 + offsetof (chars) */
4998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4999 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5000 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5001 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5003 type_from_op (ins, NULL, NULL);
5005 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5006 int dreg = alloc_ireg (cfg);
5007 /* Decompose later to allow more optimizations */
5008 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5009 ins->type = STACK_I4;
5010 ins->flags |= MONO_INST_FAULT;
5011 cfg->cbb->has_array_access = TRUE;
5012 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5015 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5016 int mult_reg = alloc_preg (cfg);
5017 int add_reg = alloc_preg (cfg);
5019 /* The corlib functions check for oob already. */
5020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5021 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5022 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5023 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5026 } else if (cmethod->klass == mono_defaults.object_class) {
5028 if (strcmp (cmethod->name, "GetType") == 0) {
5029 int dreg = alloc_ireg_ref (cfg);
5030 int vt_reg = alloc_preg (cfg);
5031 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5032 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5033 type_from_op (ins, NULL, NULL);
5036 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* non-moving GC only: hash derived from the object address */
5037 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5038 int dreg = alloc_ireg (cfg);
5039 int t1 = alloc_ireg (cfg);
5041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5042 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5043 ins->type = STACK_I4;
5047 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5048 MONO_INST_NEW (cfg, ins, OP_NOP);
5049 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5053 } else if (cmethod->klass == mono_defaults.array_class) {
5054 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5055 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5057 #ifndef MONO_BIG_ARRAYS
5059 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5062 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5063 int dreg = alloc_ireg (cfg);
5064 int bounds_reg = alloc_ireg_mp (cfg);
5065 MonoBasicBlock *end_bb, *szarray_bb;
5066 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5068 NEW_BBLOCK (cfg, end_bb);
5069 NEW_BBLOCK (cfg, szarray_bb);
/* a NULL bounds pointer means this is a szarray (rank 1, lower bound 0) */
5071 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5072 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5075 /* Non-szarray case */
5077 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5078 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5080 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5081 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5083 MONO_START_BB (cfg, szarray_bb);
5086 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5087 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5089 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5090 MONO_START_BB (cfg, end_bb);
5092 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5093 ins->type = STACK_I4;
/* only getters (get_Rank / get_Length) handled past this point */
5099 if (cmethod->name [0] != 'g')
5102 if (strcmp (cmethod->name, "get_Rank") == 0) {
5103 int dreg = alloc_ireg (cfg);
5104 int vtable_reg = alloc_preg (cfg);
5105 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5106 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5107 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5108 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5109 type_from_op (ins, NULL, NULL);
5112 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5113 int dreg = alloc_ireg (cfg);
5115 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5116 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5117 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5122 } else if (cmethod->klass == runtime_helpers_class) {
5124 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5125 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5129 } else if (cmethod->klass == mono_defaults.thread_class) {
5130 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5131 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5132 MONO_ADD_INS (cfg->cbb, ins);
5134 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5135 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5137 } else if (cmethod->klass == mono_defaults.monitor_class) {
5139 /* FIXME this should be integrated to the check below once we support the trampoline version */
5140 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5141 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5142 MonoMethod *fast_method = NULL;
5144 /* Avoid infinite recursion */
5145 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5148 fast_method = mono_monitor_get_fast_path (cmethod);
5152 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5156 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* trampoline version: the object is passed in a fixed register */
5157 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5160 if (COMPILE_LLVM (cfg)) {
5162 * Pass the argument normally, the LLVM backend will handle the
5163 * calling convention problems.
5165 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5167 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5168 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5169 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5170 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5173 return (MonoInst*)call;
5174 } else if (strcmp (cmethod->name, "Exit") == 0) {
5177 if (COMPILE_LLVM (cfg)) {
5178 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5180 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5181 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5182 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5183 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5186 return (MonoInst*)call;
5188 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5190 MonoMethod *fast_method = NULL;
5192 /* Avoid infinite recursion */
5193 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5194 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5195 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5198 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5199 strcmp (cmethod->name, "Exit") == 0)
5200 fast_method = mono_monitor_get_fast_path (cmethod);
5204 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
5207 } else if (cmethod->klass->image == mono_defaults.corlib &&
5208 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5209 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5212 #if SIZEOF_REGISTER == 8
5213 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5214 /* 64 bit reads are already atomic */
5215 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5216 ins->dreg = mono_alloc_preg (cfg);
5217 ins->inst_basereg = args [0]->dreg;
5218 ins->inst_offset = 0;
5219 MONO_ADD_INS (cfg->cbb, ins);
5223 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add map to atomic-add with constant or arg operand */
5224 if (strcmp (cmethod->name, "Increment") == 0) {
5225 MonoInst *ins_iconst;
5228 if (fsig->params [0]->type == MONO_TYPE_I4)
5229 opcode = OP_ATOMIC_ADD_NEW_I4;
5230 #if SIZEOF_REGISTER == 8
5231 else if (fsig->params [0]->type == MONO_TYPE_I8)
5232 opcode = OP_ATOMIC_ADD_NEW_I8;
5235 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5236 ins_iconst->inst_c0 = 1;
5237 ins_iconst->dreg = mono_alloc_ireg (cfg);
5238 MONO_ADD_INS (cfg->cbb, ins_iconst);
5240 MONO_INST_NEW (cfg, ins, opcode);
5241 ins->dreg = mono_alloc_ireg (cfg);
5242 ins->inst_basereg = args [0]->dreg;
5243 ins->inst_offset = 0;
5244 ins->sreg2 = ins_iconst->dreg;
5245 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5246 MONO_ADD_INS (cfg->cbb, ins);
5248 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5249 MonoInst *ins_iconst;
5252 if (fsig->params [0]->type == MONO_TYPE_I4)
5253 opcode = OP_ATOMIC_ADD_NEW_I4;
5254 #if SIZEOF_REGISTER == 8
5255 else if (fsig->params [0]->type == MONO_TYPE_I8)
5256 opcode = OP_ATOMIC_ADD_NEW_I8;
5259 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5260 ins_iconst->inst_c0 = -1;
5261 ins_iconst->dreg = mono_alloc_ireg (cfg);
5262 MONO_ADD_INS (cfg->cbb, ins_iconst);
5264 MONO_INST_NEW (cfg, ins, opcode);
5265 ins->dreg = mono_alloc_ireg (cfg);
5266 ins->inst_basereg = args [0]->dreg;
5267 ins->inst_offset = 0;
5268 ins->sreg2 = ins_iconst->dreg;
5269 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5270 MONO_ADD_INS (cfg->cbb, ins);
5272 } else if (strcmp (cmethod->name, "Add") == 0) {
5275 if (fsig->params [0]->type == MONO_TYPE_I4)
5276 opcode = OP_ATOMIC_ADD_NEW_I4;
5277 #if SIZEOF_REGISTER == 8
5278 else if (fsig->params [0]->type == MONO_TYPE_I8)
5279 opcode = OP_ATOMIC_ADD_NEW_I8;
5283 MONO_INST_NEW (cfg, ins, opcode);
5284 ins->dreg = mono_alloc_ireg (cfg);
5285 ins->inst_basereg = args [0]->dreg;
5286 ins->inst_offset = 0;
5287 ins->sreg2 = args [1]->dreg;
5288 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5289 MONO_ADD_INS (cfg->cbb, ins);
5292 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5294 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5295 if (strcmp (cmethod->name, "Exchange") == 0) {
5297 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5299 if (fsig->params [0]->type == MONO_TYPE_I4)
5300 opcode = OP_ATOMIC_EXCHANGE_I4;
5301 #if SIZEOF_REGISTER == 8
5302 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5303 (fsig->params [0]->type == MONO_TYPE_I))
5304 opcode = OP_ATOMIC_EXCHANGE_I8;
5306 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5307 opcode = OP_ATOMIC_EXCHANGE_I4;
5312 MONO_INST_NEW (cfg, ins, opcode);
5313 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5314 ins->inst_basereg = args [0]->dreg;
5315 ins->inst_offset = 0;
5316 ins->sreg2 = args [1]->dreg;
5317 MONO_ADD_INS (cfg->cbb, ins);
5319 switch (fsig->params [0]->type) {
5321 ins->type = STACK_I4;
5325 ins->type = STACK_I8;
5327 case MONO_TYPE_OBJECT:
5328 ins->type = STACK_OBJ;
5331 g_assert_not_reached ();
/* storing a reference into the location: GC write barrier needed */
5334 if (cfg->gen_write_barriers && is_ref)
5335 emit_write_barrier (cfg, args [0], args [1]);
5337 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5339 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5340 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5342 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5343 if (fsig->params [1]->type == MONO_TYPE_I4)
5345 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5346 size = sizeof (gpointer);
5347 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5350 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5351 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5352 ins->sreg1 = args [0]->dreg;
5353 ins->sreg2 = args [1]->dreg;
5354 ins->sreg3 = args [2]->dreg;
5355 ins->type = STACK_I4;
5356 MONO_ADD_INS (cfg->cbb, ins);
5357 } else if (size == 8) {
5358 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5359 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5360 ins->sreg1 = args [0]->dreg;
5361 ins->sreg2 = args [1]->dreg;
5362 ins->sreg3 = args [2]->dreg;
5363 ins->type = STACK_I8;
5364 MONO_ADD_INS (cfg->cbb, ins);
5366 /* g_assert_not_reached (); */
5368 if (cfg->gen_write_barriers && is_ref)
5369 emit_write_barrier (cfg, args [0], args [1]);
5371 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5373 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5374 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
5378 } else if (cmethod->klass->image == mono_defaults.corlib) {
5379 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5380 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5381 if (should_insert_brekpoint (cfg->method)) {
5382 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5384 MONO_INST_NEW (cfg, ins, OP_NOP);
5385 MONO_ADD_INS (cfg->cbb, ins);
5389 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5390 && strcmp (cmethod->klass->name, "Environment") == 0) {
5392 EMIT_NEW_ICONST (cfg, ins, 1);
5394 EMIT_NEW_ICONST (cfg, ins, 0);
5398 } else if (cmethod->klass == mono_defaults.math_class) {
5400 * There is general branches code for Min/Max, but it does not work for
5402 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch Selector.GetHandle AOT optimization ---- */
5404 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5405 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5407 MonoJumpInfoToken *ji;
5410 cfg->disable_llvm = TRUE;
5412 if (args [0]->opcode == OP_GOT_ENTRY) {
5413 pi = args [0]->inst_p1;
5414 g_assert (pi->opcode == OP_PATCH_INFO);
/* NOTE(review): casting a pointer-sized inst_p1 to int truncates on LP64;
 * confirm inst_p1 really holds a small patch-info enum here */
5415 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5418 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5419 ji = args [0]->inst_p0;
5422 NULLIFY_INS (args [0]);
5425 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5426 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5427 ins->dreg = mono_alloc_ireg (cfg);
5429 ins->inst_p0 = mono_string_to_utf8 (s);
5430 MONO_ADD_INS (cfg->cbb, ins);
/* ---- fallbacks: SIMD, LLVM, then arch-specific intrinsics ---- */
5435 #ifdef MONO_ARCH_SIMD_INTRINSICS
5436 if (cfg->opt & MONO_OPT_SIMD) {
5437 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5443 if (COMPILE_LLVM (cfg)) {
5444 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5449 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5453 * This entry point could be used later for arbitrary method
5456 inline static MonoInst*
5457 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5458 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5460 if (method->klass == mono_defaults.string_class) {
5461 /* managed string allocation support */
5462 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5463 MonoInst *iargs [2];
5464 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5465 MonoMethod *managed_alloc = NULL;
5467 g_assert (vtable); /*Should not fail since it System.String*/
5468 #ifndef MONO_CROSS_COMPILE
5469 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5473 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5474 iargs [1] = args [0];
5475 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
5482 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5484 MonoInst *store, *temp;
5487 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5488 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5491 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5492 * would be different than the MonoInst's used to represent arguments, and
5493 * the ldelema implementation can't deal with that.
5494 * Solution: When ldelema is used on an inline argument, create a var for
5495 * it, emit ldelema on that var, and emit the saving code below in
5496 * inline_method () if needed.
5498 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5499 cfg->args [i] = temp;
5500 /* This uses cfg->args [i] which is set by the preceeding line */
5501 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5502 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when non-zero, inlining is additionally filtered by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (prefix match on the full method name).
 * NOTE(review): upstream normally ships these as 0; the value 1 here adds
 * a name comparison per inline attempt — confirm this is intentional.
 */
5507 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5508 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining of callees whose full name starts
 * with the prefix given by MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * With the variable unset every callee is allowed.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
		else
			limit = "";
	}

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining into callers whose full name starts
 * with the prefix given by MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * With the variable unset every caller is allowed.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
/*
 * emit_init_rvar:
 *
 *   Emit IR that stores a zero value of the appropriate kind into RVAR,
 * dispatching on RVAR's stack type (int, long, pointer, R8, valuetype, ...).
 * Used to give an inlined method's return variable a dummy value when the
 * callee never set it (e.g. it only throws).
 */
5571 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
	/* OP_R8CONST takes a pointer to its constant, so the 0.0 must have
	 * static storage duration. */
5573 static double r8_0 = 0.0;
5576 switch (rvar->type) {
5578 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5581 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5586 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5589 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5590 ins->type = STACK_R8;
5591 ins->inst_p0 = (void*)&r8_0;
5592 ins->dreg = rvar->dreg;
5593 MONO_ADD_INS (cfg->cbb, ins);
	/* Value types are zeroed as a whole. */
5596 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5599 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD (signature FSIG, arguments SP) into the method being
 * compiled in CFG, at the call site IP.  The callee's IL is converted by a
 * recursive call to mono_method_to_ir () between a fresh start/end bblock
 * pair; on success the new bblocks are linked into the caller's CFG and the
 * cost is returned, on failure the CFG state is rolled back.
 *   INLINE_ALWAYS forces inlining even past the cost limit (used for wrappers
 * and intrinsics); DONT_INLINE is the list of methods already on the inline
 * stack, used to stop recursion.
 */
5604 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5605 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5607 MonoInst *ins, *rvar = NULL;
5608 MonoMethodHeader *cheader;
5609 MonoBasicBlock *ebblock, *sbblock;
5611 MonoMethod *prev_inlined_method;
	/* prev_* locals save the parts of CFG state that mono_method_to_ir ()
	 * overwrites while compiling the callee, so they can be restored below. */
5612 MonoInst **prev_locals, **prev_args;
5613 MonoType **prev_arg_types;
5614 guint prev_real_offset;
5615 GHashTable *prev_cbb_hash;
5616 MonoBasicBlock **prev_cil_offset_to_bb;
5617 MonoBasicBlock *prev_cbb;
5618 unsigned char* prev_cil_start;
5619 guint32 prev_cil_offset_to_bb_len;
5620 MonoMethod *prev_current_method;
5621 MonoGenericContext *prev_generic_context;
5622 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5624 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* Optional env-var based name filters, see the helpers above. */
5626 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5627 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5630 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5631 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5635 if (cfg->verbose_level > 2)
5636 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5638 if (!cmethod->inline_info) {
5639 cfg->stat_inlineable_methods++;
5640 cmethod->inline_info = 1;
5643 /* allocate local variables */
5644 cheader = mono_method_get_header (cmethod);
5646 if (cheader == NULL || mono_loader_get_last_error ()) {
5647 MonoLoaderError *error = mono_loader_get_last_error ();
5650 mono_metadata_free_mh (cheader);
	/* Only propagate the loader error when inlining was mandatory;
	 * otherwise the call is simply emitted out-of-line. */
5651 if (inline_always && error)
5652 mono_cfg_set_exception (cfg, error->exception_type);
5654 mono_loader_clear_error ();
5658 /*Must verify before creating locals as it can cause the JIT to assert.*/
5659 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5660 mono_metadata_free_mh (cheader);
5664 /* allocate space to store the return value */
5665 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5666 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5669 prev_locals = cfg->locals;
5670 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5671 for (i = 0; i < cheader->num_locals; ++i)
5672 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5674 /* allocate start and end blocks */
5675 /* This is needed so if the inline is aborted, we can clean up */
5676 NEW_BBLOCK (cfg, sbblock);
5677 sbblock->real_offset = real_offset;
5679 NEW_BBLOCK (cfg, ebblock);
5680 ebblock->block_num = cfg->num_bblocks++;
5681 ebblock->real_offset = real_offset;
	/* Save caller-compilation state before recursing into the callee. */
5683 prev_args = cfg->args;
5684 prev_arg_types = cfg->arg_types;
5685 prev_inlined_method = cfg->inlined_method;
5686 cfg->inlined_method = cmethod;
5687 cfg->ret_var_set = FALSE;
5688 cfg->inline_depth ++;
5689 prev_real_offset = cfg->real_offset;
5690 prev_cbb_hash = cfg->cbb_hash;
5691 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5692 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5693 prev_cil_start = cfg->cil_start;
5694 prev_cbb = cfg->cbb;
5695 prev_current_method = cfg->current_method;
5696 prev_generic_context = cfg->generic_context;
5697 prev_ret_var_set = cfg->ret_var_set;
	/* A devirtualized callvirt still needs an explicit null check on 'this'. */
5699 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5702 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5704 ret_var_set = cfg->ret_var_set;
	/* Restore the caller-compilation state saved above. */
5706 cfg->inlined_method = prev_inlined_method;
5707 cfg->real_offset = prev_real_offset;
5708 cfg->cbb_hash = prev_cbb_hash;
5709 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5710 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5711 cfg->cil_start = prev_cil_start;
5712 cfg->locals = prev_locals;
5713 cfg->args = prev_args;
5714 cfg->arg_types = prev_arg_types;
5715 cfg->current_method = prev_current_method;
5716 cfg->generic_context = prev_generic_context;
5717 cfg->ret_var_set = prev_ret_var_set;
5718 cfg->inline_depth --;
	/* NOTE(review): 60 here is the inline cost limit for nested inlines —
	 * confirm against the callers' limits before changing. */
5720 if ((costs >= 0 && costs < 60) || inline_always) {
5721 if (cfg->verbose_level > 2)
5722 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5724 cfg->stat_inlined_methods++;
5726 /* always add some code to avoid block split failures */
5727 MONO_INST_NEW (cfg, ins, OP_NOP);
5728 MONO_ADD_INS (prev_cbb, ins);
5730 prev_cbb->next_bb = sbblock;
5731 link_bblock (cfg, prev_cbb, sbblock);
5734 * Get rid of the begin and end bblocks if possible to aid local
5737 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5739 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5740 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5742 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5743 MonoBasicBlock *prev = ebblock->in_bb [0];
5744 mono_merge_basic_blocks (cfg, prev, ebblock);
5746 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5747 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5748 cfg->cbb = prev_cbb;
5752 * It's possible that the rvar is set in some prev bblock, but not in others.
5758 for (i = 0; i < ebblock->in_count; ++i) {
5759 bb = ebblock->in_bb [i];
5761 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5764 emit_init_rvar (cfg, rvar, fsig->ret);
5774 * If the inlined method contains only a throw, then the ret var is not
5775 * set, so set it to a dummy value.
5778 emit_init_rvar (cfg, rvar, fsig->ret);
5780 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5783 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	/* Inline aborted: roll back and let the caller emit a normal call. */
5786 if (cfg->verbose_level > 2)
5787 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5788 cfg->exception_type = MONO_EXCEPTION_NONE;
5789 mono_loader_clear_error ();
5791 /* This gets rid of the newly added bblocks */
5792 cfg->cbb = prev_cbb;
5794 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5799 * Some of these comments may well be out-of-date.
5800 * Design decisions: we do a single pass over the IL code (and we do bblock
5801 * splitting/merging in the few cases when it's required: a back jump to an IL
5802 * address that was not already seen as bblock starting point).
5803 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5804 * Complex operations are decomposed in simpler ones right away. We need to let the
5805 * arch-specific code peek and poke inside this process somehow (except when the
5806 * optimizations can take advantage of the full semantic info of coarse opcodes).
5807 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5808 * MonoInst->opcode initially is the IL opcode or some simplification of that
5809 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5810 * opcode with value bigger than OP_LAST.
5811 * At this point the IR can be handed over to an interpreter, a dumb code generator
5812 * or to the optimizing code generator that will translate it to SSA form.
5814 * Profiling directed optimizations.
5815 * We may compile by default with few or no optimizations and instrument the code
5816 * or the user may indicate what methods to optimize the most either in a config file
5817 * or through repeated runs where the compiler applies offline the optimizations to
5818 * each method and then decides if it was worth it.
/* Inline verification helpers used throughout mono_method_to_ir ().  Each
 * macro validates one property of the IL being decoded and jumps to the
 * UNVERIFIED/LOAD_ERROR handler on failure; they rely on the local variables
 * (sp, stack_start, header, num_args, ip, end, cfg) of the enclosing body. */
5821 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5822 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5823 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5824 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5825 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5826 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5827 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5828 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5830 /* offset from br.s -> br like opcodes */
5831 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Returns TRUE when the IL address IP still belongs to basic block BB,
 * i.e. no *different* bblock starts at that offset in the cil_offset_to_bb map.
 */
5834 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5836 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5838 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode and create a
 * basic block (via GET_BBLOCK) at every branch target and at the instruction
 * following each branch, so the main conversion pass knows where bblocks
 * begin.  Also marks blocks ending in CEE_THROW as out-of-line (cold) code.
 */
5842 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5844 unsigned char *ip = start;
5845 unsigned char *target;
5848 MonoBasicBlock *bblock;
5849 const MonoOpcode *opcode;
5852 cli_addr = ip - start;
5853 i = mono_opcode_value ((const guint8 **)&ip, end);
5856 opcode = &mono_opcodes [i];
	/* Advance ip by the operand size; only branch operands create bblocks. */
5857 switch (opcode->argument) {
5858 case MonoInlineNone:
5861 case MonoInlineString:
5862 case MonoInlineType:
5863 case MonoInlineField:
5864 case MonoInlineMethod:
5867 case MonoShortInlineR:
5874 case MonoShortInlineVar:
5875 case MonoShortInlineI:
	/* 1-byte signed branch displacement, relative to the next instruction. */
5878 case MonoShortInlineBrTarget:
5879 target = start + cli_addr + 2 + (signed char)ip [1];
5880 GET_BBLOCK (cfg, bblock, target);
5883 GET_BBLOCK (cfg, bblock, ip);
	/* 4-byte signed branch displacement, relative to the next instruction. */
5885 case MonoInlineBrTarget:
5886 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5887 GET_BBLOCK (cfg, bblock, target);
5890 GET_BBLOCK (cfg, bblock, ip);
	/* switch: N 4-byte targets, all relative to the end of the table. */
5892 case MonoInlineSwitch: {
5893 guint32 n = read32 (ip + 1);
5896 cli_addr += 5 + 4 * n;
5897 target = start + cli_addr;
5898 GET_BBLOCK (cfg, bblock, target);
5900 for (j = 0; j < n; ++j) {
5901 target = start + cli_addr + (gint32)read32 (ip);
5902 GET_BBLOCK (cfg, bblock, target);
5912 g_assert_not_reached ();
5915 if (i == CEE_THROW) {
5916 unsigned char *bb_start = ip - 1;
5918 /* Find the start of the bblock containing the throw */
5920 while ((bb_start >= start) && !bblock) {
5921 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
	/* Throwing blocks are cold: move them out of the hot code path. */
5925 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data; otherwise it is a normal metadata
 * token resolved against M's image.  Open generic methods are allowed.
 */
5935 static inline MonoMethod *
5936 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5940 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5941 method = mono_method_get_wrapper_data (m, token);
5943 method = mono_class_inflate_generic_method (method, context);
5945 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected.
 */
5951 static inline MonoMethod *
5952 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5954 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5956 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  Wrapper methods
 * store the class in their wrapper data (inflated with CONTEXT if needed);
 * normal methods resolve the metadata token against their image.  The class
 * is initialized before being returned.
 */
5962 static inline MonoClass*
5963 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5967 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5968 klass = mono_method_get_wrapper_data (method, token);
5970 klass = mono_class_inflate_generic_class (klass, context);
5972 klass = mono_class_get_full (method->klass->image, token, context);
5975 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD.
 * Wrapper methods keep the signature in their wrapper data (inflated with
 * CONTEXT if needed); normal methods parse it from metadata.
 */
5979 static inline MonoMethodSignature*
5980 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5982 MonoMethodSignature *fsig;
5984 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5987 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5989 fsig = mono_inflate_generic_signature (fsig, context, &error);
5991 g_assert (mono_error_ok (&error));
5994 fsig = mono_metadata_parse_signature (method->klass->image, token);
6000 * Returns TRUE if the JIT should abort inlining because "callee"
6001 * is influenced by security attributes.
6004 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* When already inlining (cfg->method != caller), any declarative
	 * security on the callee makes us bail out of the inline. */
6008 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6012 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6013 if (result == MONO_JIT_SECURITY_OK)
6016 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6017 /* Generate code to throw a SecurityException before the actual call/link */
6018 MonoSecurityManager *secman = mono_security_manager_get_methods ();
	/* NOTE(review): the constant 4 is the argument passed to
	 * linkdemandsecurityexception — confirm its meaning in SecurityManager. */
6021 NEW_ICONST (cfg, args [0], 4);
6022 NEW_METHODCONST (cfg, args [1], caller);
6023 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6024 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6025 /* don't hide previous results */
6026 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6027 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException method used
 * to raise security exceptions from JITted code.
 */
6035 throw_exception (void)
6037 static MonoMethod *method = NULL;
6040 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6041 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call that throws the preallocated exception object EX at runtime,
 * via SecurityManager.ThrowException (see throw_exception () above).
 */
6048 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6050 MonoMethod *thrower = throw_exception ();
6053 EMIT_NEW_PCONST (cfg, args [0], ex);
6054 mono_emit_method_call (cfg, thrower, args, NULL);
6058 * Return the original method if a wrapper is specified. We can only access
6059 * the custom attributes from the original method.
6062 get_original_method (MonoMethod *method)
6064 if (method->wrapper_type == MONO_WRAPPER_NONE)
6067 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6068 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6071 /* in other cases we need to find the original method */
6072 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the corresponding security exception at runtime.
 */
6076 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6077 MonoBasicBlock *bblock, unsigned char *ip)
6079 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6080 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6082 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the corresponding security exception at runtime.
 */
6086 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6087 MonoBasicBlock *bblock, unsigned char *ip)
6089 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6090 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6092 emit_throw_exception (cfg, ex);
6096 * Check that the IL instructions at ip are the array initialization
6097 * sequence and return the pointer to the data and the size.
6100 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/* The pattern being matched (emitted by compilers for array literals):
	 *   newarr / dup / ldtoken <field> / call RuntimeHelpers::InitializeArray */
6103 * newarr[System.Int32]
6105 * ldtoken field valuetype ...
6106 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip [5] == 0x4 checks the ldtoken operand's token-table byte. */
6108 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6109 guint32 token = read32 (ip + 7);
6110 guint32 field_token = read32 (ip + 2);
6111 guint32 field_index = field_token & 0xffffff;
6113 const char *data_ptr;
6115 MonoMethod *cmethod;
6116 MonoClass *dummy_class;
6117 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6123 *out_field_token = field_token;
6125 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
	/* Make sure the call really targets corlib's RuntimeHelpers.InitializeArray. */
6128 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
	/* Only element types whose raw bytes can be copied directly qualify. */
6130 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6131 case MONO_TYPE_BOOLEAN:
6135 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6136 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6137 case MONO_TYPE_CHAR:
6147 return NULL; /* stupid ARM FP swapped format */
6157 if (size > mono_type_size (field->type, &dummy_align))
6160 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6161 if (!method->klass->image->dynamic) {
6162 field_index = read32 (ip + 2) & 0xffffff;
6163 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6164 data_ptr = mono_image_rva_map (method->klass->image, rva);
6165 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6166 /* for aot code we do the lookup on load */
6167 if (aot && data_ptr)
6168 return GUINT_TO_POINTER (rva);
6170 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6172 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP.
 */
6180 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6182 char *method_fname = mono_method_full_name (method, TRUE);
6184 MonoMethodHeader *header = mono_method_get_header (method);
6186 if (header->code_size == 0)
6187 method_code = g_strdup ("method body is empty.");
6189 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6190 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6191 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6192 g_free (method_fname);
6193 g_free (method_code);
	/* The header is freed later along with the rest of the compile. */
6194 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a ready-made managed exception object.  The object
 * is stored in cfg->exception_ptr, which is registered as a GC root so the
 * exception survives until it is thrown.
 */
6198 set_exception_object (MonoCompile *cfg, MonoException *exception)
6200 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6201 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6202 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local variable N.  When the value on top of
 * the stack is a freshly emitted constant and the store would be a plain
 * reg-reg move, retarget the constant's destination register instead of
 * emitting a separate move.
 */
6206 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6209 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6210 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6211 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6212 /* Optimize reg-reg moves away */
6214 * Can't optimize other opcodes, since sp[0] might point to
6215 * the last ins of a decomposed opcode.
6217 sp [0]->dreg = (cfg)->locals [n]->dreg;
6219 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6224 * ldloca inhibits many optimizations so try to get rid of it in common
	/* Recognizes "ldloca N; initobj T" and replaces it with a direct zeroing
	 * of local N (null store for reference types, VZERO for structs), so the
	 * address of the local is never taken.  Returns the new ip on success. */
6227 static inline unsigned char *
6228 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6237 local = read16 (ip + 2);
6241 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6242 gboolean skip = FALSE;
6244 /* From the INITOBJ case */
6245 token = read32 (ip + 2);
6246 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6247 CHECK_TYPELOAD (klass);
6248 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6249 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6250 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6251 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Returns whether CLASS derives from System.Exception, walking the parent
 * chain up to mono_defaults.exception_class.
 */
6264 is_exception_class (MonoClass *class)
6267 if (class == mono_defaults.exception_class)
6269 class = class->parent;
6275 * is_jit_optimizer_disabled:
6277 * Determine whether M's assembly has a DebuggableAttribute with the
6278 * IsJITOptimizerDisabled flag set.
6281 is_jit_optimizer_disabled (MonoMethod *m)
6283 MonoAssembly *ass = m->klass->image->assembly;
6284 MonoCustomAttrInfo* attrs;
6285 static MonoClass *klass;
6287 gboolean val = FALSE;
	/* Result is cached per assembly; the memory barriers below publish the
	 * value before the 'inited' flag so racing readers see a consistent pair. */
6290 if (ass->jit_optimizer_disabled_inited)
6291 return ass->jit_optimizer_disabled;
6294 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	/* No DebuggableAttribute type at all: optimizer stays enabled. */
6297 ass->jit_optimizer_disabled = FALSE;
6298 mono_memory_barrier ();
6299 ass->jit_optimizer_disabled_inited = TRUE;
6303 attrs = mono_custom_attrs_from_assembly (ass);
6305 for (i = 0; i < attrs->num_attrs; ++i) {
6306 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6309 MonoMethodSignature *sig;
6311 if (!attr->ctor || attr->ctor->klass != klass)
6313 /* Decode the attribute. See reflection.c */
6314 len = attr->data_size;
6315 p = (const char*)attr->data;
	/* Custom attribute blobs start with the 0x0001 prolog. */
6316 g_assert (read16 (p) == 0x0001);
6319 // FIXME: Support named parameters
6320 sig = mono_method_signature (attr->ctor);
6321 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6323 /* Two boolean arguments */
6327 mono_custom_attrs_free (attrs);
6330 ass->jit_optimizer_disabled = val;
6331 mono_memory_barrier ();
6332 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG) can be
 * compiled as a real tail call.  Starts from an arch-specific or
 * signature-equality check, then vetoes cases where the callee could observe
 * the caller's stack frame after it has been torn down.
 */
6338 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6340 gboolean supported_tail_call;
6343 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6344 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6346 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6349 for (i = 0; i < fsig->param_count; ++i) {
6350 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6351 /* These can point to the current method's stack */
6352 supported_tail_call = FALSE;
6354 if (fsig->hasthis && cmethod->klass->valuetype)
6355 /* this might point to the current method's stack */
6356 supported_tail_call = FALSE;
6357 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6358 supported_tail_call = FALSE;
6359 if (cfg->method->save_lmf)
6360 supported_tail_call = FALSE;
6361 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6362 supported_tail_call = FALSE;
6364 /* Debugging support */
6366 if (supported_tail_call) {
	/* mono_debug_count () lets tail calls be disabled selectively for bisecting. */
6367 if (!mono_debug_count ())
6368 supported_tail_call = FALSE;
6372 return supported_tail_call;
6375 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6376 * it to the thread local value based on the tls_offset field. Every other kind of access to
6377 * the field causes an assert.
	/* Returns TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata. */
6380 is_magic_tls_access (MonoClassField *field)
6382 if (strcmp (field->name, "tlsdata"))
6384 if (strcmp (field->parent->name, "ThreadLocal`1"))
6386 return field->parent->image == mono_defaults.corlib;
6389 /* emits the code needed to access a managed tls var (like ThreadStatic)
6390 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6391 * pointer for the current thread.
6392 * Returns the MonoInst* representing the address of the tls var.
6395 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6398 int static_data_reg, array_reg, dreg;
6399 int offset2_reg, idx_reg;
6400 // inlined access to the tls data
6401 // idx = (offset >> 24) - 1;
6402 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6403 static_data_reg = alloc_ireg (cfg);
6404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
	/* idx = (offset >> 24) - 1: the high byte of the offset encodes the
	 * 1-based index into the per-thread static_data table. */
6405 idx_reg = alloc_ireg (cfg);
6406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	/* Scale idx by sizeof (gpointer) to index the pointer array. */
6408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6409 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6410 array_reg = alloc_ireg (cfg);
6411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	/* The low 24 bits of the offset are the byte offset inside that chunk. */
6412 offset2_reg = alloc_ireg (cfg);
6413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6414 dreg = alloc_ireg (cfg);
6415 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6420 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6421 * this address is cached per-method in cached_tls_addr.
6424 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6426 MonoInst *load, *addr, *temp, *store, *thread_ins;
6427 MonoClassField *offset_field;
	/* Fast path: the address was already computed earlier in this method. */
6429 if (*cached_tls_addr) {
6430 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6433 thread_ins = mono_get_thread_intrinsic (cfg);
6434 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
	/* Load the ThreadLocal<T>.tls_offset value from the object instance. */
6436 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6438 MONO_ADD_INS (cfg->cbb, thread_ins);
	/* No thread intrinsic on this arch: fall back to a managed call. */
6440 MonoMethod *thread_method;
6441 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6442 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6444 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6445 addr->klass = mono_class_from_mono_type (tls_field->type);
6446 addr->type = STACK_MP;
	/* Cache the computed address in a temp for later accesses. */
6447 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6448 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6450 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6455 * mono_method_to_ir:
6457 * Translate the .net IL into linear IR.
6460 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6461 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6462 guint inline_offset, gboolean is_virtual_call)
6465 MonoInst *ins, **sp, **stack_start;
6466 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6467 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6468 MonoMethod *cmethod, *method_definition;
6469 MonoInst **arg_array;
6470 MonoMethodHeader *header;
6472 guint32 token, ins_flag;
6474 MonoClass *constrained_call = NULL;
6475 unsigned char *ip, *end, *target, *err_pos;
6476 static double r8_0 = 0.0;
6477 MonoMethodSignature *sig;
6478 MonoGenericContext *generic_context = NULL;
6479 MonoGenericContainer *generic_container = NULL;
6480 MonoType **param_types;
6481 int i, n, start_new_bblock, dreg;
6482 int num_calls = 0, inline_costs = 0;
6483 int breakpoint_id = 0;
6485 MonoBoolean security, pinvoke;
6486 MonoSecurityManager* secman = NULL;
6487 MonoDeclSecurityActions actions;
6488 GSList *class_inits = NULL;
6489 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6491 gboolean init_locals, seq_points, skip_dead_blocks;
6492 gboolean disable_inline, sym_seq_points = FALSE;
6493 MonoInst *cached_tls_addr = NULL;
6494 MonoDebugMethodInfo *minfo;
6495 MonoBitSet *seq_point_locs = NULL;
6496 MonoBitSet *seq_point_set_locs = NULL;
6498 disable_inline = is_jit_optimizer_disabled (method);
6500 /* serialization and xdomain stuff may need access to private fields and methods */
6501 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6502 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6503 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6504 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6505 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6506 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6508 dont_verify |= mono_security_smcs_hack_enabled ();
6510 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6511 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6512 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6513 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6514 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6516 image = method->klass->image;
6517 header = mono_method_get_header (method);
6519 MonoLoaderError *error;
6521 if ((error = mono_loader_get_last_error ())) {
6522 mono_cfg_set_exception (cfg, error->exception_type);
6524 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6525 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6527 goto exception_exit;
6529 generic_container = mono_method_get_generic_container (method);
6530 sig = mono_method_signature (method);
6531 num_args = sig->hasthis + sig->param_count;
6532 ip = (unsigned char*)header->code;
6533 cfg->cil_start = ip;
6534 end = ip + header->code_size;
6535 cfg->stat_cil_code_size += header->code_size;
6536 init_locals = header->init_locals;
6538 seq_points = cfg->gen_seq_points && cfg->method == method;
6539 #ifdef PLATFORM_ANDROID
6540 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6543 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6544 /* We could hit a seq point before attaching to the JIT (#8338) */
6548 if (cfg->gen_seq_points && cfg->method == method) {
6549 minfo = mono_debug_lookup_method (method);
6551 int i, n_il_offsets;
6555 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6556 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6557 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6558 sym_seq_points = TRUE;
6559 for (i = 0; i < n_il_offsets; ++i) {
6560 if (il_offsets [i] < header->code_size)
6561 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6563 g_free (il_offsets);
6564 g_free (line_numbers);
6569 * Methods without init_locals set could cause asserts in various passes
6574 method_definition = method;
6575 while (method_definition->is_inflated) {
6576 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6577 method_definition = imethod->declaring;
6580 /* SkipVerification is not allowed if core-clr is enabled */
6581 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6583 dont_verify_stloc = TRUE;
6586 if (mono_debug_using_mono_debugger ())
6587 cfg->keep_cil_nops = TRUE;
6589 if (sig->is_inflated)
6590 generic_context = mono_method_get_context (method);
6591 else if (generic_container)
6592 generic_context = &generic_container->context;
6593 cfg->generic_context = generic_context;
6595 if (!cfg->generic_sharing_context)
6596 g_assert (!sig->has_type_parameters);
6598 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6599 g_assert (method->is_inflated);
6600 g_assert (mono_method_get_context (method)->method_inst);
6602 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6603 g_assert (sig->generic_param_count);
6605 if (cfg->method == method) {
6606 cfg->real_offset = 0;
6608 cfg->real_offset = inline_offset;
6611 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6612 cfg->cil_offset_to_bb_len = header->code_size;
6614 cfg->current_method = method;
6616 if (cfg->verbose_level > 2)
6617 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6619 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6621 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6622 for (n = 0; n < sig->param_count; ++n)
6623 param_types [n + sig->hasthis] = sig->params [n];
6624 cfg->arg_types = param_types;
6626 dont_inline = g_list_prepend (dont_inline, method);
6627 if (cfg->method == method) {
6629 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6630 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6633 NEW_BBLOCK (cfg, start_bblock);
6634 cfg->bb_entry = start_bblock;
6635 start_bblock->cil_code = NULL;
6636 start_bblock->cil_length = 0;
6637 #if defined(__native_client_codegen__)
6638 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6639 ins->dreg = alloc_dreg (cfg, STACK_I4);
6640 MONO_ADD_INS (start_bblock, ins);
6644 NEW_BBLOCK (cfg, end_bblock);
6645 cfg->bb_exit = end_bblock;
6646 end_bblock->cil_code = NULL;
6647 end_bblock->cil_length = 0;
6648 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6649 g_assert (cfg->num_bblocks == 2);
6651 arg_array = cfg->args;
6653 if (header->num_clauses) {
6654 cfg->spvars = g_hash_table_new (NULL, NULL);
6655 cfg->exvars = g_hash_table_new (NULL, NULL);
6657 /* handle exception clauses */
6658 for (i = 0; i < header->num_clauses; ++i) {
6659 MonoBasicBlock *try_bb;
6660 MonoExceptionClause *clause = &header->clauses [i];
6661 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6662 try_bb->real_offset = clause->try_offset;
6663 try_bb->try_start = TRUE;
6664 try_bb->region = ((i + 1) << 8) | clause->flags;
6665 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6666 tblock->real_offset = clause->handler_offset;
6667 tblock->flags |= BB_EXCEPTION_HANDLER;
6669 link_bblock (cfg, try_bb, tblock);
6671 if (*(ip + clause->handler_offset) == CEE_POP)
6672 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6674 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6675 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6676 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6677 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6678 MONO_ADD_INS (tblock, ins);
6680 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6681 /* finally clauses already have a seq point */
6682 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6683 MONO_ADD_INS (tblock, ins);
6686 /* todo: is a fault block unsafe to optimize? */
6687 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6688 tblock->flags |= BB_EXCEPTION_UNSAFE;
6692 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6694 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6696 /* catch and filter blocks get the exception object on the stack */
6697 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6698 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6699 MonoInst *dummy_use;
6701 /* mostly like handle_stack_args (), but just sets the input args */
6702 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6703 tblock->in_scount = 1;
6704 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6705 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6708 * Add a dummy use for the exvar so its liveness info will be
6712 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6714 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6715 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6716 tblock->flags |= BB_EXCEPTION_HANDLER;
6717 tblock->real_offset = clause->data.filter_offset;
6718 tblock->in_scount = 1;
6719 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6720 /* The filter block shares the exvar with the handler block */
6721 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6722 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6723 MONO_ADD_INS (tblock, ins);
6727 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6728 clause->data.catch_class &&
6729 cfg->generic_sharing_context &&
6730 mono_class_check_context_used (clause->data.catch_class)) {
6732 * In shared generic code with catch
6733 * clauses containing type variables
6734 * the exception handling code has to
6735 * be able to get to the rgctx.
6736 * Therefore we have to make sure that
6737 * the vtable/mrgctx argument (for
6738 * static or generic methods) or the
6739 * "this" argument (for non-static
6740 * methods) are live.
6742 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6743 mini_method_get_context (method)->method_inst ||
6744 method->klass->valuetype) {
6745 mono_get_vtable_var (cfg);
6747 MonoInst *dummy_use;
6749 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6754 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6755 cfg->cbb = start_bblock;
6756 cfg->args = arg_array;
6757 mono_save_args (cfg, sig, inline_args);
6760 /* FIRST CODE BLOCK */
6761 NEW_BBLOCK (cfg, bblock);
6762 bblock->cil_code = ip;
6766 ADD_BBLOCK (cfg, bblock);
6768 if (cfg->method == method) {
6769 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6770 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6771 MONO_INST_NEW (cfg, ins, OP_BREAK);
6772 MONO_ADD_INS (bblock, ins);
6776 if (mono_security_cas_enabled ())
6777 secman = mono_security_manager_get_methods ();
6779 security = (secman && mono_security_method_has_declsec (method));
6780 /* at this point having security doesn't mean we have any code to generate */
6781 if (security && (cfg->method == method)) {
6782 /* Only Demand, NonCasDemand and DemandChoice require code generation.
6783 * And we do not want to enter the next section (with allocation) if we
6784 * have nothing to generate */
6785 security = mono_declsec_get_demands (method, &actions);
6788 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6789 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6791 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6792 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6793 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6795 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6796 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6800 mono_custom_attrs_free (custom);
6803 custom = mono_custom_attrs_from_class (wrapped->klass);
6804 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6808 mono_custom_attrs_free (custom);
6811 /* not a P/Invoke after all */
6816 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6817 /* we use a separate basic block for the initialization code */
6818 NEW_BBLOCK (cfg, init_localsbb);
6819 cfg->bb_init = init_localsbb;
6820 init_localsbb->real_offset = cfg->real_offset;
6821 start_bblock->next_bb = init_localsbb;
6822 init_localsbb->next_bb = bblock;
6823 link_bblock (cfg, start_bblock, init_localsbb);
6824 link_bblock (cfg, init_localsbb, bblock);
6826 cfg->cbb = init_localsbb;
6828 start_bblock->next_bb = bblock;
6829 link_bblock (cfg, start_bblock, bblock);
6832 if (cfg->gsharedvt && cfg->method == method) {
6833 MonoGSharedVtMethodInfo *info;
6834 MonoInst *var, *locals_var;
6837 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6838 info->method = cfg->method;
6840 info->entries = g_ptr_array_new ();
6841 cfg->gsharedvt_info = info;
6843 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6844 /* prevent it from being register allocated */
6845 //var->flags |= MONO_INST_INDIRECT;
6846 cfg->gsharedvt_info_var = var;
6848 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6849 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6851 /* Allocate locals */
6852 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6853 /* prevent it from being register allocated */
6854 //locals_var->flags |= MONO_INST_INDIRECT;
6855 cfg->gsharedvt_locals_var = locals_var;
6857 dreg = alloc_ireg (cfg);
6858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6860 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6861 ins->dreg = locals_var->dreg;
6863 MONO_ADD_INS (cfg->cbb, ins);
6864 cfg->gsharedvt_locals_var_ins = ins;
6866 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6869 ins->flags |= MONO_INST_INIT;
6873 /* at this point we know, if security is TRUE, that some code needs to be generated */
6874 if (security && (cfg->method == method)) {
6877 cfg->stat_cas_demand_generation++;
6879 if (actions.demand.blob) {
6880 /* Add code for SecurityAction.Demand */
6881 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6882 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6883 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6884 mono_emit_method_call (cfg, secman->demand, args, NULL);
6886 if (actions.noncasdemand.blob) {
6887 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6888 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6889 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6890 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6891 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6892 mono_emit_method_call (cfg, secman->demand, args, NULL);
6894 if (actions.demandchoice.blob) {
6895 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6896 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6897 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6898 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6899 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6903 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6905 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6908 if (mono_security_core_clr_enabled ()) {
6909 /* check if this is native code, e.g. an icall or a p/invoke */
6910 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6911 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6913 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6914 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6916 /* if this is a native call then it can only be JITted from platform code */
6917 if ((icall || pinvk) && method->klass && method->klass->image) {
6918 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6919 MonoException *ex = icall ? mono_get_exception_security () :
6920 mono_get_exception_method_access ();
6921 emit_throw_exception (cfg, ex);
6928 CHECK_CFG_EXCEPTION;
6930 if (header->code_size == 0)
6933 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6938 if (cfg->method == method)
6939 mono_debug_init_method (cfg, bblock, breakpoint_id);
6941 for (n = 0; n < header->num_locals; ++n) {
6942 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6947 /* We force the vtable variable here for all shared methods
6948 for the possibility that they might show up in a stack
6949 trace where their exact instantiation is needed. */
6950 if (cfg->generic_sharing_context && method == cfg->method) {
6951 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6952 mini_method_get_context (method)->method_inst ||
6953 method->klass->valuetype) {
6954 mono_get_vtable_var (cfg);
6956 /* FIXME: Is there a better way to do this?
6957 We need the variable live for the duration
6958 of the whole method. */
6959 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6963 /* add a check for this != NULL to inlined methods */
6964 if (is_virtual_call) {
6967 NEW_ARGLOAD (cfg, arg_ins, 0);
6968 MONO_ADD_INS (cfg->cbb, arg_ins);
6969 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6972 skip_dead_blocks = !dont_verify;
6973 if (skip_dead_blocks) {
6974 original_bb = bb = mono_basic_block_split (method, &error);
6975 if (!mono_error_ok (&error)) {
6976 mono_error_cleanup (&error);
6982 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6983 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6986 start_new_bblock = 0;
6989 if (cfg->method == method)
6990 cfg->real_offset = ip - header->code;
6992 cfg->real_offset = inline_offset;
6997 if (start_new_bblock) {
6998 bblock->cil_length = ip - bblock->cil_code;
6999 if (start_new_bblock == 2) {
7000 g_assert (ip == tblock->cil_code);
7002 GET_BBLOCK (cfg, tblock, ip);
7004 bblock->next_bb = tblock;
7007 start_new_bblock = 0;
7008 for (i = 0; i < bblock->in_scount; ++i) {
7009 if (cfg->verbose_level > 3)
7010 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7011 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7015 g_slist_free (class_inits);
7018 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7019 link_bblock (cfg, bblock, tblock);
7020 if (sp != stack_start) {
7021 handle_stack_args (cfg, stack_start, sp - stack_start);
7023 CHECK_UNVERIFIABLE (cfg);
7025 bblock->next_bb = tblock;
7028 for (i = 0; i < bblock->in_scount; ++i) {
7029 if (cfg->verbose_level > 3)
7030 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7031 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7034 g_slist_free (class_inits);
7039 if (skip_dead_blocks) {
7040 int ip_offset = ip - header->code;
7042 if (ip_offset == bb->end)
7046 int op_size = mono_opcode_size (ip, end);
7047 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7049 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7051 if (ip_offset + op_size == bb->end) {
7052 MONO_INST_NEW (cfg, ins, OP_NOP);
7053 MONO_ADD_INS (bblock, ins);
7054 start_new_bblock = 1;
7062 * Sequence points are points where the debugger can place a breakpoint.
7063 * Currently, we generate these automatically at points where the IL
7066 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7068 * Make methods interruptable at the beginning, and at the targets of
7069 * backward branches.
7070 * Also, do this at the start of every bblock in methods with clauses too,
7071 * to be able to handle instructions with imprecise control flow like
7073 * Backward branches are handled at the end of method-to-ir ().
7075 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7077 /* Avoid sequence points on empty IL like .volatile */
7078 // FIXME: Enable this
7079 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7080 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7081 if (sp != stack_start)
7082 ins->flags |= MONO_INST_NONEMPTY_STACK;
7083 MONO_ADD_INS (cfg->cbb, ins);
7086 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7089 bblock->real_offset = cfg->real_offset;
7091 if ((cfg->method == method) && cfg->coverage_info) {
7092 guint32 cil_offset = ip - header->code;
7093 cfg->coverage_info->data [cil_offset].cil_code = ip;
7095 /* TODO: Use an increment here */
7096 #if defined(TARGET_X86)
7097 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7098 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7100 MONO_ADD_INS (cfg->cbb, ins);
7102 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7103 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7107 if (cfg->verbose_level > 3)
7108 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7112 if (seq_points && !sym_seq_points && sp != stack_start) {
7114 * The C# compiler uses these nops to notify the JIT that it should
7115 * insert seq points.
7117 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7118 MONO_ADD_INS (cfg->cbb, ins);
7120 if (cfg->keep_cil_nops)
7121 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7123 MONO_INST_NEW (cfg, ins, OP_NOP);
7125 MONO_ADD_INS (bblock, ins);
7128 if (should_insert_brekpoint (cfg->method)) {
7129 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7131 MONO_INST_NEW (cfg, ins, OP_NOP);
7134 MONO_ADD_INS (bblock, ins);
7140 CHECK_STACK_OVF (1);
7141 n = (*ip)-CEE_LDARG_0;
7143 EMIT_NEW_ARGLOAD (cfg, ins, n);
7151 CHECK_STACK_OVF (1);
7152 n = (*ip)-CEE_LDLOC_0;
7154 EMIT_NEW_LOCLOAD (cfg, ins, n);
7163 n = (*ip)-CEE_STLOC_0;
7166 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7168 emit_stloc_ir (cfg, sp, header, n);
7175 CHECK_STACK_OVF (1);
7178 EMIT_NEW_ARGLOAD (cfg, ins, n);
7184 CHECK_STACK_OVF (1);
7187 NEW_ARGLOADA (cfg, ins, n);
7188 MONO_ADD_INS (cfg->cbb, ins);
7198 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7200 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7205 CHECK_STACK_OVF (1);
7208 EMIT_NEW_LOCLOAD (cfg, ins, n);
7212 case CEE_LDLOCA_S: {
7213 unsigned char *tmp_ip;
7215 CHECK_STACK_OVF (1);
7216 CHECK_LOCAL (ip [1]);
7218 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7224 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7233 CHECK_LOCAL (ip [1]);
7234 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7236 emit_stloc_ir (cfg, sp, header, ip [1]);
7241 CHECK_STACK_OVF (1);
7242 EMIT_NEW_PCONST (cfg, ins, NULL);
7243 ins->type = STACK_OBJ;
7248 CHECK_STACK_OVF (1);
7249 EMIT_NEW_ICONST (cfg, ins, -1);
7262 CHECK_STACK_OVF (1);
7263 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7269 CHECK_STACK_OVF (1);
7271 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7277 CHECK_STACK_OVF (1);
7278 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7284 CHECK_STACK_OVF (1);
7285 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7286 ins->type = STACK_I8;
7287 ins->dreg = alloc_dreg (cfg, STACK_I8);
7289 ins->inst_l = (gint64)read64 (ip);
7290 MONO_ADD_INS (bblock, ins);
7296 gboolean use_aotconst = FALSE;
7298 #ifdef TARGET_POWERPC
7299 /* FIXME: Clean this up */
7300 if (cfg->compile_aot)
7301 use_aotconst = TRUE;
7304 /* FIXME: we should really allocate this only late in the compilation process */
7305 f = mono_domain_alloc (cfg->domain, sizeof (float));
7307 CHECK_STACK_OVF (1);
7313 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7315 dreg = alloc_freg (cfg);
7316 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7317 ins->type = STACK_R8;
7319 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7320 ins->type = STACK_R8;
7321 ins->dreg = alloc_dreg (cfg, STACK_R8);
7323 MONO_ADD_INS (bblock, ins);
7333 gboolean use_aotconst = FALSE;
7335 #ifdef TARGET_POWERPC
7336 /* FIXME: Clean this up */
7337 if (cfg->compile_aot)
7338 use_aotconst = TRUE;
7341 /* FIXME: we should really allocate this only late in the compilation process */
7342 d = mono_domain_alloc (cfg->domain, sizeof (double));
7344 CHECK_STACK_OVF (1);
7350 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7352 dreg = alloc_freg (cfg);
7353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7354 ins->type = STACK_R8;
7356 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7357 ins->type = STACK_R8;
7358 ins->dreg = alloc_dreg (cfg, STACK_R8);
7360 MONO_ADD_INS (bblock, ins);
7369 MonoInst *temp, *store;
7371 CHECK_STACK_OVF (1);
7375 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7376 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7378 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7381 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7394 if (sp [0]->type == STACK_R8)
7395 /* we need to pop the value from the x86 FP stack */
7396 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7402 INLINE_FAILURE ("jmp");
7403 GSHAREDVT_FAILURE (*ip);
7406 if (stack_start != sp)
7408 token = read32 (ip + 1);
7409 /* FIXME: check the signature matches */
7410 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7412 if (!cmethod || mono_loader_get_last_error ())
7415 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7416 GENERIC_SHARING_FAILURE (CEE_JMP);
7418 if (mono_security_cas_enabled ())
7419 CHECK_CFG_EXCEPTION;
7421 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7423 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7426 /* Handle tail calls similarly to calls */
7427 n = fsig->param_count + fsig->hasthis;
7429 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7430 call->method = cmethod;
7431 call->tail_call = TRUE;
7432 call->signature = mono_method_signature (cmethod);
7433 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7434 call->inst.inst_p0 = cmethod;
7435 for (i = 0; i < n; ++i)
7436 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7438 mono_arch_emit_call (cfg, call);
7439 MONO_ADD_INS (bblock, (MonoInst*)call);
7442 for (i = 0; i < num_args; ++i)
7443 /* Prevent arguments from being optimized away */
7444 arg_array [i]->flags |= MONO_INST_VOLATILE;
7446 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7447 ins = (MonoInst*)call;
7448 ins->inst_p0 = cmethod;
7449 MONO_ADD_INS (bblock, ins);
7453 start_new_bblock = 1;
7458 case CEE_CALLVIRT: {
7459 MonoInst *addr = NULL;
7460 MonoMethodSignature *fsig = NULL;
7462 int virtual = *ip == CEE_CALLVIRT;
7463 int calli = *ip == CEE_CALLI;
7464 gboolean pass_imt_from_rgctx = FALSE;
7465 MonoInst *imt_arg = NULL;
7466 MonoInst *keep_this_alive = NULL;
7467 gboolean pass_vtable = FALSE;
7468 gboolean pass_mrgctx = FALSE;
7469 MonoInst *vtable_arg = NULL;
7470 gboolean check_this = FALSE;
7471 gboolean supported_tail_call = FALSE;
7472 gboolean tail_call = FALSE;
7473 gboolean need_seq_point = FALSE;
7474 guint32 call_opcode = *ip;
7475 gboolean emit_widen = TRUE;
7476 gboolean push_res = TRUE;
7477 gboolean skip_ret = FALSE;
7478 gboolean delegate_invoke = FALSE;
7481 token = read32 (ip + 1);
7486 //GSHAREDVT_FAILURE (*ip);
7491 fsig = mini_get_signature (method, token, generic_context);
7492 n = fsig->param_count + fsig->hasthis;
7494 if (method->dynamic && fsig->pinvoke) {
7498 * This is a call through a function pointer using a pinvoke
7499 * signature. Have to create a wrapper and call that instead.
7500 * FIXME: This is very slow, need to create a wrapper at JIT time
7501 * instead based on the signature.
7503 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7504 EMIT_NEW_PCONST (cfg, args [1], fsig);
7506 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7509 MonoMethod *cil_method;
7511 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7512 cil_method = cmethod;
7514 if (constrained_call) {
7515 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7516 if (cfg->verbose_level > 2)
7517 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7518 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7519 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7520 cfg->generic_sharing_context)) {
7521 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7524 if (cfg->verbose_level > 2)
7525 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7527 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7529 * This is needed since get_method_constrained can't find
7530 * the method in klass representing a type var.
7531 * The type var is guaranteed to be a reference type in this
7534 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7535 g_assert (!cmethod->klass->valuetype);
7537 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7542 if (!cmethod || mono_loader_get_last_error ())
7544 if (!dont_verify && !cfg->skip_visibility) {
7545 MonoMethod *target_method = cil_method;
7546 if (method->is_inflated) {
7547 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7549 if (!mono_method_can_access_method (method_definition, target_method) &&
7550 !mono_method_can_access_method (method, cil_method))
7551 METHOD_ACCESS_FAILURE;
7554 if (mono_security_core_clr_enabled ())
7555 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7557 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7558 /* MS.NET seems to silently convert this to a callvirt */
7563 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7564 * converts to a callvirt.
7566 * tests/bug-515884.il is an example of this behavior
7568 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7569 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7570 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7574 if (!cmethod->klass->inited)
7575 if (!mono_class_init (cmethod->klass))
7576 TYPE_LOAD_ERROR (cmethod->klass);
7578 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7579 mini_class_is_system_array (cmethod->klass)) {
7580 array_rank = cmethod->klass->rank;
7581 fsig = mono_method_signature (cmethod);
7583 fsig = mono_method_signature (cmethod);
7588 if (fsig->pinvoke) {
7589 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7590 check_for_pending_exc, cfg->compile_aot);
7591 fsig = mono_method_signature (wrapper);
7592 } else if (constrained_call) {
7593 fsig = mono_method_signature (cmethod);
7595 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7599 mono_save_token_info (cfg, image, token, cil_method);
7601 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7603 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7604 * foo (bar (), baz ())
7605 * works correctly. MS does this also:
7606 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7607 * The problem with this approach is that the debugger will stop after all calls returning a value,
7608 * even for simple cases, like:
7611 /* Special case a few common successor opcodes */
7612 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7613 need_seq_point = TRUE;
7616 n = fsig->param_count + fsig->hasthis;
7618 /* Don't support calls made using type arguments for now */
7620 if (cfg->gsharedvt) {
7621 if (mini_is_gsharedvt_signature (cfg, fsig))
7622 GSHAREDVT_FAILURE (*ip);
7626 if (mono_security_cas_enabled ()) {
7627 if (check_linkdemand (cfg, method, cmethod))
7628 INLINE_FAILURE ("linkdemand");
7629 CHECK_CFG_EXCEPTION;
7632 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7633 g_assert_not_reached ();
7636 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7639 if (!cfg->generic_sharing_context && cmethod)
7640 g_assert (!mono_method_check_context_used (cmethod));
7644 //g_assert (!virtual || fsig->hasthis);
7648 if (constrained_call) {
7649 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7651 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7653 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7654 /* The 'Own method' case below */
7655 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7656 /* 'The type parameter is instantiated as a reference type' case below. */
7657 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7658 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7659 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7660 MonoInst *args [16];
7663 * This case handles calls to
7664 * - object:ToString()/Equals()/GetHashCode(),
7665 * - System.IComparable<T>:CompareTo()
7666 * - System.IEquatable<T>:Equals ()
7667 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7671 if (mono_method_check_context_used (cmethod))
7672 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7674 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7675 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7677 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7678 if (fsig->hasthis && fsig->param_count) {
7679 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7680 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7681 ins->dreg = alloc_preg (cfg);
7682 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7683 MONO_ADD_INS (cfg->cbb, ins);
7686 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7689 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7691 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7692 addr_reg = ins->dreg;
7693 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7695 EMIT_NEW_ICONST (cfg, args [3], 0);
7696 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7699 EMIT_NEW_ICONST (cfg, args [3], 0);
7700 EMIT_NEW_ICONST (cfg, args [4], 0);
7702 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7705 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7706 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7707 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7711 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7712 MONO_ADD_INS (cfg->cbb, add);
7714 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7715 MONO_ADD_INS (cfg->cbb, ins);
7716 /* ins represents the call result */
7721 GSHAREDVT_FAILURE (*ip);
7725 * We have the `constrained.' prefix opcode.
7727 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7729 * The type parameter is instantiated as a valuetype,
7730 * but that type doesn't override the method we're
7731 * calling, so we need to box `this'.
7733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7734 ins->klass = constrained_call;
7735 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7736 CHECK_CFG_EXCEPTION;
7737 } else if (!constrained_call->valuetype) {
7738 int dreg = alloc_ireg_ref (cfg);
7741 * The type parameter is instantiated as a reference
7742 * type. We have a managed pointer on the stack, so
7743 * we need to dereference it here.
7745 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7746 ins->type = STACK_OBJ;
7749 if (cmethod->klass->valuetype) {
7752 /* Interface method */
7755 mono_class_setup_vtable (constrained_call);
7756 CHECK_TYPELOAD (constrained_call);
7757 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7759 TYPE_LOAD_ERROR (constrained_call);
7760 slot = mono_method_get_vtable_slot (cmethod);
7762 TYPE_LOAD_ERROR (cmethod->klass);
7763 cmethod = constrained_call->vtable [ioffset + slot];
7765 if (cmethod->klass == mono_defaults.enum_class) {
7766 /* Enum implements some interfaces, so treat this as the first case */
7767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7768 ins->klass = constrained_call;
7769 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7770 CHECK_CFG_EXCEPTION;
7775 constrained_call = NULL;
7778 if (!calli && check_call_signature (cfg, fsig, sp))
7781 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7782 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7783 delegate_invoke = TRUE;
7786 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7788 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7789 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7797 * If the callee is a shared method, then its static cctor
7798 * might not get called after the call was patched.
7800 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7801 emit_generic_class_init (cfg, cmethod->klass);
7802 CHECK_TYPELOAD (cmethod->klass);
7806 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7808 if (cfg->generic_sharing_context && cmethod) {
7809 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7811 context_used = mini_method_check_context_used (cfg, cmethod);
7813 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7814 /* Generic method interface
7815 calls are resolved via a
7816 helper function and don't
7818 if (!cmethod_context || !cmethod_context->method_inst)
7819 pass_imt_from_rgctx = TRUE;
7823 * If a shared method calls another
7824 * shared method then the caller must
7825 * have a generic sharing context
7826 * because the magic trampoline
7827 * requires it. FIXME: We shouldn't
7828 * have to force the vtable/mrgctx
7829 * variable here. Instead there
7830 * should be a flag in the cfg to
7831 * request a generic sharing context.
7834 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7835 mono_get_vtable_var (cfg);
7840 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7842 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7844 CHECK_TYPELOAD (cmethod->klass);
7845 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7850 g_assert (!vtable_arg);
7852 if (!cfg->compile_aot) {
7854 * emit_get_rgctx_method () calls mono_class_vtable () so check
7855 * for type load errors before.
7857 mono_class_setup_vtable (cmethod->klass);
7858 CHECK_TYPELOAD (cmethod->klass);
7861 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7863 /* !marshalbyref is needed to properly handle generic methods + remoting */
7864 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7865 MONO_METHOD_IS_FINAL (cmethod)) &&
7866 !mono_class_is_marshalbyref (cmethod->klass)) {
7873 if (pass_imt_from_rgctx) {
7874 g_assert (!pass_vtable);
7877 imt_arg = emit_get_rgctx_method (cfg, context_used,
7878 cmethod, MONO_RGCTX_INFO_METHOD);
7882 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7884 /* Calling virtual generic methods */
7885 if (cmethod && virtual &&
7886 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7887 !(MONO_METHOD_IS_FINAL (cmethod) &&
7888 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7889 fsig->generic_param_count &&
7890 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7891 MonoInst *this_temp, *this_arg_temp, *store;
7892 MonoInst *iargs [4];
7893 gboolean use_imt = FALSE;
7895 g_assert (fsig->is_inflated);
7897 /* Prevent inlining of methods that contain indirect calls */
7898 INLINE_FAILURE ("virtual generic call");
7900 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7901 GSHAREDVT_FAILURE (*ip);
7903 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7904 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7909 g_assert (!imt_arg);
7911 g_assert (cmethod->is_inflated);
7912 imt_arg = emit_get_rgctx_method (cfg, context_used,
7913 cmethod, MONO_RGCTX_INFO_METHOD);
7914 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7916 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7917 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7918 MONO_ADD_INS (bblock, store);
7920 /* FIXME: This should be a managed pointer */
7921 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7923 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7924 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7925 cmethod, MONO_RGCTX_INFO_METHOD);
7926 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7927 addr = mono_emit_jit_icall (cfg,
7928 mono_helper_compile_generic_method, iargs);
7930 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7932 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7939 * Implement a workaround for the inherent races involved in locking:
7945 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7946 * try block, the Exit () won't be executed, see:
7947 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7948 * To work around this, we extend such try blocks to include the last x bytes
7949 * of the Monitor.Enter () call.
7951 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7952 MonoBasicBlock *tbb;
7954 GET_BBLOCK (cfg, tbb, ip + 5);
7956 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7957 * from Monitor.Enter like ArgumentNullException.
7959 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7960 /* Mark this bblock as needing to be extended */
7961 tbb->extend_try_block = TRUE;
7965 /* Conversion to a JIT intrinsic */
7966 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7968 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7969 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7976 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7977 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7978 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7979 !g_list_find (dont_inline, cmethod)) {
7981 gboolean always = FALSE;
7983 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7984 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7985 /* Prevent inlining of methods that call wrappers */
7986 INLINE_FAILURE ("wrapper call");
7987 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7991 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7993 cfg->real_offset += 5;
7996 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7997 /* *sp is already set by inline_method */
8002 inline_costs += costs;
8008 /* Tail recursion elimination */
8009 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8010 gboolean has_vtargs = FALSE;
8013 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8014 INLINE_FAILURE ("tail call");
8016 /* keep it simple */
8017 for (i = fsig->param_count - 1; i >= 0; i--) {
8018 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8023 for (i = 0; i < n; ++i)
8024 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8025 MONO_INST_NEW (cfg, ins, OP_BR);
8026 MONO_ADD_INS (bblock, ins);
8027 tblock = start_bblock->out_bb [0];
8028 link_bblock (cfg, bblock, tblock);
8029 ins->inst_target_bb = tblock;
8030 start_new_bblock = 1;
8032 /* skip the CEE_RET, too */
8033 if (ip_in_bb (cfg, bblock, ip + 5))
8040 inline_costs += 10 * num_calls++;
8043 * Making generic calls out of gsharedvt methods.
8045 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8046 MonoRgctxInfoType info_type;
8049 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8050 //GSHAREDVT_FAILURE (*ip);
8051 // disable for possible remoting calls
8052 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8053 GSHAREDVT_FAILURE (*ip);
8054 if (fsig->generic_param_count) {
8055 /* virtual generic call */
8056 g_assert (mono_use_imt);
8057 g_assert (!imt_arg);
8058 /* Same as the virtual generic case above */
8059 imt_arg = emit_get_rgctx_method (cfg, context_used,
8060 cmethod, MONO_RGCTX_INFO_METHOD);
8061 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8066 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8067 /* test_0_multi_dim_arrays () in gshared.cs */
8068 GSHAREDVT_FAILURE (*ip);
8070 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8071 keep_this_alive = sp [0];
8073 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8074 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8076 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8077 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8079 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8081 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8083 * We pass the address to the gsharedvt trampoline in the rgctx reg
8085 MonoInst *callee = addr;
8087 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8089 GSHAREDVT_FAILURE (*ip);
8091 addr = emit_get_rgctx_sig (cfg, context_used,
8092 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8093 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8097 /* Generic sharing */
8098 /* FIXME: only do this for generic methods if
8099 they are not shared! */
8100 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8101 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8102 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8103 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8104 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8105 INLINE_FAILURE ("gshared");
8107 g_assert (cfg->generic_sharing_context && cmethod);
8111 * We are compiling a call to a
8112 * generic method from shared code,
8113 * which means that we have to look up
8114 * the method in the rgctx and do an
8118 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8120 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8121 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8125 /* Indirect calls */
8127 if (call_opcode == CEE_CALL)
8128 g_assert (context_used);
8129 else if (call_opcode == CEE_CALLI)
8130 g_assert (!vtable_arg);
8132 /* FIXME: what the hell is this??? */
8133 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8134 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8136 /* Prevent inlining of methods with indirect calls */
8137 INLINE_FAILURE ("indirect call");
8139 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8144 * Instead of emitting an indirect call, emit a direct call
8145 * with the contents of the aotconst as the patch info.
8147 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8148 info_type = addr->inst_c1;
8149 info_data = addr->inst_p0;
8151 info_type = addr->inst_right->inst_c1;
8152 info_data = addr->inst_right->inst_left;
8155 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8156 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8161 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8169 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8170 MonoInst *val = sp [fsig->param_count];
8172 if (val->type == STACK_OBJ) {
8173 MonoInst *iargs [2];
8178 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8181 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8182 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8183 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8184 emit_write_barrier (cfg, addr, val);
8185 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8186 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8189 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8190 if (!cmethod->klass->element_class->valuetype && !readonly)
8191 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8192 CHECK_TYPELOAD (cmethod->klass);
8195 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8198 g_assert_not_reached ();
8205 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8209 /* Tail prefix / tail call optimization */
8211 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8212 /* FIXME: runtime generic context pointer for jumps? */
8213 /* FIXME: handle this for generic sharing eventually */
8214 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8215 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8216 supported_tail_call = TRUE;
8217 if (supported_tail_call) {
8218 if (call_opcode != CEE_CALL)
8219 supported_tail_call = FALSE;
8222 if (supported_tail_call) {
8225 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8226 INLINE_FAILURE ("tail call");
8228 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8230 if (ARCH_USE_OP_TAIL_CALL) {
8231 /* Handle tail calls similarly to normal calls */
8234 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8235 call->tail_call = TRUE;
8236 call->method = cmethod;
8237 call->signature = mono_method_signature (cmethod);
8240 * We implement tail calls by storing the actual arguments into the
8241 * argument variables, then emitting a CEE_JMP.
8243 for (i = 0; i < n; ++i) {
8244 /* Prevent argument from being register allocated */
8245 arg_array [i]->flags |= MONO_INST_VOLATILE;
8246 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8248 ins = (MonoInst*)call;
8249 ins->inst_p0 = cmethod;
8250 ins->inst_p1 = arg_array [0];
8251 MONO_ADD_INS (bblock, ins);
8252 link_bblock (cfg, bblock, end_bblock);
8253 start_new_bblock = 1;
8255 // FIXME: Eliminate unreachable epilogs
8258 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8259 * only reachable from this call.
8261 GET_BBLOCK (cfg, tblock, ip + 5);
8262 if (tblock == bblock || tblock->in_count == 0)
8271 * Synchronized wrappers.
8272			 * It's hard to determine where to replace a method with its synchronized
8273 * wrapper without causing an infinite recursion. The current solution is
8274 * to add the synchronized wrapper in the trampolines, and to
8275 * change the called method to a dummy wrapper, and resolve that wrapper
8276 * to the real method in mono_jit_compile_method ().
8278 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8279 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8280 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8281 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8285 INLINE_FAILURE ("call");
8286 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8287 imt_arg, vtable_arg);
8290 link_bblock (cfg, bblock, end_bblock);
8291 start_new_bblock = 1;
8293 // FIXME: Eliminate unreachable epilogs
8296 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8297 * only reachable from this call.
8299 GET_BBLOCK (cfg, tblock, ip + 5);
8300 if (tblock == bblock || tblock->in_count == 0)
8307 /* End of call, INS should contain the result of the call, if any */
8309 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8312 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8317 if (keep_this_alive) {
8318 MonoInst *dummy_use;
8320 /* See mono_emit_method_call_full () */
8321 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8324 CHECK_CFG_EXCEPTION;
8328 g_assert (*ip == CEE_RET);
8332 constrained_call = NULL;
8334 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8338 if (cfg->method != method) {
8339 /* return from inlined method */
8341 * If in_count == 0, that means the ret is unreachable due to
8342				 * being preceded by a throw. In that case, inline_method () will
8343 * handle setting the return value
8344 * (test case: test_0_inline_throw ()).
8346 if (return_var && cfg->cbb->in_count) {
8347 MonoType *ret_type = mono_method_signature (method)->ret;
8353 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8356 //g_assert (returnvar != -1);
8357 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8358 cfg->ret_var_set = TRUE;
8362 MonoType *ret_type = mono_method_signature (method)->ret;
8364 if (seq_points && !sym_seq_points) {
8366					 * Place a seq point here too even though the IL stack is not
8367 * empty, so a step over on
8370 * will work correctly.
8372 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8373 MONO_ADD_INS (cfg->cbb, ins);
8376 g_assert (!return_var);
8380 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8383 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8386 if (!cfg->vret_addr) {
8389 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8391 EMIT_NEW_RETLOADA (cfg, ret_addr);
8393 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8394 ins->klass = mono_class_from_mono_type (ret_type);
8397 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8398 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8399 MonoInst *iargs [1];
8403 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8404 mono_arch_emit_setret (cfg, method, conv);
8406 mono_arch_emit_setret (cfg, method, *sp);
8409 mono_arch_emit_setret (cfg, method, *sp);
8414 if (sp != stack_start)
8416 MONO_INST_NEW (cfg, ins, OP_BR);
8418 ins->inst_target_bb = end_bblock;
8419 MONO_ADD_INS (bblock, ins);
8420 link_bblock (cfg, bblock, end_bblock);
8421 start_new_bblock = 1;
8425 MONO_INST_NEW (cfg, ins, OP_BR);
8427 target = ip + 1 + (signed char)(*ip);
8429 GET_BBLOCK (cfg, tblock, target);
8430 link_bblock (cfg, bblock, tblock);
8431 ins->inst_target_bb = tblock;
8432 if (sp != stack_start) {
8433 handle_stack_args (cfg, stack_start, sp - stack_start);
8435 CHECK_UNVERIFIABLE (cfg);
8437 MONO_ADD_INS (bblock, ins);
8438 start_new_bblock = 1;
8439 inline_costs += BRANCH_COST;
8453 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8455 target = ip + 1 + *(signed char*)ip;
8461 inline_costs += BRANCH_COST;
8465 MONO_INST_NEW (cfg, ins, OP_BR);
8468 target = ip + 4 + (gint32)read32(ip);
8470 GET_BBLOCK (cfg, tblock, target);
8471 link_bblock (cfg, bblock, tblock);
8472 ins->inst_target_bb = tblock;
8473 if (sp != stack_start) {
8474 handle_stack_args (cfg, stack_start, sp - stack_start);
8476 CHECK_UNVERIFIABLE (cfg);
8479 MONO_ADD_INS (bblock, ins);
8481 start_new_bblock = 1;
8482 inline_costs += BRANCH_COST;
8489 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8490 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8491 guint32 opsize = is_short ? 1 : 4;
8493 CHECK_OPSIZE (opsize);
8495 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8498 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8503 GET_BBLOCK (cfg, tblock, target);
8504 link_bblock (cfg, bblock, tblock);
8505 GET_BBLOCK (cfg, tblock, ip);
8506 link_bblock (cfg, bblock, tblock);
8508 if (sp != stack_start) {
8509 handle_stack_args (cfg, stack_start, sp - stack_start);
8510 CHECK_UNVERIFIABLE (cfg);
8513 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8514 cmp->sreg1 = sp [0]->dreg;
8515 type_from_op (cmp, sp [0], NULL);
8518 #if SIZEOF_REGISTER == 4
8519 if (cmp->opcode == OP_LCOMPARE_IMM) {
8520 /* Convert it to OP_LCOMPARE */
8521 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8522 ins->type = STACK_I8;
8523 ins->dreg = alloc_dreg (cfg, STACK_I8);
8525 MONO_ADD_INS (bblock, ins);
8526 cmp->opcode = OP_LCOMPARE;
8527 cmp->sreg2 = ins->dreg;
8530 MONO_ADD_INS (bblock, cmp);
8532 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8533 type_from_op (ins, sp [0], NULL);
8534 MONO_ADD_INS (bblock, ins);
8535 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8536 GET_BBLOCK (cfg, tblock, target);
8537 ins->inst_true_bb = tblock;
8538 GET_BBLOCK (cfg, tblock, ip);
8539 ins->inst_false_bb = tblock;
8540 start_new_bblock = 2;
8543 inline_costs += BRANCH_COST;
8558 MONO_INST_NEW (cfg, ins, *ip);
8560 target = ip + 4 + (gint32)read32(ip);
8566 inline_costs += BRANCH_COST;
8570 MonoBasicBlock **targets;
8571 MonoBasicBlock *default_bblock;
8572 MonoJumpInfoBBTable *table;
8573 int offset_reg = alloc_preg (cfg);
8574 int target_reg = alloc_preg (cfg);
8575 int table_reg = alloc_preg (cfg);
8576 int sum_reg = alloc_preg (cfg);
8577 gboolean use_op_switch;
8581 n = read32 (ip + 1);
8584 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8588 CHECK_OPSIZE (n * sizeof (guint32));
8589 target = ip + n * sizeof (guint32);
8591 GET_BBLOCK (cfg, default_bblock, target);
8592 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8594 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8595 for (i = 0; i < n; ++i) {
8596 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8597 targets [i] = tblock;
8598 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8602 if (sp != stack_start) {
8604 * Link the current bb with the targets as well, so handle_stack_args
8605 * will set their in_stack correctly.
8607 link_bblock (cfg, bblock, default_bblock);
8608 for (i = 0; i < n; ++i)
8609 link_bblock (cfg, bblock, targets [i]);
8611 handle_stack_args (cfg, stack_start, sp - stack_start);
8613 CHECK_UNVERIFIABLE (cfg);
8616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8617 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8620 for (i = 0; i < n; ++i)
8621 link_bblock (cfg, bblock, targets [i]);
8623 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8624 table->table = targets;
8625 table->table_size = n;
8627 use_op_switch = FALSE;
8629 /* ARM implements SWITCH statements differently */
8630 /* FIXME: Make it use the generic implementation */
8631 if (!cfg->compile_aot)
8632 use_op_switch = TRUE;
8635 if (COMPILE_LLVM (cfg))
8636 use_op_switch = TRUE;
8638 cfg->cbb->has_jump_table = 1;
8640 if (use_op_switch) {
8641 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8642 ins->sreg1 = src1->dreg;
8643 ins->inst_p0 = table;
8644 ins->inst_many_bb = targets;
8645 ins->klass = GUINT_TO_POINTER (n);
8646 MONO_ADD_INS (cfg->cbb, ins);
8648 if (sizeof (gpointer) == 8)
8649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8653 #if SIZEOF_REGISTER == 8
8654 /* The upper word might not be zero, and we add it to a 64 bit address later */
8655 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8658 if (cfg->compile_aot) {
8659 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8661 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8662 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8663 ins->inst_p0 = table;
8664 ins->dreg = table_reg;
8665 MONO_ADD_INS (cfg->cbb, ins);
8668 /* FIXME: Use load_memindex */
8669 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8671 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8673 start_new_bblock = 1;
8674 inline_costs += (BRANCH_COST * 2);
8694 dreg = alloc_freg (cfg);
8697 dreg = alloc_lreg (cfg);
8700 dreg = alloc_ireg_ref (cfg);
8703 dreg = alloc_preg (cfg);
8706 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8707 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8708 ins->flags |= ins_flag;
8710 MONO_ADD_INS (bblock, ins);
8712 if (ins->flags & MONO_INST_VOLATILE) {
8713 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8714 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8715 emit_memory_barrier (cfg, FullBarrier);
8730 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8731 ins->flags |= ins_flag;
8734 if (ins->flags & MONO_INST_VOLATILE) {
8735 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8736				/* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
8737 emit_memory_barrier (cfg, FullBarrier);
8740 MONO_ADD_INS (bblock, ins);
8742 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8743 emit_write_barrier (cfg, sp [0], sp [1]);
8752 MONO_INST_NEW (cfg, ins, (*ip));
8754 ins->sreg1 = sp [0]->dreg;
8755 ins->sreg2 = sp [1]->dreg;
8756 type_from_op (ins, sp [0], sp [1]);
8758 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8760 /* Use the immediate opcodes if possible */
8761 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8762 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8763 if (imm_opcode != -1) {
8764 ins->opcode = imm_opcode;
8765 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8768 sp [1]->opcode = OP_NOP;
8772 MONO_ADD_INS ((cfg)->cbb, (ins));
8774 *sp++ = mono_decompose_opcode (cfg, ins);
8791 MONO_INST_NEW (cfg, ins, (*ip));
8793 ins->sreg1 = sp [0]->dreg;
8794 ins->sreg2 = sp [1]->dreg;
8795 type_from_op (ins, sp [0], sp [1]);
8797 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8798 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8800 /* FIXME: Pass opcode to is_inst_imm */
8802 /* Use the immediate opcodes if possible */
8803 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8806 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8807 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8808 /* Keep emulated opcodes which are optimized away later */
8809 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8810 imm_opcode = mono_op_to_op_imm (ins->opcode);
8813 if (imm_opcode != -1) {
8814 ins->opcode = imm_opcode;
8815 if (sp [1]->opcode == OP_I8CONST) {
8816 #if SIZEOF_REGISTER == 8
8817 ins->inst_imm = sp [1]->inst_l;
8819 ins->inst_ls_word = sp [1]->inst_ls_word;
8820 ins->inst_ms_word = sp [1]->inst_ms_word;
8824 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8827 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8828 if (sp [1]->next == NULL)
8829 sp [1]->opcode = OP_NOP;
8832 MONO_ADD_INS ((cfg)->cbb, (ins));
8834 *sp++ = mono_decompose_opcode (cfg, ins);
8847 case CEE_CONV_OVF_I8:
8848 case CEE_CONV_OVF_U8:
8852 /* Special case this earlier so we have long constants in the IR */
8853 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8854 int data = sp [-1]->inst_c0;
8855 sp [-1]->opcode = OP_I8CONST;
8856 sp [-1]->type = STACK_I8;
8857 #if SIZEOF_REGISTER == 8
8858 if ((*ip) == CEE_CONV_U8)
8859 sp [-1]->inst_c0 = (guint32)data;
8861 sp [-1]->inst_c0 = data;
8863 sp [-1]->inst_ls_word = data;
8864 if ((*ip) == CEE_CONV_U8)
8865 sp [-1]->inst_ms_word = 0;
8867 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8869 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8876 case CEE_CONV_OVF_I4:
8877 case CEE_CONV_OVF_I1:
8878 case CEE_CONV_OVF_I2:
8879 case CEE_CONV_OVF_I:
8880 case CEE_CONV_OVF_U:
8883 if (sp [-1]->type == STACK_R8) {
8884 ADD_UNOP (CEE_CONV_OVF_I8);
8891 case CEE_CONV_OVF_U1:
8892 case CEE_CONV_OVF_U2:
8893 case CEE_CONV_OVF_U4:
8896 if (sp [-1]->type == STACK_R8) {
8897 ADD_UNOP (CEE_CONV_OVF_U8);
8904 case CEE_CONV_OVF_I1_UN:
8905 case CEE_CONV_OVF_I2_UN:
8906 case CEE_CONV_OVF_I4_UN:
8907 case CEE_CONV_OVF_I8_UN:
8908 case CEE_CONV_OVF_U1_UN:
8909 case CEE_CONV_OVF_U2_UN:
8910 case CEE_CONV_OVF_U4_UN:
8911 case CEE_CONV_OVF_U8_UN:
8912 case CEE_CONV_OVF_I_UN:
8913 case CEE_CONV_OVF_U_UN:
8920 CHECK_CFG_EXCEPTION;
8924 case CEE_ADD_OVF_UN:
8926 case CEE_MUL_OVF_UN:
8928 case CEE_SUB_OVF_UN:
8934 GSHAREDVT_FAILURE (*ip);
8937 token = read32 (ip + 1);
8938 klass = mini_get_class (method, token, generic_context);
8939 CHECK_TYPELOAD (klass);
8941 if (generic_class_is_reference_type (cfg, klass)) {
8942 MonoInst *store, *load;
8943 int dreg = alloc_ireg_ref (cfg);
8945 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8946 load->flags |= ins_flag;
8947 MONO_ADD_INS (cfg->cbb, load);
8949 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8950 store->flags |= ins_flag;
8951 MONO_ADD_INS (cfg->cbb, store);
8953 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8954 emit_write_barrier (cfg, sp [0], sp [1]);
8956 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8968 token = read32 (ip + 1);
8969 klass = mini_get_class (method, token, generic_context);
8970 CHECK_TYPELOAD (klass);
8972 /* Optimize the common ldobj+stloc combination */
8982 loc_index = ip [5] - CEE_STLOC_0;
8989 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8990 CHECK_LOCAL (loc_index);
8992 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8993 ins->dreg = cfg->locals [loc_index]->dreg;
8999 /* Optimize the ldobj+stobj combination */
9000 /* The reference case ends up being a load+store anyway */
9001 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9006 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9013 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9022 CHECK_STACK_OVF (1);
9024 n = read32 (ip + 1);
9026 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9027 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9028 ins->type = STACK_OBJ;
9031 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9032 MonoInst *iargs [1];
9034 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9035 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9037 if (cfg->opt & MONO_OPT_SHARED) {
9038 MonoInst *iargs [3];
9040 if (cfg->compile_aot) {
9041 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9043 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9044 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9045 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9046 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9047 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9049 if (bblock->out_of_line) {
9050 MonoInst *iargs [2];
9052 if (image == mono_defaults.corlib) {
9054 * Avoid relocations in AOT and save some space by using a
9055 * version of helper_ldstr specialized to mscorlib.
9057 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9058 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9060 /* Avoid creating the string object */
9061 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9062 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9063 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9067 if (cfg->compile_aot) {
9068 NEW_LDSTRCONST (cfg, ins, image, n);
9070 MONO_ADD_INS (bblock, ins);
9073 NEW_PCONST (cfg, ins, NULL);
9074 ins->type = STACK_OBJ;
9075 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9077 OUT_OF_MEMORY_FAILURE;
9080 MONO_ADD_INS (bblock, ins);
9089 MonoInst *iargs [2];
9090 MonoMethodSignature *fsig;
9093 MonoInst *vtable_arg = NULL;
9096 token = read32 (ip + 1);
9097 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9098 if (!cmethod || mono_loader_get_last_error ())
9100 fsig = mono_method_get_signature (cmethod, image, token);
9104 mono_save_token_info (cfg, image, token, cmethod);
9106 if (!mono_class_init (cmethod->klass))
9107 TYPE_LOAD_ERROR (cmethod->klass);
9109 context_used = mini_method_check_context_used (cfg, cmethod);
9111 if (mono_security_cas_enabled ()) {
9112 if (check_linkdemand (cfg, method, cmethod))
9113 INLINE_FAILURE ("linkdemand");
9114 CHECK_CFG_EXCEPTION;
9115 } else if (mono_security_core_clr_enabled ()) {
9116 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9119 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9120 emit_generic_class_init (cfg, cmethod->klass);
9121 CHECK_TYPELOAD (cmethod->klass);
9125 if (cfg->gsharedvt) {
9126 if (mini_is_gsharedvt_variable_signature (sig))
9127 GSHAREDVT_FAILURE (*ip);
9131 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9132 mono_method_is_generic_sharable (cmethod, TRUE)) {
9133 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9134 mono_class_vtable (cfg->domain, cmethod->klass);
9135 CHECK_TYPELOAD (cmethod->klass);
9137 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9138 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9141 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9142 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9144 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9146 CHECK_TYPELOAD (cmethod->klass);
9147 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9152 n = fsig->param_count;
9156 * Generate smaller code for the common newobj <exception> instruction in
9157 * argument checking code.
9159 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9160 is_exception_class (cmethod->klass) && n <= 2 &&
9161 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9162 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9163 MonoInst *iargs [3];
9165 g_assert (!vtable_arg);
9169 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9172 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9176 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9181 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9184 g_assert_not_reached ();
9192 /* move the args to allow room for 'this' in the first position */
9198 /* check_call_signature () requires sp[0] to be set */
9199 this_ins.type = STACK_OBJ;
9201 if (check_call_signature (cfg, fsig, sp))
9206 if (mini_class_is_system_array (cmethod->klass)) {
9207 g_assert (!vtable_arg);
9209 *sp = emit_get_rgctx_method (cfg, context_used,
9210 cmethod, MONO_RGCTX_INFO_METHOD);
9212 /* Avoid varargs in the common case */
9213 if (fsig->param_count == 1)
9214 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9215 else if (fsig->param_count == 2)
9216 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9217 else if (fsig->param_count == 3)
9218 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9219 else if (fsig->param_count == 4)
9220 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9222 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9223 } else if (cmethod->string_ctor) {
9224 g_assert (!context_used);
9225 g_assert (!vtable_arg);
9226 /* we simply pass a null pointer */
9227 EMIT_NEW_PCONST (cfg, *sp, NULL);
9228 /* now call the string ctor */
9229 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9231 MonoInst* callvirt_this_arg = NULL;
9233 if (cmethod->klass->valuetype) {
9234 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9235 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9236 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9241 * The code generated by mini_emit_virtual_call () expects
9242 * iargs [0] to be a boxed instance, but luckily the vcall
9243 * will be transformed into a normal call there.
9245 } else if (context_used) {
9246 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9249 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9251 CHECK_TYPELOAD (cmethod->klass);
9254 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9255 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9256 * As a workaround, we call class cctors before allocating objects.
9258 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9259 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9260 if (cfg->verbose_level > 2)
9261 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9262 class_inits = g_slist_prepend (class_inits, vtable);
9265 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9268 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9271 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9273 /* Now call the actual ctor */
9274 /* Avoid virtual calls to ctors if possible */
9275 if (mono_class_is_marshalbyref (cmethod->klass))
9276 callvirt_this_arg = sp [0];
9279 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9280 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9281 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9286 CHECK_CFG_EXCEPTION;
9287 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9288 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9289 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9290 !g_list_find (dont_inline, cmethod)) {
9293 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9294 cfg->real_offset += 5;
9297 inline_costs += costs - 5;
9299 INLINE_FAILURE ("inline failure");
9300 // FIXME-VT: Clean this up
9301 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9302 GSHAREDVT_FAILURE(*ip);
9303 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9305 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9308 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9309 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9310 } else if (context_used &&
9311 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9312 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9313 MonoInst *cmethod_addr;
9315 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9316 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9318 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9320 INLINE_FAILURE ("ctor call");
9321 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9322 callvirt_this_arg, NULL, vtable_arg);
9326 if (alloc == NULL) {
9328 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9329 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9343 token = read32 (ip + 1);
9344 klass = mini_get_class (method, token, generic_context);
9345 CHECK_TYPELOAD (klass);
9346 if (sp [0]->type != STACK_OBJ)
9349 context_used = mini_class_check_context_used (cfg, klass);
9351 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9352 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9359 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9362 if (cfg->compile_aot)
9363 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9365 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9367 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9369 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9370 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9371 reset_cast_details (cfg);
9374 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9375 MonoMethod *mono_castclass;
9376 MonoInst *iargs [1];
9379 mono_castclass = mono_marshal_get_castclass (klass);
9382 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9383 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9384 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9385 reset_cast_details (cfg);
9386 CHECK_CFG_EXCEPTION;
9387 g_assert (costs > 0);
9390 cfg->real_offset += 5;
9395 inline_costs += costs;
9398 ins = handle_castclass (cfg, klass, *sp, context_used);
9399 CHECK_CFG_EXCEPTION;
9409 token = read32 (ip + 1);
9410 klass = mini_get_class (method, token, generic_context);
9411 CHECK_TYPELOAD (klass);
9412 if (sp [0]->type != STACK_OBJ)
9415 context_used = mini_class_check_context_used (cfg, klass);
9417 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9418 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9425 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9428 if (cfg->compile_aot)
9429 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9431 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9433 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9436 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9437 MonoMethod *mono_isinst;
9438 MonoInst *iargs [1];
9441 mono_isinst = mono_marshal_get_isinst (klass);
9444 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9445 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9446 CHECK_CFG_EXCEPTION;
9447 g_assert (costs > 0);
9450 cfg->real_offset += 5;
9455 inline_costs += costs;
9458 ins = handle_isinst (cfg, klass, *sp, context_used);
9459 CHECK_CFG_EXCEPTION;
9466 case CEE_UNBOX_ANY: {
9470 token = read32 (ip + 1);
9471 klass = mini_get_class (method, token, generic_context);
9472 CHECK_TYPELOAD (klass);
9474 mono_save_token_info (cfg, image, token, klass);
9476 context_used = mini_class_check_context_used (cfg, klass);
9478 if (mini_is_gsharedvt_klass (cfg, klass)) {
9479 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9487 if (generic_class_is_reference_type (cfg, klass)) {
9488 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9489 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9490 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9497 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9500 /*FIXME AOT support*/
9501 if (cfg->compile_aot)
9502 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9504 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9506 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9507 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9510 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9511 MonoMethod *mono_castclass;
9512 MonoInst *iargs [1];
9515 mono_castclass = mono_marshal_get_castclass (klass);
9518 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9519 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9520 CHECK_CFG_EXCEPTION;
9521 g_assert (costs > 0);
9524 cfg->real_offset += 5;
9528 inline_costs += costs;
9530 ins = handle_castclass (cfg, klass, *sp, context_used);
9531 CHECK_CFG_EXCEPTION;
9539 if (mono_class_is_nullable (klass)) {
9540 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9547 ins = handle_unbox (cfg, klass, sp, context_used);
9553 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9566 token = read32 (ip + 1);
9567 klass = mini_get_class (method, token, generic_context);
9568 CHECK_TYPELOAD (klass);
9570 mono_save_token_info (cfg, image, token, klass);
9572 context_used = mini_class_check_context_used (cfg, klass);
9574 if (generic_class_is_reference_type (cfg, klass)) {
9580 if (klass == mono_defaults.void_class)
9582 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9584 /* frequent check in generic code: box (struct), brtrue */
9586 // FIXME: LLVM can't handle the inconsistent bb linking
9587 if (!mono_class_is_nullable (klass) &&
9588 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9589 (ip [5] == CEE_BRTRUE ||
9590 ip [5] == CEE_BRTRUE_S ||
9591 ip [5] == CEE_BRFALSE ||
9592 ip [5] == CEE_BRFALSE_S)) {
9593 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9595 MonoBasicBlock *true_bb, *false_bb;
9599 if (cfg->verbose_level > 3) {
9600 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9601 printf ("<box+brtrue opt>\n");
9609 target = ip + 1 + (signed char)(*ip);
9616 target = ip + 4 + (gint)(read32 (ip));
9620 g_assert_not_reached ();
9624 * We need to link both bblocks, since it is needed for handling stack
9625 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9626 * Branching to only one of them would lead to inconsistencies, so
9627 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9629 GET_BBLOCK (cfg, true_bb, target);
9630 GET_BBLOCK (cfg, false_bb, ip);
9632 mono_link_bblock (cfg, cfg->cbb, true_bb);
9633 mono_link_bblock (cfg, cfg->cbb, false_bb);
9635 if (sp != stack_start) {
9636 handle_stack_args (cfg, stack_start, sp - stack_start);
9638 CHECK_UNVERIFIABLE (cfg);
9641 if (COMPILE_LLVM (cfg)) {
9642 dreg = alloc_ireg (cfg);
9643 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9646 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9648 /* The JIT can't eliminate the iconst+compare */
9649 MONO_INST_NEW (cfg, ins, OP_BR);
9650 ins->inst_target_bb = is_true ? true_bb : false_bb;
9651 MONO_ADD_INS (cfg->cbb, ins);
9654 start_new_bblock = 1;
9658 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9660 CHECK_CFG_EXCEPTION;
9669 token = read32 (ip + 1);
9670 klass = mini_get_class (method, token, generic_context);
9671 CHECK_TYPELOAD (klass);
9673 mono_save_token_info (cfg, image, token, klass);
9675 context_used = mini_class_check_context_used (cfg, klass);
9677 if (mono_class_is_nullable (klass)) {
9680 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9681 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9685 ins = handle_unbox (cfg, klass, sp, context_used);
9698 MonoClassField *field;
9699 #ifndef DISABLE_REMOTING
9703 gboolean is_instance;
9705 gpointer addr = NULL;
9706 gboolean is_special_static;
9708 MonoInst *store_val = NULL;
9711 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9713 if (op == CEE_STFLD) {
9721 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9723 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9726 if (op == CEE_STSFLD) {
9734 token = read32 (ip + 1);
9735 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9736 field = mono_method_get_wrapper_data (method, token);
9737 klass = field->parent;
9740 field = mono_field_from_token (image, token, &klass, generic_context);
9744 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9745 FIELD_ACCESS_FAILURE;
9746 mono_class_init (klass);
9748 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9751 /* if the class is Critical then transparent code cannot access its fields */
9752 if (!is_instance && mono_security_core_clr_enabled ())
9753 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9755 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9756 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9757 if (mono_security_core_clr_enabled ())
9758 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9762 * LDFLD etc. is usable on static fields as well, so convert those cases to
9765 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9777 g_assert_not_reached ();
9779 is_instance = FALSE;
9782 context_used = mini_class_check_context_used (cfg, klass);
9786 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9787 if (op == CEE_STFLD) {
9788 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9790 #ifndef DISABLE_REMOTING
9791 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9792 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9793 MonoInst *iargs [5];
9795 GSHAREDVT_FAILURE (op);
9798 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9799 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9800 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9804 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9805 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9806 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9807 CHECK_CFG_EXCEPTION;
9808 g_assert (costs > 0);
9810 cfg->real_offset += 5;
9813 inline_costs += costs;
9815 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9822 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9824 if (mini_is_gsharedvt_klass (cfg, klass)) {
9825 MonoInst *offset_ins;
9827 context_used = mini_class_check_context_used (cfg, klass);
9829 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9830 dreg = alloc_ireg_mp (cfg);
9831 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9832 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9837 if (sp [0]->opcode != OP_LDADDR)
9838 store->flags |= MONO_INST_FAULT;
9840 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9841 /* insert call to write barrier */
9845 dreg = alloc_ireg_mp (cfg);
9846 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9847 emit_write_barrier (cfg, ptr, sp [1]);
9850 store->flags |= ins_flag;
9857 #ifndef DISABLE_REMOTING
9858 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9859 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9860 MonoInst *iargs [4];
9862 GSHAREDVT_FAILURE (op);
9865 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9866 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9867 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9868 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9869 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9870 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9871 CHECK_CFG_EXCEPTION;
9873 g_assert (costs > 0);
9875 cfg->real_offset += 5;
9879 inline_costs += costs;
9881 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9887 if (sp [0]->type == STACK_VTYPE) {
9890 /* Have to compute the address of the variable */
9892 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9894 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9896 g_assert (var->klass == klass);
9898 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9902 if (op == CEE_LDFLDA) {
9903 if (is_magic_tls_access (field)) {
9904 GSHAREDVT_FAILURE (*ip);
9906 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9908 if (sp [0]->type == STACK_OBJ) {
9909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9910 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9913 dreg = alloc_ireg_mp (cfg);
9915 if (mini_is_gsharedvt_klass (cfg, klass)) {
9916 MonoInst *offset_ins;
9918 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9919 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9921 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9923 ins->klass = mono_class_from_mono_type (field->type);
9924 ins->type = STACK_MP;
9930 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9932 if (mini_is_gsharedvt_klass (cfg, klass)) {
9933 MonoInst *offset_ins;
9935 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9936 dreg = alloc_ireg_mp (cfg);
9937 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9940 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9942 load->flags |= ins_flag;
9943 if (sp [0]->opcode != OP_LDADDR)
9944 load->flags |= MONO_INST_FAULT;
9958 * We can only support shared generic static
9959 * field access on architectures where the
9960 * trampoline code has been extended to handle
9961 * the generic class init.
9963 #ifndef MONO_ARCH_VTABLE_REG
9964 GENERIC_SHARING_FAILURE (op);
9967 context_used = mini_class_check_context_used (cfg, klass);
9969 ftype = mono_field_get_type (field);
9971 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9974 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9975 * to be called here.
9977 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9978 mono_class_vtable (cfg->domain, klass);
9979 CHECK_TYPELOAD (klass);
9981 mono_domain_lock (cfg->domain);
9982 if (cfg->domain->special_static_fields)
9983 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9984 mono_domain_unlock (cfg->domain);
9986 is_special_static = mono_class_field_is_special_static (field);
9988 /* Generate IR to compute the field address */
9989 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9991 * Fast access to TLS data
9992 * Inline version of get_thread_static_data () in
9996 int idx, static_data_reg, array_reg, dreg;
9997 MonoInst *thread_ins;
9999 GSHAREDVT_FAILURE (op);
10001 // offset &= 0x7fffffff;
10002 // idx = (offset >> 24) - 1;
10003 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10005 thread_ins = mono_get_thread_intrinsic (cfg);
10006 MONO_ADD_INS (cfg->cbb, thread_ins);
10007 static_data_reg = alloc_ireg (cfg);
10008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10010 if (cfg->compile_aot) {
10011 int offset_reg, offset2_reg, idx_reg;
10013 /* For TLS variables, this will return the TLS offset */
10014 EMIT_NEW_SFLDACONST (cfg, ins, field);
10015 offset_reg = ins->dreg;
10016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10017 idx_reg = alloc_ireg (cfg);
10018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10019 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10021 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10022 array_reg = alloc_ireg (cfg);
10023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10024 offset2_reg = alloc_ireg (cfg);
10025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10026 dreg = alloc_ireg (cfg);
10027 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10029 offset = (gsize)addr & 0x7fffffff;
10030 idx = (offset >> 24) - 1;
10032 array_reg = alloc_ireg (cfg);
10033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10034 dreg = alloc_ireg (cfg);
10035 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10037 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10038 (cfg->compile_aot && is_special_static) ||
10039 (context_used && is_special_static)) {
10040 MonoInst *iargs [2];
10042 g_assert (field->parent);
10043 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10044 if (context_used) {
10045 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10046 field, MONO_RGCTX_INFO_CLASS_FIELD);
10048 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10050 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10051 } else if (context_used) {
10052 MonoInst *static_data;
10055 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10056 method->klass->name_space, method->klass->name, method->name,
10057 depth, field->offset);
10060 if (mono_class_needs_cctor_run (klass, method))
10061 emit_generic_class_init (cfg, klass);
10064 * The pointer we're computing here is
10066 * super_info.static_data + field->offset
10068 static_data = emit_get_rgctx_klass (cfg, context_used,
10069 klass, MONO_RGCTX_INFO_STATIC_DATA);
10071 if (mini_is_gsharedvt_klass (cfg, klass)) {
10072 MonoInst *offset_ins;
10074 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10075 dreg = alloc_ireg_mp (cfg);
10076 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10077 } else if (field->offset == 0) {
10080 int addr_reg = mono_alloc_preg (cfg);
10081 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10083 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10084 MonoInst *iargs [2];
10086 g_assert (field->parent);
10087 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10088 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10089 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10091 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
10093 CHECK_TYPELOAD (klass);
10095 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
10096 if (!(g_slist_find (class_inits, vtable))) {
10097 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
10098 if (cfg->verbose_level > 2)
10099 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10100 class_inits = g_slist_prepend (class_inits, vtable);
10103 if (cfg->run_cctors) {
10105 /* This makes it so that inlining cannot trigger */
10106 /* .cctors: too many apps depend on them */
10107 /* running with a specific order... */
10108 if (! vtable->initialized)
10109 INLINE_FAILURE ("class init");
10110 ex = mono_runtime_class_init_full (vtable, FALSE);
10112 set_exception_object (cfg, ex);
10113 goto exception_exit;
10117 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10119 if (cfg->compile_aot)
10120 EMIT_NEW_SFLDACONST (cfg, ins, field);
10122 EMIT_NEW_PCONST (cfg, ins, addr);
10124 MonoInst *iargs [1];
10125 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10126 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10130 /* Generate IR to do the actual load/store operation */
10132 if (op == CEE_LDSFLDA) {
10133 ins->klass = mono_class_from_mono_type (ftype);
10134 ins->type = STACK_PTR;
10136 } else if (op == CEE_STSFLD) {
10139 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10140 store->flags |= ins_flag;
10142 gboolean is_const = FALSE;
10143 MonoVTable *vtable = NULL;
10144 gpointer addr = NULL;
10146 if (!context_used) {
10147 vtable = mono_class_vtable (cfg->domain, klass);
10148 CHECK_TYPELOAD (klass);
10150 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10151 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10152 int ro_type = ftype->type;
10154 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10155 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10156 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10159 GSHAREDVT_FAILURE (op);
10161 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10164 case MONO_TYPE_BOOLEAN:
10166 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10170 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10173 case MONO_TYPE_CHAR:
10175 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10179 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10184 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10188 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10193 case MONO_TYPE_PTR:
10194 case MONO_TYPE_FNPTR:
10195 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10196 type_to_eval_stack_type ((cfg), field->type, *sp);
10199 case MONO_TYPE_STRING:
10200 case MONO_TYPE_OBJECT:
10201 case MONO_TYPE_CLASS:
10202 case MONO_TYPE_SZARRAY:
10203 case MONO_TYPE_ARRAY:
10204 if (!mono_gc_is_moving ()) {
10205 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10206 type_to_eval_stack_type ((cfg), field->type, *sp);
10214 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10219 case MONO_TYPE_VALUETYPE:
10229 CHECK_STACK_OVF (1);
10231 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10232 load->flags |= ins_flag;
10245 token = read32 (ip + 1);
10246 klass = mini_get_class (method, token, generic_context);
10247 CHECK_TYPELOAD (klass);
10248 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10249 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10250 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10251 generic_class_is_reference_type (cfg, klass)) {
10252 /* insert call to write barrier */
10253 emit_write_barrier (cfg, sp [0], sp [1]);
10265 const char *data_ptr;
10267 guint32 field_token;
10273 token = read32 (ip + 1);
10275 klass = mini_get_class (method, token, generic_context);
10276 CHECK_TYPELOAD (klass);
10278 context_used = mini_class_check_context_used (cfg, klass);
10280 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10281 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10282 ins->sreg1 = sp [0]->dreg;
10283 ins->type = STACK_I4;
10284 ins->dreg = alloc_ireg (cfg);
10285 MONO_ADD_INS (cfg->cbb, ins);
10286 *sp = mono_decompose_opcode (cfg, ins);
10289 if (context_used) {
10290 MonoInst *args [3];
10291 MonoClass *array_class = mono_array_class_get (klass, 1);
10292 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10294 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10297 args [0] = emit_get_rgctx_klass (cfg, context_used,
10298 array_class, MONO_RGCTX_INFO_VTABLE);
10303 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10305 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10307 if (cfg->opt & MONO_OPT_SHARED) {
10308 /* Decompose now to avoid problems with references to the domainvar */
10309 MonoInst *iargs [3];
10311 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10312 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10313 iargs [2] = sp [0];
10315 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10317 /* Decompose later since it is needed by abcrem */
10318 MonoClass *array_type = mono_array_class_get (klass, 1);
10319 mono_class_vtable (cfg->domain, array_type);
10320 CHECK_TYPELOAD (array_type);
10322 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10323 ins->dreg = alloc_ireg_ref (cfg);
10324 ins->sreg1 = sp [0]->dreg;
10325 ins->inst_newa_class = klass;
10326 ins->type = STACK_OBJ;
10327 ins->klass = array_type;
10328 MONO_ADD_INS (cfg->cbb, ins);
10329 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10330 cfg->cbb->has_array_access = TRUE;
10332 /* Needed so mono_emit_load_get_addr () gets called */
10333 mono_get_got_var (cfg);
10343 * we inline/optimize the initialization sequence if possible.
10344 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10345 * for small sizes open code the memcpy
10346 * ensure the rva field is big enough
10348 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10349 MonoMethod *memcpy_method = get_memcpy_method ();
10350 MonoInst *iargs [3];
10351 int add_reg = alloc_ireg_mp (cfg);
10353 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10354 if (cfg->compile_aot) {
10355 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10357 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10359 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10360 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10369 if (sp [0]->type != STACK_OBJ)
10372 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10373 ins->dreg = alloc_preg (cfg);
10374 ins->sreg1 = sp [0]->dreg;
10375 ins->type = STACK_I4;
10376 /* This flag will be inherited by the decomposition */
10377 ins->flags |= MONO_INST_FAULT;
10378 MONO_ADD_INS (cfg->cbb, ins);
10379 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10380 cfg->cbb->has_array_access = TRUE;
10388 if (sp [0]->type != STACK_OBJ)
10391 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10393 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10394 CHECK_TYPELOAD (klass);
10395 /* we need to make sure that this array is exactly the type it needs
10396 * to be for correctness. the wrappers are lax with their usage
10397 * so we need to ignore them here
10399 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10400 MonoClass *array_class = mono_array_class_get (klass, 1);
10401 mini_emit_check_array_type (cfg, sp [0], array_class);
10402 CHECK_TYPELOAD (array_class);
10406 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10411 case CEE_LDELEM_I1:
10412 case CEE_LDELEM_U1:
10413 case CEE_LDELEM_I2:
10414 case CEE_LDELEM_U2:
10415 case CEE_LDELEM_I4:
10416 case CEE_LDELEM_U4:
10417 case CEE_LDELEM_I8:
10419 case CEE_LDELEM_R4:
10420 case CEE_LDELEM_R8:
10421 case CEE_LDELEM_REF: {
10427 if (*ip == CEE_LDELEM) {
10429 token = read32 (ip + 1);
10430 klass = mini_get_class (method, token, generic_context);
10431 CHECK_TYPELOAD (klass);
10432 mono_class_init (klass);
10435 klass = array_access_to_klass (*ip);
10437 if (sp [0]->type != STACK_OBJ)
10440 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10442 if (mini_is_gsharedvt_klass (cfg, klass)) {
10443 // FIXME-VT: OP_ICONST optimization
10444 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10445 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10446 ins->opcode = OP_LOADV_MEMBASE;
10447 } else if (sp [1]->opcode == OP_ICONST) {
10448 int array_reg = sp [0]->dreg;
10449 int index_reg = sp [1]->dreg;
10450 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10452 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10453 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10455 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10456 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10459 if (*ip == CEE_LDELEM)
10466 case CEE_STELEM_I1:
10467 case CEE_STELEM_I2:
10468 case CEE_STELEM_I4:
10469 case CEE_STELEM_I8:
10470 case CEE_STELEM_R4:
10471 case CEE_STELEM_R8:
10472 case CEE_STELEM_REF:
10477 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10479 if (*ip == CEE_STELEM) {
10481 token = read32 (ip + 1);
10482 klass = mini_get_class (method, token, generic_context);
10483 CHECK_TYPELOAD (klass);
10484 mono_class_init (klass);
10487 klass = array_access_to_klass (*ip);
10489 if (sp [0]->type != STACK_OBJ)
10492 emit_array_store (cfg, klass, sp, TRUE);
10494 if (*ip == CEE_STELEM)
10501 case CEE_CKFINITE: {
10505 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10506 ins->sreg1 = sp [0]->dreg;
10507 ins->dreg = alloc_freg (cfg);
10508 ins->type = STACK_R8;
10509 MONO_ADD_INS (bblock, ins);
10511 *sp++ = mono_decompose_opcode (cfg, ins);
10516 case CEE_REFANYVAL: {
10517 MonoInst *src_var, *src;
10519 int klass_reg = alloc_preg (cfg);
10520 int dreg = alloc_preg (cfg);
10522 GSHAREDVT_FAILURE (*ip);
10525 MONO_INST_NEW (cfg, ins, *ip);
10528 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10529 CHECK_TYPELOAD (klass);
10530 mono_class_init (klass);
10532 context_used = mini_class_check_context_used (cfg, klass);
10535 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10537 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10538 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10541 if (context_used) {
10542 MonoInst *klass_ins;
10544 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10545 klass, MONO_RGCTX_INFO_KLASS);
10548 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10549 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10551 mini_emit_class_check (cfg, klass_reg, klass);
10553 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10554 ins->type = STACK_MP;
10559 case CEE_MKREFANY: {
10560 MonoInst *loc, *addr;
10562 GSHAREDVT_FAILURE (*ip);
10565 MONO_INST_NEW (cfg, ins, *ip);
10568 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10569 CHECK_TYPELOAD (klass);
10570 mono_class_init (klass);
10572 context_used = mini_class_check_context_used (cfg, klass);
10574 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10575 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10577 if (context_used) {
10578 MonoInst *const_ins;
10579 int type_reg = alloc_preg (cfg);
10581 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10582 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10584 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10585 } else if (cfg->compile_aot) {
10586 int const_reg = alloc_preg (cfg);
10587 int type_reg = alloc_preg (cfg);
10589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10590 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10592 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10594 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10595 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10597 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10599 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10600 ins->type = STACK_VTYPE;
10601 ins->klass = mono_defaults.typed_reference_class;
10606 case CEE_LDTOKEN: {
10608 MonoClass *handle_class;
10610 CHECK_STACK_OVF (1);
10613 n = read32 (ip + 1);
10615 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10616 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10617 handle = mono_method_get_wrapper_data (method, n);
10618 handle_class = mono_method_get_wrapper_data (method, n + 1);
10619 if (handle_class == mono_defaults.typehandle_class)
10620 handle = &((MonoClass*)handle)->byval_arg;
10623 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10627 mono_class_init (handle_class);
10628 if (cfg->generic_sharing_context) {
10629 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10630 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10631 /* This case handles ldtoken
10632 of an open type, like for
10635 } else if (handle_class == mono_defaults.typehandle_class) {
10636 /* If we get a MONO_TYPE_CLASS
10637 then we need to provide the
10639 instantiation of it. */
10640 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10643 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10644 } else if (handle_class == mono_defaults.fieldhandle_class)
10645 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10646 else if (handle_class == mono_defaults.methodhandle_class)
10647 context_used = mini_method_check_context_used (cfg, handle);
10649 g_assert_not_reached ();
10652 if ((cfg->opt & MONO_OPT_SHARED) &&
10653 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10654 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10655 MonoInst *addr, *vtvar, *iargs [3];
10656 int method_context_used;
10658 method_context_used = mini_method_check_context_used (cfg, method);
10660 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10662 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10663 EMIT_NEW_ICONST (cfg, iargs [1], n);
10664 if (method_context_used) {
10665 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10666 method, MONO_RGCTX_INFO_METHOD);
10667 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10669 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10670 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10672 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10676 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10678 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10679 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10680 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10681 (cmethod->klass == mono_defaults.systemtype_class) &&
10682 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10683 MonoClass *tclass = mono_class_from_mono_type (handle);
10685 mono_class_init (tclass);
10686 if (context_used) {
10687 ins = emit_get_rgctx_klass (cfg, context_used,
10688 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10689 } else if (cfg->compile_aot) {
10690 if (method->wrapper_type) {
10691 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10692 /* Special case for static synchronized wrappers */
10693 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10695 /* FIXME: n is not a normal token */
10696 cfg->disable_aot = TRUE;
10697 EMIT_NEW_PCONST (cfg, ins, NULL);
10700 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10703 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10705 ins->type = STACK_OBJ;
10706 ins->klass = cmethod->klass;
10709 MonoInst *addr, *vtvar;
10711 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10713 if (context_used) {
10714 if (handle_class == mono_defaults.typehandle_class) {
10715 ins = emit_get_rgctx_klass (cfg, context_used,
10716 mono_class_from_mono_type (handle),
10717 MONO_RGCTX_INFO_TYPE);
10718 } else if (handle_class == mono_defaults.methodhandle_class) {
10719 ins = emit_get_rgctx_method (cfg, context_used,
10720 handle, MONO_RGCTX_INFO_METHOD);
10721 } else if (handle_class == mono_defaults.fieldhandle_class) {
10722 ins = emit_get_rgctx_field (cfg, context_used,
10723 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10725 g_assert_not_reached ();
10727 } else if (cfg->compile_aot) {
10728 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10730 EMIT_NEW_PCONST (cfg, ins, handle);
10732 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10734 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10744 MONO_INST_NEW (cfg, ins, OP_THROW);
10746 ins->sreg1 = sp [0]->dreg;
10748 bblock->out_of_line = TRUE;
10749 MONO_ADD_INS (bblock, ins);
10750 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10751 MONO_ADD_INS (bblock, ins);
10754 link_bblock (cfg, bblock, end_bblock);
10755 start_new_bblock = 1;
10757 case CEE_ENDFINALLY:
10758 /* mono_save_seq_point_info () depends on this */
10759 if (sp != stack_start)
10760 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10761 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10762 MONO_ADD_INS (bblock, ins);
10764 start_new_bblock = 1;
10767 * Control will leave the method so empty the stack, otherwise
10768 * the next basic block will start with a nonempty stack.
10770 while (sp != stack_start) {
10775 case CEE_LEAVE_S: {
10778 if (*ip == CEE_LEAVE) {
10780 target = ip + 5 + (gint32)read32(ip + 1);
10783 target = ip + 2 + (signed char)(ip [1]);
10786 /* empty the stack */
10787 while (sp != stack_start) {
10792 * If this leave statement is in a catch block, check for a
10793 * pending exception, and rethrow it if necessary.
10794 * We avoid doing this in runtime invoke wrappers, since those are called
10795 * by native code which excepts the wrapper to catch all exceptions.
10797 for (i = 0; i < header->num_clauses; ++i) {
10798 MonoExceptionClause *clause = &header->clauses [i];
10801 * Use <= in the final comparison to handle clauses with multiple
10802 * leave statements, like in bug #78024.
10803 * The ordering of the exception clauses guarantees that we find the
10804 * innermost clause.
10806 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10808 MonoBasicBlock *dont_throw;
10813 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10816 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10818 NEW_BBLOCK (cfg, dont_throw);
10821 * Currently, we always rethrow the abort exception, despite the
10822 * fact that this is not correct. See thread6.cs for an example.
10823 * But propagating the abort exception is more important than
10824 * getting the sematics right.
10826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10827 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10828 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10830 MONO_START_BB (cfg, dont_throw);
10835 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10837 MonoExceptionClause *clause;
10839 for (tmp = handlers; tmp; tmp = tmp->next) {
10840 clause = tmp->data;
10841 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10843 link_bblock (cfg, bblock, tblock);
10844 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10845 ins->inst_target_bb = tblock;
10846 ins->inst_eh_block = clause;
10847 MONO_ADD_INS (bblock, ins);
10848 bblock->has_call_handler = 1;
10849 if (COMPILE_LLVM (cfg)) {
10850 MonoBasicBlock *target_bb;
10853 * Link the finally bblock with the target, since it will
10854 * conceptually branch there.
10855 * FIXME: Have to link the bblock containing the endfinally.
10857 GET_BBLOCK (cfg, target_bb, target);
10858 link_bblock (cfg, tblock, target_bb);
10861 g_list_free (handlers);
10864 MONO_INST_NEW (cfg, ins, OP_BR);
10865 MONO_ADD_INS (bblock, ins);
10866 GET_BBLOCK (cfg, tblock, target);
10867 link_bblock (cfg, bblock, tblock);
10868 ins->inst_target_bb = tblock;
10869 start_new_bblock = 1;
10871 if (*ip == CEE_LEAVE)
10880 * Mono specific opcodes
10882 case MONO_CUSTOM_PREFIX: {
10884 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10888 case CEE_MONO_ICALL: {
10890 MonoJitICallInfo *info;
10892 token = read32 (ip + 2);
10893 func = mono_method_get_wrapper_data (method, token);
10894 info = mono_find_jit_icall_by_addr (func);
10896 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10899 CHECK_STACK (info->sig->param_count);
10900 sp -= info->sig->param_count;
10902 ins = mono_emit_jit_icall (cfg, info->func, sp);
10903 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10907 inline_costs += 10 * num_calls++;
10911 case CEE_MONO_LDPTR: {
10914 CHECK_STACK_OVF (1);
10916 token = read32 (ip + 2);
10918 ptr = mono_method_get_wrapper_data (method, token);
10919 /* FIXME: Generalize this */
10920 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10921 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10926 EMIT_NEW_PCONST (cfg, ins, ptr);
10929 inline_costs += 10 * num_calls++;
10930 /* Can't embed random pointers into AOT code */
10931 cfg->disable_aot = 1;
10934 case CEE_MONO_JIT_ICALL_ADDR: {
10935 MonoJitICallInfo *callinfo;
10938 CHECK_STACK_OVF (1);
10940 token = read32 (ip + 2);
10942 ptr = mono_method_get_wrapper_data (method, token);
10943 callinfo = mono_find_jit_icall_by_addr (ptr);
10944 g_assert (callinfo);
10945 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10948 inline_costs += 10 * num_calls++;
10951 case CEE_MONO_ICALL_ADDR: {
10952 MonoMethod *cmethod;
10955 CHECK_STACK_OVF (1);
10957 token = read32 (ip + 2);
10959 cmethod = mono_method_get_wrapper_data (method, token);
10961 if (cfg->compile_aot) {
10962 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10964 ptr = mono_lookup_internal_call (cmethod);
10966 EMIT_NEW_PCONST (cfg, ins, ptr);
10972 case CEE_MONO_VTADDR: {
10973 MonoInst *src_var, *src;
10979 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10980 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10985 case CEE_MONO_NEWOBJ: {
10986 MonoInst *iargs [2];
10988 CHECK_STACK_OVF (1);
10990 token = read32 (ip + 2);
10991 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10992 mono_class_init (klass);
10993 NEW_DOMAINCONST (cfg, iargs [0]);
10994 MONO_ADD_INS (cfg->cbb, iargs [0]);
10995 NEW_CLASSCONST (cfg, iargs [1], klass);
10996 MONO_ADD_INS (cfg->cbb, iargs [1]);
10997 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10999 inline_costs += 10 * num_calls++;
11002 case CEE_MONO_OBJADDR:
11005 MONO_INST_NEW (cfg, ins, OP_MOVE);
11006 ins->dreg = alloc_ireg_mp (cfg);
11007 ins->sreg1 = sp [0]->dreg;
11008 ins->type = STACK_MP;
11009 MONO_ADD_INS (cfg->cbb, ins);
11013 case CEE_MONO_LDNATIVEOBJ:
11015 * Similar to LDOBJ, but instead load the unmanaged
11016 * representation of the vtype to the stack.
11021 token = read32 (ip + 2);
11022 klass = mono_method_get_wrapper_data (method, token);
11023 g_assert (klass->valuetype);
11024 mono_class_init (klass);
11027 MonoInst *src, *dest, *temp;
11030 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11031 temp->backend.is_pinvoke = 1;
11032 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11033 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11035 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11036 dest->type = STACK_VTYPE;
11037 dest->klass = klass;
11043 case CEE_MONO_RETOBJ: {
11045 * Same as RET, but return the native representation of a vtype
11048 g_assert (cfg->ret);
11049 g_assert (mono_method_signature (method)->pinvoke);
11054 token = read32 (ip + 2);
11055 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11057 if (!cfg->vret_addr) {
11058 g_assert (cfg->ret_var_is_local);
11060 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11062 EMIT_NEW_RETLOADA (cfg, ins);
11064 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11066 if (sp != stack_start)
11069 MONO_INST_NEW (cfg, ins, OP_BR);
11070 ins->inst_target_bb = end_bblock;
11071 MONO_ADD_INS (bblock, ins);
11072 link_bblock (cfg, bblock, end_bblock);
11073 start_new_bblock = 1;
11077 case CEE_MONO_CISINST:
11078 case CEE_MONO_CCASTCLASS: {
11083 token = read32 (ip + 2);
11084 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11085 if (ip [1] == CEE_MONO_CISINST)
11086 ins = handle_cisinst (cfg, klass, sp [0]);
11088 ins = handle_ccastclass (cfg, klass, sp [0]);
11094 case CEE_MONO_SAVE_LMF:
11095 case CEE_MONO_RESTORE_LMF:
11096 #ifdef MONO_ARCH_HAVE_LMF_OPS
11097 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11098 MONO_ADD_INS (bblock, ins);
11099 cfg->need_lmf_area = TRUE;
11103 case CEE_MONO_CLASSCONST:
11104 CHECK_STACK_OVF (1);
11106 token = read32 (ip + 2);
11107 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11110 inline_costs += 10 * num_calls++;
11112 case CEE_MONO_NOT_TAKEN:
11113 bblock->out_of_line = TRUE;
11117 CHECK_STACK_OVF (1);
11119 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11120 ins->dreg = alloc_preg (cfg);
11121 ins->inst_offset = (gint32)read32 (ip + 2);
11122 ins->type = STACK_PTR;
11123 MONO_ADD_INS (bblock, ins);
11127 case CEE_MONO_DYN_CALL: {
11128 MonoCallInst *call;
11130 /* It would be easier to call a trampoline, but that would put an
11131 * extra frame on the stack, confusing exception handling. So
11132 * implement it inline using an opcode for now.
11135 if (!cfg->dyn_call_var) {
11136 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11137 /* prevent it from being register allocated */
11138 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11141 /* Has to use a call inst since it local regalloc expects it */
11142 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11143 ins = (MonoInst*)call;
11145 ins->sreg1 = sp [0]->dreg;
11146 ins->sreg2 = sp [1]->dreg;
11147 MONO_ADD_INS (bblock, ins);
11149 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11150 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11154 inline_costs += 10 * num_calls++;
11158 case CEE_MONO_MEMORY_BARRIER: {
11160 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11164 case CEE_MONO_JIT_ATTACH: {
11165 MonoInst *args [16];
11166 MonoInst *ad_ins, *lmf_ins;
11167 MonoBasicBlock *next_bb = NULL;
11169 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11171 EMIT_NEW_PCONST (cfg, ins, NULL);
11172 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11178 ad_ins = mono_get_domain_intrinsic (cfg);
11179 lmf_ins = mono_get_lmf_intrinsic (cfg);
11182 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11183 NEW_BBLOCK (cfg, next_bb);
11185 MONO_ADD_INS (cfg->cbb, ad_ins);
11186 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11189 MONO_ADD_INS (cfg->cbb, lmf_ins);
11190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11194 if (cfg->compile_aot) {
11195 /* AOT code is only used in the root domain */
11196 EMIT_NEW_PCONST (cfg, args [0], NULL);
11198 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11200 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11201 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11204 MONO_START_BB (cfg, next_bb);
11210 case CEE_MONO_JIT_DETACH: {
11211 MonoInst *args [16];
11213 /* Restore the original domain */
11214 dreg = alloc_ireg (cfg);
11215 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11216 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11221 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11227 case CEE_PREFIX1: {
11230 case CEE_ARGLIST: {
11231 /* somewhat similar to LDTOKEN */
11232 MonoInst *addr, *vtvar;
11233 CHECK_STACK_OVF (1);
11234 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11236 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11237 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11239 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11240 ins->type = STACK_VTYPE;
11241 ins->klass = mono_defaults.argumenthandle_class;
11254 * The following transforms:
11255 * CEE_CEQ into OP_CEQ
11256 * CEE_CGT into OP_CGT
11257 * CEE_CGT_UN into OP_CGT_UN
11258 * CEE_CLT into OP_CLT
11259 * CEE_CLT_UN into OP_CLT_UN
11261 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11263 MONO_INST_NEW (cfg, ins, cmp->opcode);
11265 cmp->sreg1 = sp [0]->dreg;
11266 cmp->sreg2 = sp [1]->dreg;
11267 type_from_op (cmp, sp [0], sp [1]);
11269 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11270 cmp->opcode = OP_LCOMPARE;
11271 else if (sp [0]->type == STACK_R8)
11272 cmp->opcode = OP_FCOMPARE;
11274 cmp->opcode = OP_ICOMPARE;
11275 MONO_ADD_INS (bblock, cmp);
11276 ins->type = STACK_I4;
11277 ins->dreg = alloc_dreg (cfg, ins->type);
11278 type_from_op (ins, sp [0], sp [1]);
11280 if (cmp->opcode == OP_FCOMPARE) {
11282 * The backends expect the fceq opcodes to do the
11285 cmp->opcode = OP_NOP;
11286 ins->sreg1 = cmp->sreg1;
11287 ins->sreg2 = cmp->sreg2;
11289 MONO_ADD_INS (bblock, ins);
11295 MonoInst *argconst;
11296 MonoMethod *cil_method;
11298 CHECK_STACK_OVF (1);
11300 n = read32 (ip + 2);
11301 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11302 if (!cmethod || mono_loader_get_last_error ())
11304 mono_class_init (cmethod->klass);
11306 mono_save_token_info (cfg, image, n, cmethod);
11308 context_used = mini_method_check_context_used (cfg, cmethod);
11310 cil_method = cmethod;
11311 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11312 METHOD_ACCESS_FAILURE;
11314 if (mono_security_cas_enabled ()) {
11315 if (check_linkdemand (cfg, method, cmethod))
11316 INLINE_FAILURE ("linkdemand");
11317 CHECK_CFG_EXCEPTION;
11318 } else if (mono_security_core_clr_enabled ()) {
11319 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11323 * Optimize the common case of ldftn+delegate creation
11325 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11326 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11327 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11328 MonoInst *target_ins;
11329 MonoMethod *invoke;
11330 int invoke_context_used;
11332 invoke = mono_get_delegate_invoke (ctor_method->klass);
11333 if (!invoke || !mono_method_signature (invoke))
11336 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11338 target_ins = sp [-1];
11340 if (mono_security_core_clr_enabled ())
11341 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11343 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11344 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11345 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11347 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11351 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11352 /* FIXME: SGEN support */
11353 if (invoke_context_used == 0) {
11355 if (cfg->verbose_level > 3)
11356 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11358 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11359 CHECK_CFG_EXCEPTION;
11368 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11369 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11373 inline_costs += 10 * num_calls++;
11376 case CEE_LDVIRTFTN: {
11377 MonoInst *args [2];
11381 n = read32 (ip + 2);
11382 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11383 if (!cmethod || mono_loader_get_last_error ())
11385 mono_class_init (cmethod->klass);
11387 context_used = mini_method_check_context_used (cfg, cmethod);
11389 if (mono_security_cas_enabled ()) {
11390 if (check_linkdemand (cfg, method, cmethod))
11391 INLINE_FAILURE ("linkdemand");
11392 CHECK_CFG_EXCEPTION;
11393 } else if (mono_security_core_clr_enabled ()) {
11394 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11400 args [1] = emit_get_rgctx_method (cfg, context_used,
11401 cmethod, MONO_RGCTX_INFO_METHOD);
11404 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11406 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11409 inline_costs += 10 * num_calls++;
11413 CHECK_STACK_OVF (1);
11415 n = read16 (ip + 2);
11417 EMIT_NEW_ARGLOAD (cfg, ins, n);
11422 CHECK_STACK_OVF (1);
11424 n = read16 (ip + 2);
11426 NEW_ARGLOADA (cfg, ins, n);
11427 MONO_ADD_INS (cfg->cbb, ins);
11435 n = read16 (ip + 2);
11437 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11439 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11443 CHECK_STACK_OVF (1);
11445 n = read16 (ip + 2);
11447 EMIT_NEW_LOCLOAD (cfg, ins, n);
11452 unsigned char *tmp_ip;
11453 CHECK_STACK_OVF (1);
11455 n = read16 (ip + 2);
11458 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11464 EMIT_NEW_LOCLOADA (cfg, ins, n);
11473 n = read16 (ip + 2);
11475 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11477 emit_stloc_ir (cfg, sp, header, n);
11484 if (sp != stack_start)
11486 if (cfg->method != method)
11488 * Inlining this into a loop in a parent could lead to
11489 * stack overflows which is different behavior than the
11490 * non-inlined case, thus disable inlining in this case.
11492 goto inline_failure;
11494 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11495 ins->dreg = alloc_preg (cfg);
11496 ins->sreg1 = sp [0]->dreg;
11497 ins->type = STACK_PTR;
11498 MONO_ADD_INS (cfg->cbb, ins);
11500 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11502 ins->flags |= MONO_INST_INIT;
11507 case CEE_ENDFILTER: {
11508 MonoExceptionClause *clause, *nearest;
11509 int cc, nearest_num;
11513 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11515 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11516 ins->sreg1 = (*sp)->dreg;
11517 MONO_ADD_INS (bblock, ins);
11518 start_new_bblock = 1;
11523 for (cc = 0; cc < header->num_clauses; ++cc) {
11524 clause = &header->clauses [cc];
11525 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11526 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11527 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11532 g_assert (nearest);
11533 if ((ip - header->code) != nearest->handler_offset)
11538 case CEE_UNALIGNED_:
11539 ins_flag |= MONO_INST_UNALIGNED;
11540 /* FIXME: record alignment? we can assume 1 for now */
11544 case CEE_VOLATILE_:
11545 ins_flag |= MONO_INST_VOLATILE;
11549 ins_flag |= MONO_INST_TAILCALL;
11550 cfg->flags |= MONO_CFG_HAS_TAIL;
11551 /* Can't inline tail calls at this time */
11552 inline_costs += 100000;
11559 token = read32 (ip + 2);
11560 klass = mini_get_class (method, token, generic_context);
11561 CHECK_TYPELOAD (klass);
11562 if (generic_class_is_reference_type (cfg, klass))
11563 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11565 mini_emit_initobj (cfg, *sp, NULL, klass);
11569 case CEE_CONSTRAINED_:
11571 token = read32 (ip + 2);
11572 constrained_call = mini_get_class (method, token, generic_context);
11573 CHECK_TYPELOAD (constrained_call);
11577 case CEE_INITBLK: {
11578 MonoInst *iargs [3];
11582 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11583 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11584 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11585 /* emit_memset only works when val == 0 */
11586 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11588 iargs [0] = sp [0];
11589 iargs [1] = sp [1];
11590 iargs [2] = sp [2];
11591 if (ip [1] == CEE_CPBLK) {
11592 MonoMethod *memcpy_method = get_memcpy_method ();
11593 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11595 MonoMethod *memset_method = get_memset_method ();
11596 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11606 ins_flag |= MONO_INST_NOTYPECHECK;
11608 ins_flag |= MONO_INST_NORANGECHECK;
11609 /* we ignore the no-nullcheck for now since we
11610 * really do it explicitly only when doing callvirt->call
11614 case CEE_RETHROW: {
11616 int handler_offset = -1;
11618 for (i = 0; i < header->num_clauses; ++i) {
11619 MonoExceptionClause *clause = &header->clauses [i];
11620 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11621 handler_offset = clause->handler_offset;
11626 bblock->flags |= BB_EXCEPTION_UNSAFE;
11628 g_assert (handler_offset != -1);
11630 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11631 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11632 ins->sreg1 = load->dreg;
11633 MONO_ADD_INS (bblock, ins);
11635 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11636 MONO_ADD_INS (bblock, ins);
11639 link_bblock (cfg, bblock, end_bblock);
11640 start_new_bblock = 1;
11648 GSHAREDVT_FAILURE (*ip);
11650 CHECK_STACK_OVF (1);
11652 token = read32 (ip + 2);
11653 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11654 MonoType *type = mono_type_create_from_typespec (image, token);
11655 val = mono_type_size (type, &ialign);
11657 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11658 CHECK_TYPELOAD (klass);
11659 mono_class_init (klass);
11660 val = mono_type_size (&klass->byval_arg, &ialign);
11662 EMIT_NEW_ICONST (cfg, ins, val);
11667 case CEE_REFANYTYPE: {
11668 MonoInst *src_var, *src;
11670 GSHAREDVT_FAILURE (*ip);
11676 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11678 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11679 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11685 case CEE_READONLY_:
11698 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11708 g_warning ("opcode 0x%02x not handled", *ip);
11712 if (start_new_bblock != 1)
11715 bblock->cil_length = ip - bblock->cil_code;
11716 if (bblock->next_bb) {
11717 /* This could already be set because of inlining, #693905 */
11718 MonoBasicBlock *bb = bblock;
11720 while (bb->next_bb)
11722 bb->next_bb = end_bblock;
11724 bblock->next_bb = end_bblock;
11727 if (cfg->method == method && cfg->domainvar) {
11729 MonoInst *get_domain;
11731 cfg->cbb = init_localsbb;
11733 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11734 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11737 get_domain->dreg = alloc_preg (cfg);
11738 MONO_ADD_INS (cfg->cbb, get_domain);
11740 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11741 MONO_ADD_INS (cfg->cbb, store);
11744 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11745 if (cfg->compile_aot)
11746 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11747 mono_get_got_var (cfg);
11750 if (cfg->method == method && cfg->got_var)
11751 mono_emit_load_got_addr (cfg);
11756 cfg->cbb = init_localsbb;
11758 for (i = 0; i < header->num_locals; ++i) {
11759 MonoType *ptype = header->locals [i];
11760 int t = ptype->type;
11761 dreg = cfg->locals [i]->dreg;
11763 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11764 t = mono_class_enum_basetype (ptype->data.klass)->type;
11765 if (ptype->byref) {
11766 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11767 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11768 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11769 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11770 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11771 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11772 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11773 ins->type = STACK_R8;
11774 ins->inst_p0 = (void*)&r8_0;
11775 ins->dreg = alloc_dreg (cfg, STACK_R8);
11776 MONO_ADD_INS (init_localsbb, ins);
11777 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11778 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11779 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11780 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11781 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11782 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11784 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11789 if (cfg->init_ref_vars && cfg->method == method) {
11790 /* Emit initialization for ref vars */
11791 // FIXME: Avoid duplication initialization for IL locals.
11792 for (i = 0; i < cfg->num_varinfo; ++i) {
11793 MonoInst *ins = cfg->varinfo [i];
11795 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11796 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11801 MonoBasicBlock *bb;
11804 * Make seq points at backward branch targets interruptable.
11806 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11807 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11808 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11811 /* Add a sequence point for method entry/exit events */
11813 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11814 MONO_ADD_INS (init_localsbb, ins);
11815 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11816 MONO_ADD_INS (cfg->bb_exit, ins);
11820 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11821 * the code they refer to was dead (#11880).
11823 if (sym_seq_points) {
11824 for (i = 0; i < header->code_size; ++i) {
11825 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11828 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11829 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11836 if (cfg->method == method) {
11837 MonoBasicBlock *bb;
11838 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11839 bb->region = mono_find_block_region (cfg, bb->real_offset);
11841 mono_create_spvar_for_region (cfg, bb->region);
11842 if (cfg->verbose_level > 2)
11843 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11847 g_slist_free (class_inits);
11848 dont_inline = g_list_remove (dont_inline, method);
11850 if (inline_costs < 0) {
11853 /* Method is too large */
11854 mname = mono_method_full_name (method, TRUE);
11855 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11856 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11858 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11859 mono_basic_block_free (original_bb);
11863 if ((cfg->verbose_level > 2) && (cfg->method == method))
11864 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11866 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11867 mono_basic_block_free (original_bb);
11868 return inline_costs;
11871 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11878 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11882 set_exception_type_from_invalid_il (cfg, method, ip);
11886 g_slist_free (class_inits);
11887 mono_basic_block_free (original_bb);
11888 dont_inline = g_list_remove (dont_inline, method);
11889 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11894 store_membase_reg_to_store_membase_imm (int opcode)
11897 case OP_STORE_MEMBASE_REG:
11898 return OP_STORE_MEMBASE_IMM;
11899 case OP_STOREI1_MEMBASE_REG:
11900 return OP_STOREI1_MEMBASE_IMM;
11901 case OP_STOREI2_MEMBASE_REG:
11902 return OP_STOREI2_MEMBASE_IMM;
11903 case OP_STOREI4_MEMBASE_REG:
11904 return OP_STOREI4_MEMBASE_IMM;
11905 case OP_STOREI8_MEMBASE_REG:
11906 return OP_STOREI8_MEMBASE_IMM;
11908 g_assert_not_reached ();
11915 mono_op_to_op_imm (int opcode)
11919 return OP_IADD_IMM;
11921 return OP_ISUB_IMM;
11923 return OP_IDIV_IMM;
11925 return OP_IDIV_UN_IMM;
11927 return OP_IREM_IMM;
11929 return OP_IREM_UN_IMM;
11931 return OP_IMUL_IMM;
11933 return OP_IAND_IMM;
11937 return OP_IXOR_IMM;
11939 return OP_ISHL_IMM;
11941 return OP_ISHR_IMM;
11943 return OP_ISHR_UN_IMM;
11946 return OP_LADD_IMM;
11948 return OP_LSUB_IMM;
11950 return OP_LAND_IMM;
11954 return OP_LXOR_IMM;
11956 return OP_LSHL_IMM;
11958 return OP_LSHR_IMM;
11960 return OP_LSHR_UN_IMM;
11963 return OP_COMPARE_IMM;
11965 return OP_ICOMPARE_IMM;
11967 return OP_LCOMPARE_IMM;
11969 case OP_STORE_MEMBASE_REG:
11970 return OP_STORE_MEMBASE_IMM;
11971 case OP_STOREI1_MEMBASE_REG:
11972 return OP_STOREI1_MEMBASE_IMM;
11973 case OP_STOREI2_MEMBASE_REG:
11974 return OP_STOREI2_MEMBASE_IMM;
11975 case OP_STOREI4_MEMBASE_REG:
11976 return OP_STOREI4_MEMBASE_IMM;
11978 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11980 return OP_X86_PUSH_IMM;
11981 case OP_X86_COMPARE_MEMBASE_REG:
11982 return OP_X86_COMPARE_MEMBASE_IMM;
11984 #if defined(TARGET_AMD64)
11985 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11986 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11988 case OP_VOIDCALL_REG:
11989 return OP_VOIDCALL;
11997 return OP_LOCALLOC_IMM;
12004 ldind_to_load_membase (int opcode)
12008 return OP_LOADI1_MEMBASE;
12010 return OP_LOADU1_MEMBASE;
12012 return OP_LOADI2_MEMBASE;
12014 return OP_LOADU2_MEMBASE;
12016 return OP_LOADI4_MEMBASE;
12018 return OP_LOADU4_MEMBASE;
12020 return OP_LOAD_MEMBASE;
12021 case CEE_LDIND_REF:
12022 return OP_LOAD_MEMBASE;
12024 return OP_LOADI8_MEMBASE;
12026 return OP_LOADR4_MEMBASE;
12028 return OP_LOADR8_MEMBASE;
12030 g_assert_not_reached ();
12037 stind_to_store_membase (int opcode)
12041 return OP_STOREI1_MEMBASE_REG;
12043 return OP_STOREI2_MEMBASE_REG;
12045 return OP_STOREI4_MEMBASE_REG;
12047 case CEE_STIND_REF:
12048 return OP_STORE_MEMBASE_REG;
12050 return OP_STOREI8_MEMBASE_REG;
12052 return OP_STORER4_MEMBASE_REG;
12054 return OP_STORER8_MEMBASE_REG;
12056 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-membase opcode to the corresponding absolute-address
 * load-mem opcode, used when the base register holds a known constant
 * address.  Returns -1 when the target has no such opcode (only x86/amd64
 * provide them) or when no mapping exists.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 read-modify-write opcode which combines OPCODE with
 * a store of its result to memory, so a load+op+store sequence targeting a
 * stack slot can be folded into one instruction.  STORE_OPCODE is the store
 * which would otherwise spill the destination; only full-word (and, on
 * amd64, 8-byte) stores are foldable.  Returns -1 when no combined opcode
 * exists.
 *
 * NOTE(review): restored from an elided extraction; the dual-label IMM
 * cases on x86 and single-label IMM cases on amd64 are confirmed by the
 * original line-number gaps (e.g. two labels in 12106-12107 before the
 * visible return at 12108, one label at 12141 before 12142).
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an x86/amd64 opcode which stores the result of a compare-setcc
 * OPCODE directly to memory (SETcc with a membase destination), or -1.
 * Only applies when the pending store is a one-byte store.
 *
 * NOTE(review): the case labels (OP_ICEQ / OP_CNE) were elided in the
 * extraction and are reconstructed -- confirm against upstream.  The
 * original relied on fallthrough between the cases; that was harmless
 * because both guards test the same store_opcode, but an explicit break
 * is used here for clarity.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an x86/amd64 opcode which folds the load of OPCODE's first
 * source operand (performed by LOAD_OPCODE) into OPCODE itself as a
 * membase operand, or -1 if no such combined opcode exists.  Only
 * full-word loads are foldable; narrower loads would need sign/zero
 * extension (see the disabled FIXME blocks).
 *
 * NOTE(review): restored from an elided extraction; break placement and
 * the commented-out FIXME regions are confirmed by the original
 * line-number gaps.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		/* On ilp32, pointer-sized loads are 4 bytes wide */
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an x86/amd64 opcode which folds the load of OPCODE's second
 * source operand (performed by LOAD_OPCODE) into OPCODE itself as a
 * membase operand, or -1 if no such combined opcode exists.  The amd64
 * section dispatches on load width: 32-bit loads map to the I-opcodes,
 * 64-bit (and, outside ilp32, pointer-sized) loads to the L-opcodes.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are
 * software-emulated on the current architecture (long shifts on 32-bit
 * targets without native long shifts, div/rem/mul where emulated), since
 * the emulation helpers have no immediate variants.
 *
 * NOTE(review): the extracted fragment kept only the #if lines and the
 * final dispatch; the case groups are restored -- their sizes exactly fit
 * the original line-number gaps (12348-12352, 12354-12359, 12361-12364).
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
12370 * mono_handle_global_vregs:
12372 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * NOTE(review): this excerpt of mono_handle_global_vregs is an elided
 * extraction -- the original file's line numbers are baked into each line
 * and many intermediate lines (braces, case labels, #if 0 markers) are
 * missing, so no code tokens were changed here; only comments were added.
 *
 * Purpose (from the visible code): walk every bblock's instruction list,
 * record for each vreg which bblock it is used in, and promote any vreg
 * seen in more than one bblock to a proper variable via
 * mono_compile_create_var_for_vreg ().  Afterwards, dead variables are
 * removed and the varinfo/vars tables are compacted.
 */
12376 mono_handle_global_vregs (MonoCompile *cfg)
12378 gint32 *vreg_to_bb;
12379 MonoBasicBlock *bb;
/* NOTE(review): the element size here is sizeof (gint32*) (pointer size)
 * rather than sizeof (gint32), and the "+ 1" sits outside the
 * multiplication.  On LP64 this merely over-allocates, but
 * `sizeof (gint32) * (cfg->next_vreg + 1)` looks like the intent -- TODO
 * confirm against upstream. */
12382 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12384 #ifdef MONO_ARCH_SIMD_INTRINSICS
12385 if (cfg->uses_simd_intrinsics)
12386 mono_simd_simplify_indirection (cfg);
12389 /* Find local vregs used in more than one bb */
12390 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12391 MonoInst *ins = bb->code;
12392 int block_num = bb->block_num;
12394 if (cfg->verbose_level > 2)
12395 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12398 for (; ins; ins = ins->next) {
12399 const char *spec = INS_INFO (ins->opcode);
12400 int regtype = 0, regindex;
12403 if (G_UNLIKELY (cfg->verbose_level > 2))
12404 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes are expected at this point */
12406 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three possible source register slots */
12408 for (regindex = 0; regindex < 4; regindex ++) {
12411 if (regindex == 0) {
12412 regtype = spec [MONO_INST_DEST];
12413 if (regtype == ' ')
12416 } else if (regindex == 1) {
12417 regtype = spec [MONO_INST_SRC1];
12418 if (regtype == ' ')
12421 } else if (regindex == 2) {
12422 regtype = spec [MONO_INST_SRC2];
12423 if (regtype == ' ')
12426 } else if (regindex == 3) {
12427 regtype = spec [MONO_INST_SRC3];
12428 if (regtype == ' ')
12433 #if SIZEOF_REGISTER == 4
12434 /* In the LLVM case, the long opcodes are not decomposed */
12435 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12437 * Since some instructions reference the original long vreg,
12438 * and some reference the two component vregs, it is quite hard
12439 * to determine when it needs to be global. So be conservative.
12441 if (!get_vreg_to_inst (cfg, vreg)) {
12442 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12444 if (cfg->verbose_level > 2)
12445 printf ("LONG VREG R%d made global.\n", vreg);
12449 * Make the component vregs volatile since the optimizations can
12450 * get confused otherwise.
12452 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12453 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12457 g_assert (vreg != -1);
/* vreg_to_bb encoding: 0 = not seen yet, block_num + 1 = seen only in
 * that bblock, -1 = already flagged as used in multiple bblocks */
12459 prev_bb = vreg_to_bb [vreg];
12460 if (prev_bb == 0) {
12461 /* 0 is a valid block num */
12462 vreg_to_bb [vreg] = block_num + 1;
12463 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are handled by the register allocator, not here */
12464 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12467 if (!get_vreg_to_inst (cfg, vreg)) {
12468 if (G_UNLIKELY (cfg->verbose_level > 2))
12469 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Promote with a variable type chosen from the reg type ('i'/'l'/'f'/...);
 * the case labels for this switch were elided from the extraction */
12473 if (vreg_is_ref (cfg, vreg))
12474 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12476 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12479 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12482 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12485 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12488 g_assert_not_reached ();
12492 /* Flag as having been used in more than one bb */
12493 vreg_to_bb [vreg] = -1;
12499 /* If a variable is used in only one bblock, convert it into a local vreg */
12500 for (i = 0; i < cfg->num_varinfo; i++) {
12501 MonoInst *var = cfg->varinfo [i];
12502 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12504 switch (var->type) {
12510 #if SIZEOF_REGISTER == 8
12513 #if !defined(TARGET_X86)
12514 /* Enabling this screws up the fp stack on x86 */
12517 if (mono_arch_is_soft_float ())
12520 /* Arguments are implicitly global */
12521 /* Putting R4 vars into registers doesn't work currently */
12522 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12523 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12525 * Make that the variable's liveness interval doesn't contain a call, since
12526 * that would cause the lvreg to be spilled, making the whole optimization
12529 /* This is too slow for JIT compilation */
/* NOTE(review): the block below appears to be disabled slow-path code
 * (an #if 0 marker was likely elided): vreg_to_bb is declared gint32*,
 * yet it is dereferenced with ->code here, which would not compile. */
12531 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12533 int def_index, call_index, ins_index;
12534 gboolean spilled = FALSE;
12539 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12540 const char *spec = INS_INFO (ins->opcode);
12542 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12543 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1; the second arm was
 * presumably meant to test SRC2/sreg2 -- copy/paste defect, though inside
 * the disabled code path noted above. */
12545 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12546 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12547 if (call_index > def_index) {
12553 if (MONO_IS_CALL (ins))
12554 call_index = ins_index;
12564 if (G_UNLIKELY (cfg->verbose_level > 2))
12565 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead so the vreg stays a plain local vreg */
12566 var->flags |= MONO_INST_IS_DEAD;
12567 cfg->vreg_to_inst [var->dreg] = NULL;
12574 * Compress the varinfo and vars tables so the liveness computation is faster and
12575 * takes up less space.
12578 for (i = 0; i < cfg->num_varinfo; ++i) {
12579 MonoInst *var = cfg->varinfo [i];
12580 if (pos < i && cfg->locals_start == i)
12581 cfg->locals_start = pos;
12582 if (!(var->flags & MONO_INST_IS_DEAD)) {
12584 cfg->varinfo [pos] = cfg->varinfo [i];
12585 cfg->varinfo [pos]->inst_c0 = pos;
12586 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12587 cfg->vars [pos].idx = pos;
12588 #if SIZEOF_REGISTER == 4
12589 if (cfg->varinfo [pos]->type == STACK_I8) {
12590 /* Modify the two component vars too */
12593 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12594 var1->inst_c0 = pos;
12595 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12596 var1->inst_c0 = pos;
12603 cfg->num_varinfo = pos;
12604 if (cfg->locals_start > cfg->num_varinfo)
12605 cfg->locals_start = cfg->num_varinfo;
12609 * mono_spill_global_vars:
12611 * Generate spill code for variables which are not allocated to registers,
12612 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12613 * code is generated which could be optimized by the local optimization passes.
12616 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12618 MonoBasicBlock *bb;
12620 int orig_next_vreg;
12621 guint32 *vreg_to_lvreg;
12623 guint32 i, lvregs_len;
12624 gboolean dest_has_lvreg = FALSE;
12625 guint32 stacktypes [128];
12626 MonoInst **live_range_start, **live_range_end;
12627 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12628 int *gsharedvt_vreg_to_idx = NULL;
12630 *need_local_opts = FALSE;
12632 memset (spec2, 0, sizeof (spec2));
12634 /* FIXME: Move this function to mini.c */
12635 stacktypes ['i'] = STACK_PTR;
12636 stacktypes ['l'] = STACK_I8;
12637 stacktypes ['f'] = STACK_R8;
12638 #ifdef MONO_ARCH_SIMD_INTRINSICS
12639 stacktypes ['x'] = STACK_VTYPE;
12642 #if SIZEOF_REGISTER == 4
12643 /* Create MonoInsts for longs */
12644 for (i = 0; i < cfg->num_varinfo; i++) {
12645 MonoInst *ins = cfg->varinfo [i];
12647 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12648 switch (ins->type) {
12653 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12656 g_assert (ins->opcode == OP_REGOFFSET);
12658 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12660 tree->opcode = OP_REGOFFSET;
12661 tree->inst_basereg = ins->inst_basereg;
12662 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12664 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12666 tree->opcode = OP_REGOFFSET;
12667 tree->inst_basereg = ins->inst_basereg;
12668 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12678 if (cfg->compute_gc_maps) {
12679 /* registers need liveness info even for !non refs */
12680 for (i = 0; i < cfg->num_varinfo; i++) {
12681 MonoInst *ins = cfg->varinfo [i];
12683 if (ins->opcode == OP_REGVAR)
12684 ins->flags |= MONO_INST_GC_TRACK;
12688 if (cfg->gsharedvt) {
12689 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12691 for (i = 0; i < cfg->num_varinfo; ++i) {
12692 MonoInst *ins = cfg->varinfo [i];
12695 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12696 if (i >= cfg->locals_start) {
12698 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12699 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12700 ins->opcode = OP_GSHAREDVT_LOCAL;
12701 ins->inst_imm = idx;
12704 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12705 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12711 /* FIXME: widening and truncation */
12714 * As an optimization, when a variable allocated to the stack is first loaded into
12715 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12716 * the variable again.
12718 orig_next_vreg = cfg->next_vreg;
12719 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12720 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12724 * These arrays contain the first and last instructions accessing a given
12726 * Since we emit bblocks in the same order we process them here, and we
12727 * don't split live ranges, these will precisely describe the live range of
12728 * the variable, i.e. the instruction range where a valid value can be found
12729 * in the variables location.
12730 * The live range is computed using the liveness info computed by the liveness pass.
12731 * We can't use vmv->range, since that is an abstract live range, and we need
12732 * one which is instruction precise.
12733 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12735 /* FIXME: Only do this if debugging info is requested */
12736 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12737 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12738 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12739 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12741 /* Add spill loads/stores */
12742 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12745 if (cfg->verbose_level > 2)
12746 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12748 /* Clear vreg_to_lvreg array */
12749 for (i = 0; i < lvregs_len; i++)
12750 vreg_to_lvreg [lvregs [i]] = 0;
12754 MONO_BB_FOR_EACH_INS (bb, ins) {
12755 const char *spec = INS_INFO (ins->opcode);
12756 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12757 gboolean store, no_lvreg;
12758 int sregs [MONO_MAX_SRC_REGS];
12760 if (G_UNLIKELY (cfg->verbose_level > 2))
12761 mono_print_ins (ins);
12763 if (ins->opcode == OP_NOP)
12767 * We handle LDADDR here as well, since it can only be decomposed
12768 * when variable addresses are known.
12770 if (ins->opcode == OP_LDADDR) {
12771 MonoInst *var = ins->inst_p0;
12773 if (var->opcode == OP_VTARG_ADDR) {
12774 /* Happens on SPARC/S390 where vtypes are passed by reference */
12775 MonoInst *vtaddr = var->inst_left;
12776 if (vtaddr->opcode == OP_REGVAR) {
12777 ins->opcode = OP_MOVE;
12778 ins->sreg1 = vtaddr->dreg;
12780 else if (var->inst_left->opcode == OP_REGOFFSET) {
12781 ins->opcode = OP_LOAD_MEMBASE;
12782 ins->inst_basereg = vtaddr->inst_basereg;
12783 ins->inst_offset = vtaddr->inst_offset;
12786 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12787 /* gsharedvt arg passed by ref */
12788 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12790 ins->opcode = OP_LOAD_MEMBASE;
12791 ins->inst_basereg = var->inst_basereg;
12792 ins->inst_offset = var->inst_offset;
12793 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12794 MonoInst *load, *load2, *load3;
12795 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12796 int reg1, reg2, reg3;
12797 MonoInst *info_var = cfg->gsharedvt_info_var;
12798 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12802 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12805 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12807 g_assert (info_var);
12808 g_assert (locals_var);
12810 /* Mark the instruction used to compute the locals var as used */
12811 cfg->gsharedvt_locals_var_ins = NULL;
12813 /* Load the offset */
12814 if (info_var->opcode == OP_REGOFFSET) {
12815 reg1 = alloc_ireg (cfg);
12816 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12817 } else if (info_var->opcode == OP_REGVAR) {
12819 reg1 = info_var->dreg;
12821 g_assert_not_reached ();
12823 reg2 = alloc_ireg (cfg);
12824 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12825 /* Load the locals area address */
12826 reg3 = alloc_ireg (cfg);
12827 if (locals_var->opcode == OP_REGOFFSET) {
12828 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12829 } else if (locals_var->opcode == OP_REGVAR) {
12830 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12832 g_assert_not_reached ();
12834 /* Compute the address */
12835 ins->opcode = OP_PADD;
12839 mono_bblock_insert_before_ins (bb, ins, load3);
12840 mono_bblock_insert_before_ins (bb, load3, load2);
12842 mono_bblock_insert_before_ins (bb, load2, load);
12844 g_assert (var->opcode == OP_REGOFFSET);
12846 ins->opcode = OP_ADD_IMM;
12847 ins->sreg1 = var->inst_basereg;
12848 ins->inst_imm = var->inst_offset;
12851 *need_local_opts = TRUE;
12852 spec = INS_INFO (ins->opcode);
12855 if (ins->opcode < MONO_CEE_LAST) {
12856 mono_print_ins (ins);
12857 g_assert_not_reached ();
12861 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12865 if (MONO_IS_STORE_MEMBASE (ins)) {
12866 tmp_reg = ins->dreg;
12867 ins->dreg = ins->sreg2;
12868 ins->sreg2 = tmp_reg;
12871 spec2 [MONO_INST_DEST] = ' ';
12872 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12873 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12874 spec2 [MONO_INST_SRC3] = ' ';
12876 } else if (MONO_IS_STORE_MEMINDEX (ins))
12877 g_assert_not_reached ();
12882 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12883 printf ("\t %.3s %d", spec, ins->dreg);
12884 num_sregs = mono_inst_get_src_registers (ins, sregs);
12885 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12886 printf (" %d", sregs [srcindex]);
12893 regtype = spec [MONO_INST_DEST];
12894 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12897 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12898 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12899 MonoInst *store_ins;
12901 MonoInst *def_ins = ins;
12902 int dreg = ins->dreg; /* The original vreg */
12904 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12906 if (var->opcode == OP_REGVAR) {
12907 ins->dreg = var->dreg;
12908 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12910 * Instead of emitting a load+store, use a _membase opcode.
12912 g_assert (var->opcode == OP_REGOFFSET);
12913 if (ins->opcode == OP_MOVE) {
12917 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12918 ins->inst_basereg = var->inst_basereg;
12919 ins->inst_offset = var->inst_offset;
12922 spec = INS_INFO (ins->opcode);
12926 g_assert (var->opcode == OP_REGOFFSET);
12928 prev_dreg = ins->dreg;
12930 /* Invalidate any previous lvreg for this vreg */
12931 vreg_to_lvreg [ins->dreg] = 0;
12935 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12937 store_opcode = OP_STOREI8_MEMBASE_REG;
12940 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12942 #if SIZEOF_REGISTER != 8
12943 if (regtype == 'l') {
12944 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12945 mono_bblock_insert_after_ins (bb, ins, store_ins);
12946 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12947 mono_bblock_insert_after_ins (bb, ins, store_ins);
12948 def_ins = store_ins;
12953 g_assert (store_opcode != OP_STOREV_MEMBASE);
12955 /* Try to fuse the store into the instruction itself */
12956 /* FIXME: Add more instructions */
12957 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12958 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12959 ins->inst_imm = ins->inst_c0;
12960 ins->inst_destbasereg = var->inst_basereg;
12961 ins->inst_offset = var->inst_offset;
12962 spec = INS_INFO (ins->opcode);
12963 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12964 ins->opcode = store_opcode;
12965 ins->inst_destbasereg = var->inst_basereg;
12966 ins->inst_offset = var->inst_offset;
12970 tmp_reg = ins->dreg;
12971 ins->dreg = ins->sreg2;
12972 ins->sreg2 = tmp_reg;
12975 spec2 [MONO_INST_DEST] = ' ';
12976 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12977 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12978 spec2 [MONO_INST_SRC3] = ' ';
12980 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12981 // FIXME: The backends expect the base reg to be in inst_basereg
12982 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12984 ins->inst_basereg = var->inst_basereg;
12985 ins->inst_offset = var->inst_offset;
12986 spec = INS_INFO (ins->opcode);
12988 /* printf ("INS: "); mono_print_ins (ins); */
12989 /* Create a store instruction */
12990 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12992 /* Insert it after the instruction */
12993 mono_bblock_insert_after_ins (bb, ins, store_ins);
12995 def_ins = store_ins;
12998 * We can't assign ins->dreg to var->dreg here, since the
12999 * sregs could use it. So set a flag, and do it after
13002 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13003 dest_has_lvreg = TRUE;
13008 if (def_ins && !live_range_start [dreg]) {
13009 live_range_start [dreg] = def_ins;
13010 live_range_start_bb [dreg] = bb;
13013 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13016 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13017 tmp->inst_c1 = dreg;
13018 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13025 num_sregs = mono_inst_get_src_registers (ins, sregs);
13026 for (srcindex = 0; srcindex < 3; ++srcindex) {
13027 regtype = spec [MONO_INST_SRC1 + srcindex];
13028 sreg = sregs [srcindex];
13030 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13031 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13032 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13033 MonoInst *use_ins = ins;
13034 MonoInst *load_ins;
13035 guint32 load_opcode;
13037 if (var->opcode == OP_REGVAR) {
13038 sregs [srcindex] = var->dreg;
13039 //mono_inst_set_src_registers (ins, sregs);
13040 live_range_end [sreg] = use_ins;
13041 live_range_end_bb [sreg] = bb;
13043 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13046 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13047 /* var->dreg is a hreg */
13048 tmp->inst_c1 = sreg;
13049 mono_bblock_insert_after_ins (bb, ins, tmp);
13055 g_assert (var->opcode == OP_REGOFFSET);
13057 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13059 g_assert (load_opcode != OP_LOADV_MEMBASE);
13061 if (vreg_to_lvreg [sreg]) {
13062 g_assert (vreg_to_lvreg [sreg] != -1);
13064 /* The variable is already loaded to an lvreg */
13065 if (G_UNLIKELY (cfg->verbose_level > 2))
13066 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13067 sregs [srcindex] = vreg_to_lvreg [sreg];
13068 //mono_inst_set_src_registers (ins, sregs);
13072 /* Try to fuse the load into the instruction */
13073 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13074 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13075 sregs [0] = var->inst_basereg;
13076 //mono_inst_set_src_registers (ins, sregs);
13077 ins->inst_offset = var->inst_offset;
13078 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13079 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13080 sregs [1] = var->inst_basereg;
13081 //mono_inst_set_src_registers (ins, sregs);
13082 ins->inst_offset = var->inst_offset;
13084 if (MONO_IS_REAL_MOVE (ins)) {
13085 ins->opcode = OP_NOP;
13088 //printf ("%d ", srcindex); mono_print_ins (ins);
13090 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13092 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13093 if (var->dreg == prev_dreg) {
13095 * sreg refers to the value loaded by the load
13096 * emitted below, but we need to use ins->dreg
13097 * since it refers to the store emitted earlier.
13101 g_assert (sreg != -1);
13102 vreg_to_lvreg [var->dreg] = sreg;
13103 g_assert (lvregs_len < 1024);
13104 lvregs [lvregs_len ++] = var->dreg;
13108 sregs [srcindex] = sreg;
13109 //mono_inst_set_src_registers (ins, sregs);
13111 #if SIZEOF_REGISTER != 8
13112 if (regtype == 'l') {
13113 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13114 mono_bblock_insert_before_ins (bb, ins, load_ins);
13115 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13116 mono_bblock_insert_before_ins (bb, ins, load_ins);
13117 use_ins = load_ins;
13122 #if SIZEOF_REGISTER == 4
13123 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13125 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13126 mono_bblock_insert_before_ins (bb, ins, load_ins);
13127 use_ins = load_ins;
13131 if (var->dreg < orig_next_vreg) {
13132 live_range_end [var->dreg] = use_ins;
13133 live_range_end_bb [var->dreg] = bb;
13136 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13139 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13140 tmp->inst_c1 = var->dreg;
13141 mono_bblock_insert_after_ins (bb, ins, tmp);
13145 mono_inst_set_src_registers (ins, sregs);
13147 if (dest_has_lvreg) {
13148 g_assert (ins->dreg != -1);
13149 vreg_to_lvreg [prev_dreg] = ins->dreg;
13150 g_assert (lvregs_len < 1024);
13151 lvregs [lvregs_len ++] = prev_dreg;
13152 dest_has_lvreg = FALSE;
13156 tmp_reg = ins->dreg;
13157 ins->dreg = ins->sreg2;
13158 ins->sreg2 = tmp_reg;
13161 if (MONO_IS_CALL (ins)) {
13162 /* Clear vreg_to_lvreg array */
13163 for (i = 0; i < lvregs_len; i++)
13164 vreg_to_lvreg [lvregs [i]] = 0;
13166 } else if (ins->opcode == OP_NOP) {
13168 MONO_INST_NULLIFY_SREGS (ins);
13171 if (cfg->verbose_level > 2)
13172 mono_print_ins_index (1, ins);
13175 /* Extend the live range based on the liveness info */
13176 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13177 for (i = 0; i < cfg->num_varinfo; i ++) {
13178 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13180 if (vreg_is_volatile (cfg, vi->vreg))
13181 /* The liveness info is incomplete */
13184 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13185 /* Live from at least the first ins of this bb */
13186 live_range_start [vi->vreg] = bb->code;
13187 live_range_start_bb [vi->vreg] = bb;
13190 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13191 /* Live at least until the last ins of this bb */
13192 live_range_end [vi->vreg] = bb->last_ins;
13193 live_range_end_bb [vi->vreg] = bb;
13199 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13201 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13202 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13204 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13205 for (i = 0; i < cfg->num_varinfo; ++i) {
13206 int vreg = MONO_VARINFO (cfg, i)->vreg;
13209 if (live_range_start [vreg]) {
13210 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13212 ins->inst_c1 = vreg;
13213 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13215 if (live_range_end [vreg]) {
13216 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13218 ins->inst_c1 = vreg;
13219 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13220 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13222 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13228 if (cfg->gsharedvt_locals_var_ins) {
13229 /* Nullify if unused */
13230 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13231 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13234 g_free (live_range_start);
13235 g_free (live_range_end);
13236 g_free (live_range_start_bb);
13237 g_free (live_range_end_bb);
13242 * - use 'iadd' instead of 'int_add'
13243 * - handling ovf opcodes: decompose in method_to_ir.
13244 * - unify iregs/fregs
13245 * -> partly done, the missing parts are:
13246 * - a more complete unification would involve unifying the hregs as well, so
13247 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13248 * would no longer map to the machine hregs, so the code generators would need to
13249 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13250 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13251 * fp/non-fp branches speeds it up by about 15%.
13252 * - use sext/zext opcodes instead of shifts
13254 * - get rid of TEMPLOADs if possible and use vregs instead
13255 * - clean up usage of OP_P/OP_ opcodes
13256 * - cleanup usage of DUMMY_USE
13257 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13259 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13260 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13261 * - make sure handle_stack_args () is called before the branch is emitted
13262 * - when the new IR is done, get rid of all unused stuff
13263 * - COMPARE/BEQ as separate instructions or unify them ?
13264 * - keeping them separate allows specialized compare instructions like
13265 * compare_imm, compare_membase
13266 * - most back ends unify fp compare+branch, fp compare+ceq
13267 * - integrate mono_save_args into inline_method
13268 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13269 * - handle long shift opts on 32 bit platforms somehow: they require
13270 * 3 sregs (2 for arg1 and 1 for arg2)
13271 * - make byref a 'normal' type.
13272 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13273 * variable if needed.
13274 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13275 * like inline_method.
13276 * - remove inlining restrictions
13277 * - fix LNEG and enable cfold of INEG
13278 * - generalize x86 optimizations like ldelema as a peephole optimization
13279 * - add store_mem_imm for amd64
13280 * - optimize the loading of the interruption flag in the managed->native wrappers
13281 * - avoid special handling of OP_NOP in passes
13282 * - move code inserting instructions into one function/macro.
13283 * - try a coalescing phase after liveness analysis
13284 * - add float -> vreg conversion + local optimizations on !x86
13285 * - figure out how to handle decomposed branches during optimizations, ie.
13286 * compare+branch, op_jump_table+op_br etc.
13287 * - promote RuntimeXHandles to vregs
13288 * - vtype cleanups:
13289 * - add a NEW_VARLOADA_VREG macro
13290 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13291 * accessing vtype fields.
13292 * - get rid of I8CONST on 64 bit platforms
13293 * - dealing with the increase in code size due to branches created during opcode
13295 * - use extended basic blocks
13296 * - all parts of the JIT
13297 * - handle_global_vregs () && local regalloc
13298 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13299 * - sources of increase in code size:
13302 * - isinst and castclass
13303 * - lvregs not allocated to global registers even if used multiple times
13304 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13306 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13307 * - add all micro optimizations from the old JIT
13308 * - put tree optimizations into the deadce pass
13309 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13310 * specific function.
13311 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13312 * fcompare + branchCC.
13313 * - create a helper function for allocating a stack slot, taking into account
13314 * MONO_CFG_HAS_SPILLUP.
13316 * - merge the ia64 switch changes.
13317 * - optimize mono_regstate2_alloc_int/float.
13318 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13319 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13320 * parts of the tree could be separated by other instructions, killing the tree
13321 * arguments, or stores killing loads etc. Also, should we fold loads into other
13322 * instructions if the result of the load is used multiple times ?
13323 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13324 * - LAST MERGE: 108395.
13325 * - when returning vtypes in registers, generate IR and append it to the end of the
13326 * last bb instead of doing it in the epilog.
13327 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13335 - When to decompose opcodes:
13336 - earlier: this makes some optimizations hard to implement, since the low level IR
13337 no longer contains the necessary information. But it is easier to do.
13338 - later: harder to implement, enables more optimizations.
13339 - Branches inside bblocks:
13340 - created when decomposing complex opcodes.
13341 - branches to another bblock: harmless, but not tracked by the branch
13342 optimizations, so need to branch to a label at the start of the bblock.
13343 - branches to inside the same bblock: very problematic, trips up the local
13344 reg allocator. Can be fixed by splitting the current bblock, but that is a
13345 complex operation, since some local vregs can become global vregs etc.
13346 - Local/global vregs:
13347 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13348 local register allocator.
13349 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13350 structure, created by mono_create_var (). Assigned to hregs or the stack by
13351 the global register allocator.
13352 - When to do optimizations like alu->alu_imm:
13353 - earlier -> saves work later on since the IR will be smaller/simpler
13354 - later -> can work on more instructions
13355 - Handling of valuetypes:
13356 - When a vtype is pushed on the stack, a new temporary is created, an
13357 instruction computing its address (LDADDR) is emitted and pushed on
13358 the stack. Need to optimize cases when the vtype is used immediately as in
13359 argument passing, stloc etc.
13360 - Instead of the to_end stuff in the old JIT, simply call the function handling
13361 the values on the stack before emitting the last instruction of the bb.
13364 #endif /* DISABLE_JIT */