2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Cost/size heuristics used by the inliner when deciding whether to inline a callee. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE: abort inlining of the current callee. Only logs when we are
 * compiling an inlined body (cfg->method != method) of a non-wrapper method,
 * then jumps to the enclosing function's 'inline_failure' label.
 * NOTE(review): closing tokens of this macro are elided in this excerpt.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* CHECK_CFG_EXCEPTION: bail out if a pending exception has been recorded on the cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE: record a MethodAccessException on the cfg with a
 * message naming both methods, then jump to 'exception_exit'.
 */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* FIELD_ACCESS_FAILURE: same pattern as above, but records a FieldAccessException. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE: when compiling with generic sharing enabled, mark
 * the compilation as failed with MONO_EXCEPTION_GENERIC_SHARING_FAILED (so the
 * method can presumably be recompiled without sharing) and jump to
 * 'exception_exit'.
 */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* GSHAREDVT_FAILURE: like GENERIC_SHARING_FAILURE, but for gsharedvt compilations. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* OUT_OF_MEMORY_FAILURE: record an OutOfMemoryException and bail out of IR generation. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers defined later in this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
/* Map a generic opcode to its immediate-operand form (with/without emulation). */
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/*
 * Lazily-initialized signatures for the JIT helper trampolines; filled in by
 * mono_create_helper_signatures () below.
 */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/*
 * The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * mini-ops.h so that the same opcode list expands to different per-opcode
 * tables (operand kinds below, then sreg counts).
 */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* MONO_INIT_VARINFO: initialize liveness bookkeeping for a new variable (body partly elided here). */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the (up to three) source registers from 'regs' into 'ins'. */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Thin public wrappers around the vreg allocators (int/float/pointer/by-stack-type). */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain) of VREG. */
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Select the move opcode appropriate for storing a value of 'type' in a
 * register. Dispatches on the MonoType kind; enums and generic instances are
 * unwrapped to their underlying representation first. Several case bodies are
 * elided in this excerpt, so the per-case return values are not all visible.
 */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get special treatment when SIMD intrinsics are enabled. */
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
/* Generic instances reduce to their container class's byval type. */
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing. */
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of a basic block: prints 'msg', the block number, its in/out
 * edges (block number and depth-first number), then every instruction in the
 * block via mono_print_ins_index ().
 */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * Populate the helper_sig_* globals declared above with icall signatures
 * (signature strings are "<ret> <args...>").
 */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED: handle IL that fails verification. Under gsharedvt the method is
 * failed with GENERIC_SHARING_FAILED so it can fall back to a normal
 * instantiation; otherwise (tail elided here) it optionally breaks into the
 * debugger when break_on_unverified is set.
 */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* LOAD_ERROR / TYPE_LOAD_ERROR: jump to the load_error path (optionally breaking first). */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK: fetch (or lazily create and register) the basic block starting
 * at IL offset 'ip'. An out-of-range ip is treated as unverifiable IL.
 */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86/amd64 LEA (base + index<<shift + imm) into the current bblock. */
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
/*
 * ADD_WIDEN_OP: on 64-bit targets, sign-extend an I4 second operand to
 * pointer width before a PTR x I4 binary op; a no-op on 32-bit targets.
 */
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP: pop two values from the eval stack 'sp', emit the type-specific
 * binary op (via type_from_op), and push the (possibly decomposed) result.
 */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for single-operand opcodes. */
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a compare + conditional branch. Links the current bblock
 * to both the branch target and the fall-through block (next_block if given,
 * otherwise the block at 'ip'), flushing any pending stack values first.
 */
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
/* Values still on the eval stack must be spilled before the branch. */
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
/* Optional edge tracing (verbose path); entry/exit blocks have no cil_code. */
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* If the edge already exists in from->out_bb, do not add a duplicate. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
/* Grow the out-edge array from the compile mempool (old arrays are never freed, only abandoned). */
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Symmetric handling for to->in_bb. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
/* Region token layout: (clause index + 1) << 8, OR'ed with a region kind and the clause flags. */
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* A filter's code runs between filter_offset and handler_offset. */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is a catch handler. */
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not the handler. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (as a GList) the exception clauses of kind 'type' whose protected
 * range contains 'ip' but not 'target' — i.e. the handlers that must run when
 * control leaves the clause via a branch from ip to target.
 */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Get or create the stack-pointer save variable for an exception region,
 * cached per-region in cfg->spvars.
 */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for an IL offset. */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Get or create the variable holding the exception object for a handler at
 * 'offset', cached in cfg->exvars.
 */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Fills in inst->type (STACK_*) and inst->klass from the MonoType. */
649 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack (branch condition elided in this excerpt). */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing; gsharedvt vars are vtypes, others objects. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of an arithmetic binop, indexed by [lhs STACK_*][rhs STACK_*]. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of negation per operand stack type (presumably neg_table; declaration line elided). */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* Result type of bitwise/integer-only binops. */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero = allowed (values > 1 mark partially-verifiable pairs). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* Result type of shift ops: the shift amount (second index) must be I4 or PTR. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
/* Each entry is the delta added to the generic opcode, indexed by operand stack type. */
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* NOTE(review): many case labels are elided in this excerpt; groups of bodies below belong to elided cases. */
838 switch (ins->opcode) {
/* Arithmetic binops: validate via bin_num_table and specialize the opcode. */
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor ...). */
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Compare: pick long/float/int compare based on the operand type. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq ... blt.un). */
889 ins->opcode += beqops_op_map [src1->type];
/* ceq. */
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only strictly-comparable pairs (table value & 1) are valid. */
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
/* neg. */
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
/* conv to small int: result is I4. */
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
/* conv.r.un. */
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u / conv.i to native size: pick a move or a narrowing/float conversion. */
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8. */
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4/r8. */
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
/* Memory loads: result type determined by the load width. */
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Stack type produced when loading each MonoType kind (presumably indexed by MONO_TYPE_*; declaration elided). */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validate that the argument instructions in 'args' are compatible with the
 * parameter types of 'sig' (byref-ness, object vs. value, float kinds).
 * Returns a pass/fail result; several branches are elided in this excerpt.
 */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable. */
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on arches needing an explicit GOT var, and only when AOT-compiling. */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Lazily create the variable holding the runtime generic context (rgctx);
 * only valid under generic sharing.
 */
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to the corresponding MonoType. */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP (label elided): managed pointer to the instruction's klass. */
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: MonoType -> STACK_* (return statements for most cases elided in this excerpt). */
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CIL ldelem/stelem opcode to the MonoClass of the element it accesses
 * (case labels for the non-REF opcodes are elided in this excerpt).
 */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable to hold the eval-stack value 'ins' spilled at stack
 * slot 'slot' across a basic-block boundary, reusing a cached variable of the
 * same slot and stack type (cfg->intvars) when possible.
 */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (slot, stack type) pair. */
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT-compiling, remember which image+token 'key' was resolved from, so
 * the AOT compiler can re-resolve it later (stored in cfg->token_info_hash).
 */
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock with values on the stack: pick (or allocate)
 * the temporaries that will carry them into the successors. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing a successor's already-assigned in_stack so both sides of
 * the join use the same variables. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate our own out_stack array. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every (non-handler) successor;
 * a depth mismatch at a join point means the IL is unverifiable. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Spill each stack slot into its temp and replace the stack entry with the
 * temp, so successors reload the value from the variable. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* Emit an extra set of stores for every distinct in_stack variable set. */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface-offsets entry for KLASS, given the
 * vtable pointer in VTABLE_REG.  The offsets array lives at negative
 * offsets from the vtable.  Under AOT the (adjusted) interface id is not
 * known at compile time, so it is materialized via an AOT constant and the
 * address is computed with a register add; otherwise a constant negative
 * membase offset suffices.
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR that sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id
 * set.  Three strategies, depending on build/compile mode:
 *   - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match
 *     icall on the bitmap pointer and the interface id.
 *   - AOT (uncompressed): the iid is an AOT constant, so compute the byte
 *     index (iid >> 3) and bit mask (1 << (iid & 7)) in registers.
 *   - JIT (uncompressed): iid is known, so use immediate byte offset/mask.
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* iid only known at AOT-load time: compute byte address and bit mask in regs. */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: iid is a compile-time constant, fold offset and mask. */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoClass::interface_bitmap. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoVTable::interface_bitmap. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Range-check KLASS's interface id against the max interface id held in
 * MAX_IID_REG before probing the interface bitmap.  On failure, branch to
 * FALSE_TARGET when one is given (isinst-style), otherwise throw
 * InvalidCastException (castclass-style).  Under AOT the iid comes from an
 * AOT constant instead of an immediate.
 */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable::max_interface_id into a fresh reg, then delegates. */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass::max_interface_id into a fresh reg, then delegates. */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test using the supertypes table: check
 * whether the class in KLASS_REG derives from KLASS by comparing
 * supertypes[klass->idepth - 1] against KLASS.  The target class can be
 * supplied either as a runtime MonoInst (KLASS_INST, for generic sharing),
 * as an AOT class constant, or as an immediate pointer.  Branches to
 * TRUE_TARGET on a match; when the idepth is beyond the default supertable
 * size, first branches to FALSE_TARGET if the candidate's idepth is too
 * small to contain the slot.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst subclass test with no runtime class inst. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast/test against the vtable in VTABLE_REG: first
 * range-check the interface id, then test the interface bitmap bit.
 * With a TRUE_TARGET this acts as isinst (branch on implement); without
 * one it acts as castclass (throw InvalidCastException when the bit is 0).
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
/* Same semantics as mini_emit_iface_cast, but KLASS_REG holds a MonoClass*
 * rather than a MonoVTable*, so the max-iid and bitmap come from the class. */
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact-class equality check: compare the class in KLASS_REG
 * against KLASS and throw InvalidCastException if they differ.  The
 * expected class can be a runtime MonoInst (generic sharing), an AOT
 * class constant, or an immediate pointer.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with no runtime class inst. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in KLASS_REG against KLASS and branch to TARGET
 * using BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN).  Uses an AOT class
 * constant when compiling AOT, an immediate pointer otherwise.
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG (object in OBJ_REG)
 * against KLASS, throwing InvalidCastException on failure and branching to
 * OBJECT_IS_NULL when done.  Two major paths (the enclosing array/non-array
 * dispatch lines are not all visible here):
 *   - Array casts: check rank equality, then recursively check the element
 *     (cast_class), with special cases for object/enum element types and
 *     interfaces; finally, for rank-1 SZARRAY casts, verify the object has
 *     no bounds (i.e. is a vector, not a multi-dim array).
 *   - Non-array casts: exact-match check against the supertypes table at
 *     klass->idepth - 1, with an idepth range check when the depth exceeds
 *     the default supertable size.
 *   OBJ_REG may be -1 to skip the vector check (used for arrays of arrays).
 *   KLASS_INST optionally supplies the target class at runtime (generic
 * sharing); it must be NULL on the array path (asserted).
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type dispatch: enum/object special cases mirror the runtime's
 * array covariance rules for enums and their underlying types. */
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table exact slot check (throwing variant of
 * mini_emit_isninst_cast_inst). */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no runtime class inst. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that fills SIZE bytes at DESTREG + OFFSET with VAL
 * (only VAL == 0 is supported; asserted).  Small aligned sizes become a
 * single store-immediate of the matching width; larger sizes materialize
 * VAL in a register and emit a sequence of widest-possible stores,
 * narrowing (8 -> 4 -> 2 -> 1 bytes) as the remaining size shrinks.
 * The size/offset bookkeeping between the store groups is part of the
 * loop structure (not all lines shown here).
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores until aligned. */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET.  Each step loads into a fresh temp register and
 * stores, using the widest access allowed by ALIGN and the platform
 * (8 -> 4 -> 2 -> 1 bytes); unaligned copies degrade to byte moves.
 * SIZE is asserted < 10000 to bound code expansion — larger copies must
 * use a different strategy.  The offset/size advancement between the
 * copy groups is part of the loop structure (not all lines shown here).
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* Unaligned source/dest: byte-by-byte until aligned. */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return TYPE to the IR call opcode variant: the base
 * family (VOIDCALL / CALL / LCALL / FCALL / VCALL) is chosen by the return
 * kind, then CALLI selects the *_REG (indirect) form and VIRT the
 * *VIRT form.  Generic type variables are first resolved through GSCTX;
 * enums recurse on their underlying type and generic instances on their
 * container class.  Unknown types are a fatal error.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized — handled before the switch. */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1926 * target_type_is_incompatible:
1927 * @cfg: MonoCompile context
1929 * Check that the item @arg on the evaluation stack can be stored
1930 * in the target type (can be a local, or field, etc).
1931 * The cfg arg can be used to check if we need verification or just
1934 * Returns: non-0 value if arg can't be stored on a target.
1937 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1939 MonoType *simple_type;
1942 if (target->byref) {
1943 /* FIXME: check that the pointed to types match */
1944 if (arg->type == STACK_MP)
1945 return arg->klass != mono_class_from_mono_type (target);
1946 if (arg->type == STACK_PTR)
/* Non-byref: strip enum wrappers etc. and dispatch on the underlying type,
 * comparing against the stack-slot classification (STACK_I4/I8/R8/OBJ/...). */
1951 simple_type = mono_type_get_underlying_type (target);
1952 switch (simple_type->type) {
1953 case MONO_TYPE_VOID:
1957 case MONO_TYPE_BOOLEAN:
1960 case MONO_TYPE_CHAR:
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1967 /* STACK_MP is needed when setting pinned locals */
1968 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1973 case MONO_TYPE_FNPTR:
1975 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1976 * in native int. (#688008).
1978 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1981 case MONO_TYPE_CLASS:
1982 case MONO_TYPE_STRING:
1983 case MONO_TYPE_OBJECT:
1984 case MONO_TYPE_SZARRAY:
1985 case MONO_TYPE_ARRAY:
1986 if (arg->type != STACK_OBJ)
1988 /* FIXME: check type compatibility */
1992 if (arg->type != STACK_I8)
1997 if (arg->type != STACK_R8)
2000 case MONO_TYPE_VALUETYPE:
/* Value types additionally require the exact same MonoClass. */
2001 if (arg->type != STACK_VTYPE)
2003 klass = mono_class_from_mono_type (simple_type);
2004 if (klass != arg->klass)
2007 case MONO_TYPE_TYPEDBYREF:
2008 if (arg->type != STACK_VTYPE)
2010 klass = mono_class_from_mono_type (simple_type);
2011 if (klass != arg->klass)
2014 case MONO_TYPE_GENERICINST:
2015 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2016 if (arg->type != STACK_VTYPE)
2018 klass = mono_class_from_mono_type (simple_type);
2019 if (klass != arg->klass)
2023 if (arg->type != STACK_OBJ)
2025 /* FIXME: check type compatibility */
2029 case MONO_TYPE_MVAR:
/* Shared generic code: a type variable is either a vtype or a reference
 * depending on the constraint analysis in mini_type_var_is_vt (). */
2030 g_assert (cfg->generic_sharing_context);
2031 if (mini_type_var_is_vt (cfg, simple_type)) {
2032 if (arg->type != STACK_VTYPE)
2035 if (arg->type != STACK_OBJ)
2040 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2046 * Prepare arguments for passing to a function call.
2047 * Return a non-zero value if the arguments can't be passed to the given
2049 * The type checks are not yet complete and some conversions may need
2050 * casts on 32 or 64 bit architectures.
2052 * FIXME: implement this using target_type_is_incompatible ()
2055 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2057 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer. */
2061 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each declared parameter against the stack-slot classification of
 * the corresponding argument. */
2065 for (i = 0; i < sig->param_count; ++i) {
2066 if (sig->params [i]->byref) {
2067 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2071 simple_type = sig->params [i];
2072 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2074 handle_enum:
2074 switch (simple_type->type) {
2075 case MONO_TYPE_VOID:
2080 case MONO_TYPE_BOOLEAN:
2083 case MONO_TYPE_CHAR:
2086 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2092 case MONO_TYPE_FNPTR:
2093 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2096 case MONO_TYPE_CLASS:
2097 case MONO_TYPE_STRING:
2098 case MONO_TYPE_OBJECT:
2099 case MONO_TYPE_SZARRAY:
2100 case MONO_TYPE_ARRAY:
2101 if (args [i]->type != STACK_OBJ)
2106 if (args [i]->type != STACK_I8)
2111 if (args [i]->type != STACK_R8)
2114 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying primitive type. */
2115 if (simple_type->data.klass->enumtype) {
2116 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2119 if (args [i]->type != STACK_VTYPE)
2122 case MONO_TYPE_TYPEDBYREF:
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_GENERICINST:
2127 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2131 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to its direct-call equivalent (used when a
 * virtual call can be statically dispatched).  Asserts on any other opcode.
 */
2139 callvirt_to_call (int opcode)
2144 case OP_VOIDCALLVIRT:
2153 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to its *_MEMBASE form, i.e. an indirect
 * call through a [basereg + offset] slot (vtable/IMT dispatch).
 * Asserts on any other opcode.
 */
2160 callvirt_to_call_membase (int opcode)
2164 return OP_CALL_MEMBASE;
2165 case OP_VOIDCALLVIRT:
2166 return OP_VOIDCALL_MEMBASE;
2168 return OP_FCALL_MEMBASE;
2170 return OP_LCALL_MEMBASE;
2172 return OP_VCALL_MEMBASE;
2174 g_assert_not_reached ();
2180 #ifdef MONO_ARCH_HAVE_IMT
2181 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT argument (the interface method identity used by
 * IMT thunks) to be passed to CALL.  The value is IMT_ARG's vreg when
 * given, otherwise METHOD materialized as an AOT method constant or a raw
 * pointer.  Under LLVM the register is recorded in call->imt_arg_reg (and
 * pinned to MONO_ARCH_IMT_REG when the arch defines one, or kept alive via
 * a dummy outarg otherwise); the non-LLVM path either pins it to
 * MONO_ARCH_IMT_REG or defers to the arch-specific hook.
 */
2183 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2187 if (COMPILE_LLVM (cfg)) {
2188 method_reg = alloc_preg (cfg);
2191 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2192 } else if (cfg->compile_aot) {
2193 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2196 MONO_INST_NEW (cfg, ins, OP_PCONST);
2197 ins->inst_p0 = method;
2198 ins->dreg = method_reg;
2199 MONO_ADD_INS (cfg->cbb, ins);
2203 call->imt_arg_reg = method_reg;
2205 #ifdef MONO_ARCH_IMT_REG
2206 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2208 /* Need this to keep the IMT arg alive */
2209 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three ways to obtain the IMT value. */
2214 #ifdef MONO_ARCH_IMT_REG
2215 method_reg = alloc_preg (cfg);
2218 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2219 } else if (cfg->compile_aot) {
2220 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2223 MONO_INST_NEW (cfg, ins, OP_PCONST);
2224 ins->inst_p0 = method;
2225 ins->dreg = method_reg;
2226 MONO_ADD_INS (cfg->cbb, ins);
2229 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2231 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2236 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch record from mempool MP and initialize it
 * with the given IP, TYPE and TARGET (ip/type assignments are on lines not
 * shown here).  The memory is owned by the mempool — callers must not free.
 */
2237 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2239 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2243 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Return the generic-context usage flags for KLASS when compiling shared
 * generic code; outside a generic sharing context the answer is always 0
 * (on the elided fallthrough line).
 */
2249 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2251 if (cfg->generic_sharing_context)
2252 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Method counterpart of mini_class_check_context_used (): generic-context
 * usage flags for METHOD under generic sharing, 0 otherwise.
 */
2258 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2260 if (cfg->generic_sharing_context)
2261 return mono_method_check_context_used (method);
2266 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS,
 * run the per-arch outgoing-argument lowering, and return it (the caller
 * adds it to a bblock).  Flags select the variant:
 *   calli            — indirect call (address in a register)
 *   virtual          — callvirt-style dispatch
 *   tail             — emit OP_TAILCALL instead of a normal call opcode
 *   rgctx            — call receives an rgctx argument
 *   unbox_trampoline — target may need the unbox trampoline
 * Valuetype returns are handled either via cfg->vret_addr or by creating a
 * temp plus an OP_OUTARG_VTRETADDR (see the comment below).  Under soft
 * float, R4 arguments are pre-converted with an icall since the conversion
 * can't happen during the call sequence.  Updates cfg->param_area and sets
 * MONO_CFG_HAS_CALLS.
 */
2267 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2268 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2271 #ifdef MONO_ARCH_SOFT_FLOAT
2276 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2278 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2281 call->signature = sig;
2282 call->rgctx_reg = rgctx;
2284 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2287 if (mini_type_is_vtype (cfg, sig->ret)) {
2288 call->vret_var = cfg->vret_addr;
2289 //g_assert_not_reached ();
2291 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2292 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2295 temp->backend.is_pinvoke = sig->pinvoke;
2298 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2299 * address of return value to increase optimization opportunities.
2300 * Before vtype decomposition, the dreg of the call ins itself represents the
2301 * fact the call modifies the return value. After decomposition, the call will
2302 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2303 * will be transformed into an LDADDR.
2305 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2306 loada->dreg = alloc_preg (cfg);
2307 loada->inst_p0 = temp;
2308 /* We reference the call too since call->dreg could change during optimization */
2309 loada->inst_p1 = call;
2310 MONO_ADD_INS (cfg->cbb, loada);
2312 call->inst.dreg = temp->dreg;
2314 call->vret_var = loada;
2315 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2316 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2318 #ifdef MONO_ARCH_SOFT_FLOAT
2319 if (COMPILE_SOFT_FLOAT (cfg)) {
2321 * If the call has a float argument, we would need to do an r8->r4 conversion using
2322 * an icall, but that cannot be done during the call sequence since it would clobber
2323 * the call registers + the stack. So we do it before emitting the call.
2325 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2327 MonoInst *in = call->args [i];
2329 if (i >= sig->hasthis)
2330 t = sig->params [i - sig->hasthis];
2332 t = &mono_defaults.int_class->byval_arg;
2333 t = mono_type_get_underlying_type (t);
2335 if (!t->byref && t->type == MONO_TYPE_R4) {
2336 MonoInst *iargs [1];
2340 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2342 /* The result will be in an int vreg */
2343 call->args [i] = conv;
2349 call->need_unbox_trampoline = unbox_trampoline;
/* Arch/LLVM specific lowering of the outgoing arguments. */
2352 if (COMPILE_LLVM (cfg))
2353 mono_llvm_emit_call (cfg, call);
2355 mono_arch_emit_call (cfg, call);
2357 mono_arch_emit_call (cfg, call);
2360 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2361 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx (runtime generic context) argument in RGCTX_REG to
 * CALL.  When the architecture dedicates a register for it
 * (MONO_ARCH_RGCTX_REG), pin the vreg to that hard register and mark the
 * cfg/call accordingly; otherwise (the LLVM-style path) just record the
 * vreg in call->rgctx_arg_reg.
 */
2367 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2369 #ifdef MONO_ARCH_RGCTX_REG
2370 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2371 cfg->uses_rgctx_reg = TRUE;
2372 call->rgctx_reg = TRUE;
2374 call->rgctx_arg_reg = rgctx_reg;
2381 inline static MonoInst*
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments
 * ARGS.  IMT_ARG and RGCTX_ARG are optional: the rgctx value is copied to
 * a fresh vreg *before* building the call (so the copy isn't clobbered by
 * argument setup), the IMT argument is wired in via emit_imt_argument (),
 * and the rgctx register is attached after the call is added to the bblock.
 * Returns the call instruction as a MonoInst*.
 */
2382 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2388 rgctx_reg = mono_alloc_preg (cfg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2392 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2394 call->inst.sreg1 = addr->dreg;
2397 emit_imt_argument (cfg, call, NULL, imt_arg);
2399 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2402 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2404 return (MonoInst*)call;
2408 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2410 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2413 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2414 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2416 gboolean might_be_remote;
2417 gboolean virtual = this != NULL;
2418 gboolean enable_for_aot = TRUE;
2422 gboolean need_unbox_trampoline;
2425 rgctx_reg = mono_alloc_preg (cfg);
2426 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2429 if (method->string_ctor) {
2430 /* Create the real signature */
2431 /* FIXME: Cache these */
2432 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2433 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2438 context_used = mini_method_check_context_used (cfg, method);
2440 might_be_remote = this && sig->hasthis &&
2441 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2442 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2444 if (might_be_remote && context_used) {
2447 g_assert (cfg->generic_sharing_context);
2449 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2451 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2454 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2456 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2458 if (might_be_remote)
2459 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2461 call->method = method;
2462 call->inst.flags |= MONO_INST_HAS_METHOD;
2463 call->inst.inst_left = this;
2466 int vtable_reg, slot_reg, this_reg;
2468 this_reg = this->dreg;
2470 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2471 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2472 MonoInst *dummy_use;
2474 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2476 /* Make a call to delegate->invoke_impl */
2477 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2478 call->inst.inst_basereg = this_reg;
2479 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2480 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2482 /* We must emit a dummy use here because the delegate trampoline will
2483 replace the 'this' argument with the delegate target making this activation
2484 no longer a root for the delegate.
2485 This is an issue for delegates that target collectible code such as dynamic
2486 methods of GC'able assemblies.
2488 For a test case look into #667921.
2490 FIXME: a dummy use is not the best way to do it as the local register allocator
2491 will put it on a caller save register and spill it around the call.
2492 Ideally, we would either put it on a callee save register or only do the store part.
2494 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2496 return (MonoInst*)call;
2500 if ((!cfg->compile_aot || enable_for_aot) &&
2501 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2502 (MONO_METHOD_IS_FINAL (method) &&
2503 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2504 !(method->klass->marshalbyref && context_used)) {
2506 * the method is not virtual, we just need to ensure this is not null
2507 * and then we can call the method directly.
2509 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2511 * The check above ensures method is not gshared, this is needed since
2512 * gshared methods can't have wrappers.
2514 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2517 if (!method->string_ctor)
2518 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2520 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2521 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2523 * the method is virtual, but we can statically dispatch since either
2524 * its class or the method itself is sealed.
2525 * But first we need to ensure it's not a null reference.
2527 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2529 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2531 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2533 vtable_reg = alloc_preg (cfg);
2534 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2535 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2537 #ifdef MONO_ARCH_HAVE_IMT
2539 guint32 imt_slot = mono_method_get_imt_slot (method);
2540 emit_imt_argument (cfg, call, call->method, imt_arg);
2541 slot_reg = vtable_reg;
2542 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2545 if (slot_reg == -1) {
2546 slot_reg = alloc_preg (cfg);
2547 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2548 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2551 slot_reg = vtable_reg;
2552 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2553 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2554 #ifdef MONO_ARCH_HAVE_IMT
2556 g_assert (mono_method_signature (method)->generic_param_count);
2557 emit_imt_argument (cfg, call, call->method, imt_arg);
2562 call->inst.sreg1 = slot_reg;
2563 call->virtual = TRUE;
2567 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2572 return (MonoInst*)call;
2576 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2578 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2582 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2589 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2592 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2594 return (MonoInst*)call;
2598 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2600 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2604 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2608 * mono_emit_abs_call:
2610 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2612 inline static MonoInst*
2613 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2614 MonoMethodSignature *sig, MonoInst **args)
2616 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2620 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2623 if (cfg->abs_patches == NULL)
2624 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2625 g_hash_table_insert (cfg->abs_patches, ji, ji);
2626 ins = mono_emit_native_call (cfg, ji, sig, args);
2627 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2632 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2634 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2635 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2639 * Native code might return non register sized integers
2640 * without initializing the upper bits.
2642 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2643 case OP_LOADI1_MEMBASE:
2644 widen_op = OP_ICONV_TO_I1;
2646 case OP_LOADU1_MEMBASE:
2647 widen_op = OP_ICONV_TO_U1;
2649 case OP_LOADI2_MEMBASE:
2650 widen_op = OP_ICONV_TO_I2;
2652 case OP_LOADU2_MEMBASE:
2653 widen_op = OP_ICONV_TO_U2;
2659 if (widen_op != -1) {
2660 int dreg = alloc_preg (cfg);
2663 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2664 widen->type = ins->type;
2674 get_memcpy_method (void)
2676 static MonoMethod *memcpy_method = NULL;
2677 if (!memcpy_method) {
2678 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2680 g_error ("Old corlib found. Install a new one");
2682 return memcpy_method;
2686 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2688 MonoClassField *field;
2689 gpointer iter = NULL;
2691 while ((field = mono_class_get_fields (klass, &iter))) {
2694 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2696 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2697 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2698 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2699 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2701 MonoClass *field_class = mono_class_from_mono_type (field->type);
2702 if (field_class->has_references)
2703 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
2709 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2711 int card_table_shift_bits;
2712 gpointer card_table_mask;
2714 MonoInst *dummy_use;
2715 int nursery_shift_bits;
2716 size_t nursery_size;
2717 gboolean has_card_table_wb = FALSE;
2719 if (!cfg->gen_write_barriers)
2722 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2724 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2726 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2727 has_card_table_wb = TRUE;
2730 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2733 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2734 wbarrier->sreg1 = ptr->dreg;
2736 wbarrier->sreg2 = value->dreg;
2738 wbarrier->sreg2 = value_reg;
2739 MONO_ADD_INS (cfg->cbb, wbarrier);
2740 } else if (card_table) {
2741 int offset_reg = alloc_preg (cfg);
2742 int card_reg = alloc_preg (cfg);
2745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2746 if (card_table_mask)
2747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2749 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2750 * IMM's larger than 32bits.
2752 if (cfg->compile_aot) {
2753 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2755 MONO_INST_NEW (cfg, ins, OP_PCONST);
2756 ins->inst_p0 = card_table;
2757 ins->dreg = card_reg;
2758 MONO_ADD_INS (cfg->cbb, ins);
2761 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2764 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2765 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2769 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2771 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2772 dummy_use->sreg1 = value_reg;
2773 MONO_ADD_INS (cfg->cbb, dummy_use);
2778 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2780 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2781 unsigned need_wb = 0;
2786 /*types with references can't have alignment smaller than sizeof(void*) */
2787 if (align < SIZEOF_VOID_P)
2790 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2791 if (size > 32 * SIZEOF_VOID_P)
2794 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2796 /* We don't unroll more than 5 stores to avoid code bloat. */
2797 if (size > 5 * SIZEOF_VOID_P) {
2798 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2799 size += (SIZEOF_VOID_P - 1);
2800 size &= ~(SIZEOF_VOID_P - 1);
2802 EMIT_NEW_ICONST (cfg, iargs [2], size);
2803 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2804 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2808 destreg = iargs [0]->dreg;
2809 srcreg = iargs [1]->dreg;
2812 dest_ptr_reg = alloc_preg (cfg);
2813 tmp_reg = alloc_preg (cfg);
2816 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2818 while (size >= SIZEOF_VOID_P) {
2819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2823 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2825 offset += SIZEOF_VOID_P;
2826 size -= SIZEOF_VOID_P;
2829 /*tmp += sizeof (void*)*/
2830 if (size >= SIZEOF_VOID_P) {
2831 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2832 MONO_ADD_INS (cfg->cbb, iargs [0]);
2836 /* Those cannot be references since size < sizeof (void*) */
2838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2845 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2852 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2862 * Emit code to copy a valuetype of type @klass whose address is stored in
2863 * @src->dreg to memory whose address is stored at @dest->dreg.
2866 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2868 MonoInst *iargs [4];
2869 int context_used, n;
2871 MonoMethod *memcpy_method;
2872 MonoInst *size_ins = NULL;
2876 * This check breaks with spilled vars... need to handle it during verification anyway.
2877 * g_assert (klass && klass == src->klass && klass == dest->klass);
2880 if (mini_is_gsharedvt_klass (cfg, klass)) {
2882 context_used = mini_class_check_context_used (cfg, klass);
2883 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2887 n = mono_class_native_size (klass, &align);
2889 n = mono_class_value_size (klass, &align);
2891 /* if native is true there should be no references in the struct */
2892 if (cfg->gen_write_barriers && klass->has_references && !native) {
2893 /* Avoid barriers when storing to the stack */
2894 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2895 (dest->opcode == OP_LDADDR))) {
2901 context_used = mini_class_check_context_used (cfg, klass);
2903 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2904 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2906 } else if (context_used) {
2907 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2909 if (cfg->compile_aot) {
2910 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2912 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2913 mono_class_compute_gc_descriptor (klass);
2917 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2922 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2923 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2924 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2929 iargs [2] = size_ins;
2931 EMIT_NEW_ICONST (cfg, iargs [2], n);
2933 memcpy_method = get_memcpy_method ();
2934 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2939 get_memset_method (void)
2941 static MonoMethod *memset_method = NULL;
2942 if (!memset_method) {
2943 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2945 g_error ("Old corlib found. Install a new one");
2947 return memset_method;
2951 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2953 MonoInst *iargs [3];
2954 int n, context_used;
2956 MonoMethod *memset_method;
2957 MonoInst *size_ins = NULL;
2959 /* FIXME: Optimize this for the case when dest is an LDADDR */
2961 mono_class_init (klass);
2962 if (mini_is_gsharedvt_klass (cfg, klass)) {
2963 context_used = mini_class_check_context_used (cfg, klass);
2964 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2967 n = mono_class_value_size (klass, &align);
2970 if (!size_ins && n <= sizeof (gpointer) * 5) {
2971 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2974 memset_method = get_memset_method ();
2976 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2978 iargs [2] = size_ins;
2980 EMIT_NEW_ICONST (cfg, iargs [2], n);
2981 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2986 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2988 MonoInst *this = NULL;
2990 g_assert (cfg->generic_sharing_context);
2992 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2993 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2994 !method->klass->valuetype)
2995 EMIT_NEW_ARGLOAD (cfg, this, 0);
2997 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2998 MonoInst *mrgctx_loc, *mrgctx_var;
3001 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3003 mrgctx_loc = mono_get_vtable_var (cfg);
3004 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3007 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3008 MonoInst *vtable_loc, *vtable_var;
3012 vtable_loc = mono_get_vtable_var (cfg);
3013 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3015 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3016 MonoInst *mrgctx_var = vtable_var;
3019 vtable_reg = alloc_preg (cfg);
3020 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3021 vtable_var->type = STACK_PTR;
3029 vtable_reg = alloc_preg (cfg);
3030 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3035 static MonoJumpInfoRgctxEntry *
3036 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3038 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3039 res->method = method;
3040 res->in_mrgctx = in_mrgctx;
3041 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3042 res->data->type = patch_type;
3043 res->data->data.target = patch_data;
3044 res->info_type = info_type;
3049 static inline MonoInst*
3050 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3052 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3056 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3057 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3059 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3060 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3062 return emit_rgctx_fetch (cfg, rgctx, entry);
3066 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3067 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3069 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3070 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3072 return emit_rgctx_fetch (cfg, rgctx, entry);
3076 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3077 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3079 MonoJumpInfoGSharedVtCall *call_info;
3080 MonoJumpInfoRgctxEntry *entry;
3083 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3084 call_info->sig = sig;
3085 call_info->method = cmethod;
3087 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3088 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3090 return emit_rgctx_fetch (cfg, rgctx, entry);
3094 * emit_get_rgctx_method:
3096 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3097 * normal constants, else emit a load from the rgctx.
3100 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3101 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3103 if (!context_used) {
3106 switch (rgctx_type) {
3107 case MONO_RGCTX_INFO_METHOD:
3108 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3110 case MONO_RGCTX_INFO_METHOD_RGCTX:
3111 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3114 g_assert_not_reached ();
3117 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3118 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3120 return emit_rgctx_fetch (cfg, rgctx, entry);
3125 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3126 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3128 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3129 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3131 return emit_rgctx_fetch (cfg, rgctx, entry);
3135 * On return the caller must check @klass for load errors.
3138 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3140 MonoInst *vtable_arg;
3144 context_used = mini_class_check_context_used (cfg, klass);
3147 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3148 klass, MONO_RGCTX_INFO_VTABLE);
3150 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3154 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3157 if (COMPILE_LLVM (cfg))
3158 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3160 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3161 #ifdef MONO_ARCH_VTABLE_REG
3162 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3163 cfg->uses_vtable_reg = TRUE;
3170 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3174 if (cfg->gen_seq_points && cfg->method == method) {
3175 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3176 MONO_ADD_INS (cfg->cbb, ins);
3181 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3183 if (mini_get_debug_options ()->better_cast_details) {
3184 int to_klass_reg = alloc_preg (cfg);
3185 int vtable_reg = alloc_preg (cfg);
3186 int klass_reg = alloc_preg (cfg);
3187 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3190 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3194 MONO_ADD_INS (cfg->cbb, tls_get);
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3196 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3199 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3200 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3205 reset_cast_details (MonoCompile *cfg)
3207 /* Reset the variables holding the cast details */
3208 if (mini_get_debug_options ()->better_cast_details) {
3209 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3211 MONO_ADD_INS (cfg->cbb, tls_get);
3212 /* It is enough to reset the from field */
3213 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3218 * On return the caller must check @array_class for load errors
3221 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3223 int vtable_reg = alloc_preg (cfg);
3226 context_used = mini_class_check_context_used (cfg, array_class);
3228 save_cast_details (cfg, array_class, obj->dreg);
3230 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3232 if (cfg->opt & MONO_OPT_SHARED) {
3233 int class_reg = alloc_preg (cfg);
3234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3235 if (cfg->compile_aot) {
3236 int klass_reg = alloc_preg (cfg);
3237 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3238 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3242 } else if (context_used) {
3243 MonoInst *vtable_ins;
3245 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3246 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3248 if (cfg->compile_aot) {
3252 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3254 vt_reg = alloc_preg (cfg);
3255 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3256 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3259 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3265 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3267 reset_cast_details (cfg);
3271 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3272 * generic code is generated.
3275 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3277 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3280 MonoInst *rgctx, *addr;
3282 /* FIXME: What if the class is shared? We might not
3283 have to get the address of the method from the
3285 addr = emit_get_rgctx_method (cfg, context_used, method,
3286 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3288 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3290 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3292 return mono_emit_method_call (cfg, method, &val, NULL);
3297 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3301 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3302 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3303 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3304 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3306 obj_reg = sp [0]->dreg;
3307 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3308 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3310 /* FIXME: generics */
3311 g_assert (klass->rank == 0);
3314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3315 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3317 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3318 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3321 MonoInst *element_class;
3323 /* This assertion is from the unboxcast insn */
3324 g_assert (klass->rank == 0);
3326 element_class = emit_get_rgctx_klass (cfg, context_used,
3327 klass->element_class, MONO_RGCTX_INFO_KLASS);
3329 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3330 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3332 save_cast_details (cfg, klass->element_class, obj_reg);
3333 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3334 reset_cast_details (cfg);
3337 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3338 MONO_ADD_INS (cfg->cbb, add);
3339 add->type = STACK_MP;
3346 * Returns NULL and set the cfg exception on error.
3349 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3351 MonoInst *iargs [2];
3357 MonoInst *iargs [2];
3360 FIXME: we cannot get managed_alloc here because we can't get
3361 the class's vtable (because it's not a closed class)
3363 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3364 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3367 if (cfg->opt & MONO_OPT_SHARED)
3368 rgctx_info = MONO_RGCTX_INFO_KLASS;
3370 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3371 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3373 if (cfg->opt & MONO_OPT_SHARED) {
3374 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3376 alloc_ftn = mono_object_new;
3379 alloc_ftn = mono_object_new_specific;
3382 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3385 if (cfg->opt & MONO_OPT_SHARED) {
3386 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3387 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3389 alloc_ftn = mono_object_new;
3390 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3391 /* This happens often in argument checking code, eg. throw new FooException... */
3392 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3393 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3394 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3396 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3397 MonoMethod *managed_alloc = NULL;
3401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3402 cfg->exception_ptr = klass;
3406 #ifndef MONO_CROSS_COMPILE
3407 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3410 if (managed_alloc) {
3411 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3412 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3414 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3416 guint32 lw = vtable->klass->instance_size;
3417 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3418 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3419 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3422 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3426 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3430 * Returns NULL and set the cfg exception on error.
3433 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3435 MonoInst *alloc, *ins;
3437 if (mono_class_is_nullable (klass)) {
3438 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3441 /* FIXME: What if the class is shared? We might not
3442 have to get the method address from the RGCTX. */
3443 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3444 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3445 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3447 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3449 return mono_emit_method_call (cfg, method, &val, NULL);
3453 if (mini_is_gsharedvt_klass (cfg, klass)) {
3454 MonoBasicBlock *is_ref_bb, *end_bb;
3455 MonoInst *res, *is_ref, *src_var, *addr;
3458 dreg = alloc_ireg (cfg);
3460 NEW_BBLOCK (cfg, is_ref_bb);
3461 NEW_BBLOCK (cfg, end_bb);
3462 is_ref = emit_get_rgctx_klass (cfg, context_used, klass,
3463 MONO_RGCTX_INFO_CLASS_IS_REF);
3464 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3465 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3468 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3471 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3472 ins->opcode = OP_STOREV_MEMBASE;
3474 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3475 res->type = STACK_OBJ;
3477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3480 MONO_START_BB (cfg, is_ref_bb);
3481 addr_reg = alloc_ireg (cfg);
3483 /* val is a vtype, so has to load the value manually */
3484 src_var = get_vreg_to_inst (cfg, val->dreg);
3486 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3487 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3491 MONO_START_BB (cfg, end_bb);
3495 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3499 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3506 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3509 MonoGenericContainer *container;
3510 MonoGenericInst *ginst;
3512 if (klass->generic_class) {
3513 container = klass->generic_class->container_class->generic_container;
3514 ginst = klass->generic_class->context.class_inst;
3515 } else if (klass->generic_container && context_used) {
3516 container = klass->generic_container;
3517 ginst = container->context.class_inst;
3522 for (i = 0; i < container->type_argc; ++i) {
3524 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3526 type = ginst->type_argv [i];
3527 if (mini_type_is_reference (cfg, type))
3533 // FIXME: This doesn't work yet (class libs tests fail?)
3534 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3537 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 * Emit IR for the CIL 'castclass' opcode: verify that SRC is an instance of
 * KLASS, throwing InvalidCastException otherwise.  CONTEXT_USED is non-zero
 * when KLASS must be resolved through the runtime generic context (RGCTX)
 * in generic-shared code.  Per the comment above the function, returns NULL
 * and sets the cfg exception on error.
 */
3540 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3542 MonoBasicBlock *is_null_bb;
3543 int obj_reg = src->dreg;
3544 int vtable_reg = alloc_preg (cfg);
3545 MonoInst *klass_inst = NULL;
/* Variant generic arguments: fall back to the managed cast-with-cache helper. */
3550 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3551 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3552 MonoInst *cache_ins;
3554 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3559 /* klass - it's the second element of the cache entry*/
3560 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3563 args [2] = cache_ins;
3565 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
/* Shared generic code: fetch the klass pointer from the RGCTX at runtime. */
3568 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3570 if (is_complex_isinst (klass)) {
3571 /* Complex case, handle by an icall */
3577 args [1] = klass_inst;
3579 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3581 /* Simple case, handled by the code below */
/* A null reference always passes castclass: branch straight to is_null_bb. */
3585 NEW_BBLOCK (cfg, is_null_bb);
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Record the class/object being cast so a failing cast can report details. */
3590 save_cast_details (cfg, klass, obj_reg);
3592 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3594 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3596 int klass_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array class: a single vtable/klass pointer compare suffices. */
3600 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3601 /* the remoting code is broken, access the class for now */
3602 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3603 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3605 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3606 cfg->exception_ptr = klass;
3609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3617 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3621 MONO_START_BB (cfg, is_null_bb);
3623 reset_cast_details (cfg);
3629 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit IR for the CIL 'isinst' opcode: the result register gets SRC if the
 * object is an instance of KLASS, or NULL otherwise (false_bb stores 0).
 * CONTEXT_USED is non-zero in generic-shared code, where KLASS comes from
 * the RGCTX.  Per the comment above the function, returns NULL and sets the
 * cfg exception on error.
 */
3632 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3635 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3636 int obj_reg = src->dreg;
3637 int vtable_reg = alloc_preg (cfg);
3638 int res_reg = alloc_ireg_ref (cfg);
3639 MonoInst *klass_inst = NULL;
/* Variant generic arguments: fall back to the managed isinst-with-cache helper. */
3644 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3645 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3646 MonoInst *cache_ins;
3648 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3653 /* klass - it's the second element of the cache entry*/
3654 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3657 args [2] = cache_ins;
3659 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/* Shared generic code: fetch the klass pointer from the RGCTX at runtime. */
3662 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3664 if (is_complex_isinst (klass)) {
3665 /* Complex case, handle by an icall */
3671 args [1] = klass_inst;
3673 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3675 /* Simple case, the code below can handle it */
3679 NEW_BBLOCK (cfg, is_null_bb);
3680 NEW_BBLOCK (cfg, false_bb);
3681 NEW_BBLOCK (cfg, end_bb);
3683 /* Do the assignment at the beginning, so the other assignment can be if converted */
3684 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3685 ins->type = STACK_OBJ;
/* isinst on a null reference yields the (null) input unchanged. */
3688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3693 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3694 g_assert (!context_used);
3695 /* the is_null_bb target simply copies the input register to the output */
3696 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3698 int klass_reg = alloc_preg (cfg);
/* Array case: check the rank byte first, then the element class. */
3701 int rank_reg = alloc_preg (cfg);
3702 int eclass_reg = alloc_preg (cfg);
3704 g_assert (!context_used);
3705 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3707 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum, which
 * interact with enum/valuetype array compatibility rules. */
3710 if (klass->cast_class == mono_defaults.object_class) {
3711 int parent_reg = alloc_preg (cfg);
3712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3713 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3714 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3716 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3717 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3718 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3720 } else if (klass->cast_class == mono_defaults.enum_class) {
3721 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3723 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3724 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3726 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3727 /* Check that the object is a vector too */
3728 int bounds_reg = alloc_preg (cfg);
3729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3731 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3734 /* the is_null_bb target simply copies the input register to the output */
3735 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3737 } else if (mono_class_is_nullable (klass)) {
3738 g_assert (!context_used);
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3740 /* the is_null_bb target simply copies the input register to the output */
3741 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single vtable/klass pointer compare decides the result. */
3743 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3744 g_assert (!context_used);
3745 /* the remoting code is broken, access the class for now */
3746 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3747 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3749 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3750 cfg->exception_ptr = klass;
3753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3762 /* the is_null_bb target simply copies the input register to the output */
3763 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Not an instance: the result register is overwritten with NULL. */
3768 MONO_START_BB (cfg, false_bb);
3770 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3773 MONO_START_BB (cfg, is_null_bb);
3775 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Remoting-aware variant of isinst (used by the MONO_CISINST opcode):
 * transparent proxies need runtime cooperation, so instead of a boolean the
 * emitted code yields a 3-way integer result (see the comment below).
 * The result is materialized as an OP_ICONST-typed STACK_I4 instruction.
 */
3781 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3783 /* This opcode takes as input an object reference and a class, and returns:
3784 0) if the object is an instance of the class,
3785 1) if the object is not instance of the class,
3786 2) if the object is a proxy whose type cannot be determined */
3789 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3790 int obj_reg = src->dreg;
3791 int dreg = alloc_ireg (cfg);
3793 int klass_reg = alloc_preg (cfg);
3795 NEW_BBLOCK (cfg, true_bb);
3796 NEW_BBLOCK (cfg, false_bb);
3797 NEW_BBLOCK (cfg, false2_bb);
3798 NEW_BBLOCK (cfg, end_bb);
3799 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
3801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3802 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3804 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3805 NEW_BBLOCK (cfg, interface_fail_bb);
3807 tmp_reg = alloc_preg (cfg);
3808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3809 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3810 MONO_START_BB (cfg, interface_fail_bb);
3811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still say "maybe". */
3813 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3815 tmp_reg = alloc_preg (cfg);
3816 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3820 tmp_reg = alloc_preg (cfg);
3821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface case: distinguish plain objects from transparent proxies. */
3824 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3825 tmp_reg = alloc_preg (cfg);
3826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3829 tmp_reg = alloc_preg (cfg);
3830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: an inconclusive check yields result 2. */
3834 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3835 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3837 MONO_START_BB (cfg, no_proxy_bb);
3839 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3842 MONO_START_BB (cfg, false_bb);
3844 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3845 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3847 MONO_START_BB (cfg, false2_bb);
3849 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3852 MONO_START_BB (cfg, true_bb);
3854 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3856 MONO_START_BB (cfg, end_bb);
3859 MONO_INST_NEW (cfg, ins, OP_ICONST);
3861 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Remoting-aware variant of castclass (used by the MONO_CCASTCLASS opcode):
 * like handle_cisinst, but a definite mismatch throws InvalidCastException
 * instead of producing a result (see the comment below for the 0/1 codes).
 * The result is materialized as an OP_ICONST-typed STACK_I4 instruction.
 */
3867 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3869 /* This opcode takes as input an object reference and a class, and returns:
3870 0) if the object is an instance of the class,
3871 1) if the object is a proxy whose type cannot be determined
3872 an InvalidCastException exception is thrown otherwhise*/
3875 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3876 int obj_reg = src->dreg;
3877 int dreg = alloc_ireg (cfg);
3878 int tmp_reg = alloc_preg (cfg);
3879 int klass_reg = alloc_preg (cfg);
3881 NEW_BBLOCK (cfg, end_bb);
3882 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes castclass: result 0. */
3884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3885 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3887 save_cast_details (cfg, klass, obj_reg);
3889 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3890 NEW_BBLOCK (cfg, interface_fail_bb);
3892 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3893 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3894 MONO_START_BB (cfg, interface_fail_bb);
3895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: anything but a transparent proxy throws here. */
3897 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3899 tmp_reg = alloc_preg (cfg);
3900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3902 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: let the runtime decide, result 1. */
3904 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3908 NEW_BBLOCK (cfg, no_proxy_bb);
3910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3912 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3914 tmp_reg = alloc_preg (cfg);
3915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3918 tmp_reg = alloc_preg (cfg);
3919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3923 NEW_BBLOCK (cfg, fail_1_bb);
3925 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3927 MONO_START_BB (cfg, fail_1_bb);
3929 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check, which throws on mismatch. */
3934 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3937 MONO_START_BB (cfg, ok_result_bb);
3939 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3941 MONO_START_BB (cfg, end_bb);
3944 MONO_INST_NEW (cfg, ins, OP_ICONST);
3946 ins->type = STACK_I4;
3952 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor (): allocate a delegate of type
 * KLASS and initialize its target, method, method_code and invoke_impl
 * fields for TARGET/METHOD.  CONTEXT_USED is non-zero in generic-shared
 * code, where the method is fetched via the RGCTX.  Per the comment above
 * the function, returns NULL and sets the cfg exception on error.
 */
3954 static G_GNUC_UNUSED MonoInst*
3955 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3959 gpointer *trampoline;
3960 MonoInst *obj, *method_ins, *tramp_ins;
3964 obj = handle_alloc (cfg, klass, FALSE, 0);
3968 /* Inline the contents of mono_delegate_ctor */
3970 /* Set target field */
3971 /* Optimize away setting of NULL target */
3972 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3973 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a freshly-allocated object still needs a barrier
 * when write barriers are enabled. */
3974 if (cfg->gen_write_barriers) {
3975 dreg = alloc_preg (cfg);
3976 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3977 emit_write_barrier (cfg, ptr, target, 0);
3981 /* Set method field */
3982 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3983 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3984 if (cfg->gen_write_barriers) {
3985 dreg = alloc_preg (cfg);
3986 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3987 emit_write_barrier (cfg, ptr, method_ins, 0);
3990 * To avoid looking up the compiled code belonging to the target method
3991 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3992 * store it, and we fill it after the method has been compiled.
3994 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3995 MonoInst *code_slot_ins;
3998 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock, reusing an existing slot for this method if present. */
4000 domain = mono_domain_get ();
4001 mono_domain_lock (domain);
4002 if (!domain_jit_info (domain)->method_code_hash)
4003 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4004 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4006 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4007 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4009 mono_domain_unlock (domain);
4011 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4013 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4016 /* Set invoke_impl field */
4017 if (cfg->compile_aot) {
4018 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4020 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4021 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4023 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4025 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, with the dimension arguments taken
 * from the stack slots SP.  Marks the cfg as having varargs and disables
 * LLVM, since the icall uses a vararg calling convention.
 */
4031 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4033 MonoJitICallInfo *info;
4035 /* Need to register the icall so it gets an icall wrapper */
4036 info = mono_get_array_new_va_icall (rank);
4038 cfg->flags |= MONO_CFG_HAS_VARARGS;
4040 /* mono_array_new_va () needs a vararg calling convention */
4041 cfg->disable_llvm = TRUE;
4043 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4044 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * If the method uses a GOT variable which has not been materialized yet,
 * emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize it, and add a dummy use in the exit block so the variable
 * stays live for the whole method.  No-op if there is no got_var or it was
 * already allocated.
 */
4048 mono_emit_load_got_addr (MonoCompile *cfg)
4050 MonoInst *getaddr, *dummy_use;
4052 if (!cfg->got_var || cfg->got_var_allocated)
4055 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4056 getaddr->cil_code = cfg->header->code;
4057 getaddr->dreg = cfg->got_var->dreg;
4059 /* Add it to the start of the first bblock */
4060 if (cfg->bb_entry->code) {
4061 getaddr->next = cfg->bb_entry->code;
4062 cfg->bb_entry->code = getaddr;
4065 MONO_ADD_INS (cfg->bb_entry, getaddr);
4067 cfg->got_var_allocated = TRUE;
4070 * Add a dummy use to keep the got_var alive, since real uses might
4071 * only be generated by the back ends.
4072 * Add it to end_bblock, so the variable's lifetime covers the whole
4074 * It would be better to make the usage of the got var explicit in all
4075 * cases when the backend needs it (i.e. calls, throw etc.), so this
4076 * wouldn't be needed.
4078 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4079 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size a method may have to be considered for inlining.
 * Initialized lazily in mono_method_check_inlining () from the
 * MONO_INLINELIMIT environment variable, defaulting to INLINE_LENGTH_LIMIT. */
4082 static int inline_limit;
4083 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD is eligible for inlining into the method currently
 * being compiled in CFG.  Checks, in order: generic sharing, inline depth,
 * LMF-op internal/pinvoke fast path, header summary flags (noinlining,
 * synchronized, MBR), the IL size limit, class-initialization constraints,
 * declarative security, and (on soft-float targets) R4 arguments/returns.
 */
4086 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4088 MonoMethodHeaderSummary header;
4090 #ifdef MONO_ARCH_SOFT_FLOAT
4091 MonoMethodSignature *sig = mono_method_signature (method);
4095 if (cfg->generic_sharing_context)
/* Guard against runaway recursive inlining. */
4098 if (cfg->inline_depth > 10)
4101 #ifdef MONO_ARCH_HAVE_LMF_OPS
4102 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4103 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4104 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4109 if (!mono_method_get_header_summary (method, &header))
4112 /*runtime, icall and pinvoke are checked by summary call*/
4113 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4114 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4115 (method->klass->marshalbyref) ||
4119 /* also consider num_locals? */
4120 /* Do the size check early to avoid creating vtables */
4121 if (!inline_limit_inited) {
4122 if (getenv ("MONO_INLINELIMIT"))
4123 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4125 inline_limit = INLINE_LENGTH_LIMIT;
4126 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
4128 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4132 * if we can initialize the class of the method right away, we do,
4133 * otherwise we don't allow inlining if the class needs initialization,
4134 * since it would mean inserting a call to mono_runtime_class_init()
4135 * inside the inlined code
4137 if (!(cfg->opt & MONO_OPT_SHARED)) {
4138 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4139 if (cfg->run_cctors && method->klass->has_cctor) {
4140 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4141 if (!method->klass->runtime_info)
4142 /* No vtable created yet */
4144 vtable = mono_class_vtable (cfg->domain, method->klass);
4147 /* This makes so that inline cannot trigger */
4148 /* .cctors: too many apps depend on them */
4149 /* running with a specific order... */
4150 if (! vtable->initialized)
4152 mono_runtime_class_init (vtable);
4154 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4155 if (!method->klass->runtime_info)
4156 /* No vtable created yet */
4158 vtable = mono_class_vtable (cfg->domain, method->klass);
4161 if (!vtable->initialized)
4166 * If we're compiling for shared code
4167 * the cctor will need to be run at aot method load time, for example,
4168 * or at the end of the compilation of the inlining method.
4170 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4175 * CAS - do not inline methods with declarative security
4176 * Note: this has to be before any possible return TRUE;
4178 if (mono_method_has_declsec (method))
4181 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4. */
4183 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4185 for (i = 0; i < sig->param_count; ++i)
4186 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Return whether a static field access in METHOD requires a class
 * initialization call for VTABLE's class to be emitted first.  Already
 * initialized classes (when not AOT compiling), beforefieldinit classes,
 * classes with no cctor to run, and instance methods of the class itself
 * (whose cctor ran before the call) do not need it.
 */
4194 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4196 if (vtable->initialized && !cfg->compile_aot)
4199 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4202 if (!mono_class_needs_cctor_run (vtable->klass, method))
4205 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4206 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element type KLASS, optionally preceded by a bounds check
 * (BCHECK).  Returns the address instruction (STACK_MP typed).
 */
4213 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4217 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4220 if (mini_is_gsharedvt_klass (cfg, klass)) {
4223 mono_class_init (klass);
4224 size = mono_class_array_element_size (klass);
4227 mult_reg = alloc_preg (cfg);
4228 array_reg = arr->dreg;
4229 index_reg = index->dreg;
4231 #if SIZEOF_REGISTER == 8
4232 /* The array reg is 64 bits but the index reg is only 32 */
4233 if (COMPILE_LLVM (cfg)) {
4235 index2_reg = index_reg;
4237 index2_reg = alloc_preg (cfg);
4238 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4241 if (index->type == STACK_I8) {
4242 index2_reg = alloc_preg (cfg);
4243 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4245 index2_reg = index_reg;
4250 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4252 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64. */
4253 if (size == 1 || size == 2 || size == 4 || size == 8) {
4254 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4256 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4257 ins->klass = mono_class_get_element_class (klass);
4258 ins->type = STACK_MP;
/* Generic path: addr = arr + index * element_size + offsetof(vector). */
4264 add_reg = alloc_ireg_mp (cfg);
4267 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime, via the RGCTX. */
4270 g_assert (cfg->generic_sharing_context);
4271 context_used = mini_class_check_context_used (cfg, klass);
4272 g_assert (context_used);
4273 rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4274 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4278 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4279 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4280 ins->klass = mono_class_get_element_class (klass);
4281 ins->type = STACK_MP;
4282 MONO_ADD_INS (cfg->cbb, ins);
4287 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR of element type KLASS, including lower-bound
 * adjustment and range checks against the per-dimension MonoArrayBounds.
 * Only compiled when the architecture has a real multiply (guard above).
 * Returns the address instruction (STACK_MP typed).
 */
4289 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4291 int bounds_reg = alloc_preg (cfg);
4292 int add_reg = alloc_ireg_mp (cfg);
4293 int mult_reg = alloc_preg (cfg);
4294 int mult2_reg = alloc_preg (cfg);
4295 int low1_reg = alloc_preg (cfg);
4296 int low2_reg = alloc_preg (cfg);
4297 int high1_reg = alloc_preg (cfg);
4298 int high2_reg = alloc_preg (cfg);
4299 int realidx1_reg = alloc_preg (cfg);
4300 int realidx2_reg = alloc_preg (cfg);
4301 int sum_reg = alloc_preg (cfg);
4302 int index1, index2, tmpreg;
4306 mono_class_init (klass);
4307 size = mono_class_array_element_size (klass);
4309 index1 = index_ins1->dreg;
4310 index2 = index_ins2->dreg;
4312 #if SIZEOF_REGISTER == 8
4313 /* The array reg is 64 bits but the index reg is only 32 */
4314 if (COMPILE_LLVM (cfg)) {
4317 tmpreg = alloc_preg (cfg);
4318 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4320 tmpreg = alloc_preg (cfg);
4321 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4325 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4329 /* range checking */
4330 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4331 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, must be < length (unsigned). */
4333 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4334 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4335 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4336 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4337 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4338 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4339 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: bounds entry lives sizeof (MonoArrayBounds) past the first. */
4341 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4342 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4343 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4344 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4345 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4346 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4347 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof(vector). */
4349 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4350 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4352 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4353 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4355 ins->type = STACK_MP;
4357 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit IR computing an element address for the array Address () accessor
 * CMETHOD.  Rank-1 and (when OP_LMUL is available) rank-2 arrays are
 * handled inline via the helpers above; higher ranks go through the
 * marshalled array-address wrapper.  IS_SET subtracts the trailing value
 * argument from the parameter count when computing the rank.
 */
4364 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4368 MonoMethod *addr_method;
4371 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4374 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4376 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4377 /* emit_ldelema_2 depends on OP_LMUL */
4378 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4379 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the per-rank/per-size array address wrapper. */
4383 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4384 addr_method = mono_marshal_get_array_address (rank, element_size);
4385 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
4390 static MonoBreakPolicy
4391 always_insert_breakpoint (MonoMethod *method)
4393 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; replaced via mono_set_break_policy (). */
4396 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4399 * mono_set_break_policy:
4400 * policy_callback: the new callback function
4402 * Allow embedders to decide whether to actually obey breakpoint instructions
4403 * (both break IL instructions and Debugger.Break () method calls), for example
4404 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4405 * untrusted or semi-trusted code.
4407 * @policy_callback will be called every time a break point instruction needs to
4408 * be inserted with the method argument being the method that calls Debugger.Break()
4409 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4410 * if it wants the breakpoint to not be effective in the given method.
4411 * #MONO_BREAK_POLICY_ALWAYS is the default.
4414 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4416 if (policy_callback)
4417 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
4419 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 * Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.  NOTE(review): the function name is
 * misspelled ("brekpoint"); renaming would touch every caller, so it is
 * left as-is here.
 */
4423 should_insert_brekpoint (MonoMethod *method) {
4424 switch (break_policy_func (method)) {
4425 case MONO_BREAK_POLICY_ALWAYS:
4427 case MONO_BREAK_POLICY_NEVER:
4429 case MONO_BREAK_POLICY_ON_DBG:
4430 return mono_debug_using_mono_debugger ();
4432 g_warning ("Incorrect value returned from break policy callback");
4437 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the Get/SetGenericValueImpl icalls: compute the element address of
 * args[0][args[1]] and copy the value between it and the location args[2]
 * points at.  IS_SET selects store-into-array vs. load-from-array; a store
 * of a reference element also emits a write barrier.
 */
4439 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4441 MonoInst *addr, *store, *load;
4442 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4444 /* the bounds check is already done by the callers */
4445 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4447 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4448 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4449 if (mini_type_is_reference (cfg, fsig->params [2]))
4450 emit_write_barrier (cfg, addr, load, -1);
4452 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4453 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is treated as a reference type by the current
 * compile (delegates to mini_type_is_reference on its byval type). */
4460 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4462 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing SP[2] into SP[0][SP[1]] for element type KLASS.
 * With SAFETY_CHECKS, a non-null reference-typed store goes through the
 * virtual stelemref helper (which performs the array covariance check);
 * otherwise the address is computed inline, with a constant-index fast
 * path, and a write barrier is emitted for reference elements.
 */
4466 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4468 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4469 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4470 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4471 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4472 MonoInst *iargs [3];
4475 mono_class_setup_vtable (obj_array);
4476 g_assert (helper->slot);
4478 if (sp [0]->type != STACK_OBJ)
4480 if (sp [2]->type != STACK_OBJ)
4487 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4491 if (mini_is_gsharedvt_klass (cfg, klass)) {
4494 // FIXME-VT: OP_ICONST optimization
4495 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4496 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4497 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset and store directly off the array reg. */
4498 } else if (sp [1]->opcode == OP_ICONST) {
4499 int array_reg = sp [0]->dreg;
4500 int index_reg = sp [1]->dreg;
4501 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4504 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4505 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4507 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4508 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4509 if (generic_class_is_reference_type (cfg, klass))
4510 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 * Inline the Array UnsafeLoad/UnsafeStore accessors: element type comes
 * from the signature (params[2] for a store, the return type for a load),
 * and no bounds check is performed (hence "unsafe").
 */
4517 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4522 eklass = mono_class_from_mono_type (fsig->params [2]);
4524 eklass = mono_class_from_mono_type (fsig->ret);
4528 return emit_array_store (cfg, eklass, args, FALSE);
4530 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4531 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with intrinsic IR.  The visible part
 *   only attempts SIMD ctor intrinsics when MONO_OPT_SIMD is enabled;
 *   the tail of the function is not visible in this listing.
 */
4537 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4539 MonoInst *ins = NULL;
4540 #ifdef MONO_ARCH_SIMD_INTRINSICS
4541 if (cfg->opt & MONO_OPT_SIMD) {
4542 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given kind
 *   (e.g. FullBarrier) to the current basic block.
 */
4552 emit_memory_barrier (MonoCompile *cfg, int kind)
4554 MonoInst *ins = NULL;
4555 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4556 MONO_ADD_INS (cfg->cbb, ins);
4557 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Method-to-IR intrinsics recognized only when compiling with the LLVM
 *   backend.  Covers System.Math: Sin/Cos/Sqrt and Abs(double) map to
 *   single float opcodes (the opcode assignments are elided from this
 *   listing), and Min/Max map to IMIN/IMAX/LMIN/LMAX (and _UN variants)
 *   when the CMOV optimization is enabled.
 */
4563 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4565 MonoInst *ins = NULL;
4568 /* The LLVM backend supports these intrinsics */
4569 if (cmethod->klass == mono_defaults.math_class) {
4570 if (strcmp (cmethod->name, "Sin") == 0) {
4572 } else if (strcmp (cmethod->name, "Cos") == 0) {
4574 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4576 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary float op: one freg destination, arg 0 as source. */
4581 MONO_INST_NEW (cfg, ins, opcode);
4582 ins->type = STACK_R8;
4583 ins->dreg = mono_alloc_freg (cfg);
4584 ins->sreg1 = args [0]->dreg;
4585 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max become conditional-move opcodes, chosen by operand type/signedness. */
4589 if (cfg->opt & MONO_OPT_CMOV) {
4590 if (strcmp (cmethod->name, "Min") == 0) {
4591 if (fsig->params [0]->type == MONO_TYPE_I4)
4593 if (fsig->params [0]->type == MONO_TYPE_U4)
4594 opcode = OP_IMIN_UN;
4595 else if (fsig->params [0]->type == MONO_TYPE_I8)
4597 else if (fsig->params [0]->type == MONO_TYPE_U8)
4598 opcode = OP_LMIN_UN;
4599 } else if (strcmp (cmethod->name, "Max") == 0) {
4600 if (fsig->params [0]->type == MONO_TYPE_I4)
4602 if (fsig->params [0]->type == MONO_TYPE_U4)
4603 opcode = OP_IMAX_UN;
4604 else if (fsig->params [0]->type == MONO_TYPE_I8)
4606 else if (fsig->params [0]->type == MONO_TYPE_U8)
4607 opcode = OP_LMAX_UN;
/* Binary integer op: stack type follows the 32/64-bit operand width. */
4612 MONO_INST_NEW (cfg, ins, opcode);
4613 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4614 ins->dreg = mono_alloc_ireg (cfg);
4615 ins->sreg1 = args [0]->dreg;
4616 ins->sreg2 = args [1]->dreg;
4617 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe under generic sharing.  The visible cases map
 *   Array.UnsafeStore/UnsafeLoad onto emit_array_unsafe_access.
 */
4625 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4627 if (cmethod->klass == mono_defaults.array_class) {
4628 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4629 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4630 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4631 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Central intrinsic recognizer: if cmethod is a well-known BCL method,
 *   emit inline IR for it and return the result instruction instead of
 *   emitting a call.  Visible cases: String (get_Chars, get_Length,
 *   InternalSetChar), Object (GetType, InternalGetHashCode, .ctor),
 *   Array (Get/SetGenericValueImpl, GetLength/GetLowerBound(0), get_Rank,
 *   get_Length), RuntimeHelpers.get_OffsetToStringData, Thread
 *   (SpinWait_nop, MemoryBarrier), Monitor Enter/Exit fast paths,
 *   Interlocked (Read, Increment, Decrement, Add, Exchange,
 *   CompareExchange), Debugger.Break, Environment.get_IsRunningOnWindows,
 *   Math/SIMD, then the LLVM and arch-specific hooks.
 *   NOTE(review): this listing is elided; several statements (returns,
 *   #else branches, closing braces) are not visible here.
 */
4638 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4640 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4642 static MonoClass *runtime_helpers_class = NULL;
4643 if (! runtime_helpers_class)
4644 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4645 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4647 if (cmethod->klass == mono_defaults.string_class) {
4648 if (strcmp (cmethod->name, "get_Chars") == 0) {
4649 int dreg = alloc_ireg (cfg);
4650 int index_reg = alloc_preg (cfg);
4651 int mult_reg = alloc_preg (cfg);
4652 int add_reg = alloc_preg (cfg);
4654 #if SIZEOF_REGISTER == 8
4655 /* The array reg is 64 bits but the index reg is only 32 */
4656 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4658 index_reg = args [1]->dreg;
4660 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
/* x86/amd64 can fold base + index*2 + offset into a single LEA. */
4662 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4663 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4664 add_reg = ins->dreg;
4665 /* Avoid a warning */
4667 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes, so address = str + index*2 + chars offset. */
4670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4671 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4672 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4673 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4675 type_from_op (ins, NULL, NULL);
4677 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4678 int dreg = alloc_ireg (cfg);
4679 /* Decompose later to allow more optimizations */
4680 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4681 ins->type = STACK_I4;
/* OP_STRLEN dereferences the string, so it can fault on null. */
4682 ins->flags |= MONO_INST_FAULT;
4683 cfg->cbb->has_array_access = TRUE;
4684 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4687 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4688 int mult_reg = alloc_preg (cfg);
4689 int add_reg = alloc_preg (cfg);
4691 /* The corlib functions check for oob already. */
4692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4693 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4695 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4698 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load obj->vtable->type; the first load faults on null obj. */
4700 if (strcmp (cmethod->name, "GetType") == 0) {
4701 int dreg = alloc_ireg_ref (cfg);
4702 int vt_reg = alloc_preg (cfg);
4703 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4704 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4705 type_from_op (ins, NULL, NULL);
4708 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid with a non-moving GC. */
4709 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4710 int dreg = alloc_ireg (cfg);
4711 int t1 = alloc_ireg (cfg);
4713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4714 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4715 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
4719 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4720 MONO_INST_NEW (cfg, ins, OP_NOP);
4721 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4725 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl; first char picks set/get. */
4726 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4727 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4729 #ifndef MONO_BIG_ARRAYS
4731 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4734 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4735 int dreg = alloc_ireg (cfg);
4736 int bounds_reg = alloc_ireg_mp (cfg);
4737 MonoBasicBlock *end_bb, *szarray_bb;
4738 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4740 NEW_BBLOCK (cfg, end_bb);
4741 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on whether the array has a bounds descriptor (multi-dim) or is an szarray. */
4743 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4744 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4747 /* Non-szarray case */
4749 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4750 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4752 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4753 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4755 MONO_START_BB (cfg, szarray_bb);
/* Szarray: length is max_length, lower bound is always 0. */
4758 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4759 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4761 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4762 MONO_START_BB (cfg, end_bb);
4764 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4765 ins->type = STACK_I4;
/* Remaining array intrinsics all start with 'g' (get_Rank/get_Length). */
4771 if (cmethod->name [0] != 'g')
4774 if (strcmp (cmethod->name, "get_Rank") == 0) {
4775 int dreg = alloc_ireg (cfg);
4776 int vtable_reg = alloc_preg (cfg);
4777 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4778 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4779 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4780 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4781 type_from_op (ins, NULL, NULL);
4784 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4785 int dreg = alloc_ireg (cfg);
4787 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4788 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4789 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4794 } else if (cmethod->klass == runtime_helpers_class) {
4796 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4797 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4801 } else if (cmethod->klass == mono_defaults.thread_class) {
4802 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4803 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4804 MONO_ADD_INS (cfg->cbb, ins);
4806 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4807 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
4809 } else if (cmethod->klass == mono_defaults.monitor_class) {
4811 /* FIXME this should be integrated to the check below once we support the trampoline version */
4812 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4813 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4814 MonoMethod *fast_method = NULL;
4816 /* Avoid infinite recursion */
4817 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4820 fast_method = mono_monitor_get_fast_path (cmethod);
4824 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* Trampoline-based path: object is passed in a fixed register, not via the
 * normal calling convention (LLVM gets a normal signature instead). */
4828 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4829 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4832 if (COMPILE_LLVM (cfg)) {
4834 * Pass the argument normally, the LLVM backend will handle the
4835 * calling convention problems.
4837 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4839 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4840 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4841 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4842 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4845 return (MonoInst*)call;
4846 } else if (strcmp (cmethod->name, "Exit") == 0) {
4849 if (COMPILE_LLVM (cfg)) {
4850 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4852 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4853 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4854 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4855 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4858 return (MonoInst*)call;
4860 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4862 MonoMethod *fast_method = NULL;
4864 /* Avoid infinite recursion */
4865 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4866 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4867 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4870 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4871 strcmp (cmethod->name, "Exit") == 0)
4872 fast_method = mono_monitor_get_fast_path (cmethod);
4876 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4879 } else if (cmethod->klass->image == mono_defaults.corlib &&
4880 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4881 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4884 #if SIZEOF_REGISTER == 8
4885 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4886 /* 64 bit reads are already atomic */
4887 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4888 ins->dreg = mono_alloc_preg (cfg);
4889 ins->inst_basereg = args [0]->dreg;
4890 ins->inst_offset = 0;
4891 MONO_ADD_INS (cfg->cbb, ins);
4895 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of a constant +1/-1. */
4896 if (strcmp (cmethod->name, "Increment") == 0) {
4897 MonoInst *ins_iconst;
4900 if (fsig->params [0]->type == MONO_TYPE_I4)
4901 opcode = OP_ATOMIC_ADD_NEW_I4;
4902 #if SIZEOF_REGISTER == 8
4903 else if (fsig->params [0]->type == MONO_TYPE_I8)
4904 opcode = OP_ATOMIC_ADD_NEW_I8;
4907 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4908 ins_iconst->inst_c0 = 1;
4909 ins_iconst->dreg = mono_alloc_ireg (cfg);
4910 MONO_ADD_INS (cfg->cbb, ins_iconst);
4912 MONO_INST_NEW (cfg, ins, opcode);
4913 ins->dreg = mono_alloc_ireg (cfg);
4914 ins->inst_basereg = args [0]->dreg;
4915 ins->inst_offset = 0;
4916 ins->sreg2 = ins_iconst->dreg;
4917 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4918 MONO_ADD_INS (cfg->cbb, ins);
4920 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4921 MonoInst *ins_iconst;
4924 if (fsig->params [0]->type == MONO_TYPE_I4)
4925 opcode = OP_ATOMIC_ADD_NEW_I4;
4926 #if SIZEOF_REGISTER == 8
4927 else if (fsig->params [0]->type == MONO_TYPE_I8)
4928 opcode = OP_ATOMIC_ADD_NEW_I8;
4931 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4932 ins_iconst->inst_c0 = -1;
4933 ins_iconst->dreg = mono_alloc_ireg (cfg);
4934 MONO_ADD_INS (cfg, ins_iconst);
4936 MONO_INST_NEW (cfg, ins, opcode);
4937 ins->dreg = mono_alloc_ireg (cfg);
4938 ins->inst_basereg = args [0]->dreg;
4939 ins->inst_offset = 0;
4940 ins->sreg2 = ins_iconst->dreg;
4941 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4942 MONO_ADD_INS (cfg->cbb, ins);
4944 } else if (strcmp (cmethod->name, "Add") == 0) {
4947 if (fsig->params [0]->type == MONO_TYPE_I4)
4948 opcode = OP_ATOMIC_ADD_NEW_I4;
4949 #if SIZEOF_REGISTER == 8
4950 else if (fsig->params [0]->type == MONO_TYPE_I8)
4951 opcode = OP_ATOMIC_ADD_NEW_I8;
4955 MONO_INST_NEW (cfg, ins, opcode);
4956 ins->dreg = mono_alloc_ireg (cfg);
4957 ins->inst_basereg = args [0]->dreg;
4958 ins->inst_offset = 0;
4959 ins->sreg2 = args [1]->dreg;
4960 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4961 MONO_ADD_INS (cfg->cbb, ins);
4964 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4966 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4967 if (strcmp (cmethod->name, "Exchange") == 0) {
4969 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4971 if (fsig->params [0]->type == MONO_TYPE_I4)
4972 opcode = OP_ATOMIC_EXCHANGE_I4;
4973 #if SIZEOF_REGISTER == 8
4974 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4975 (fsig->params [0]->type == MONO_TYPE_I))
4976 opcode = OP_ATOMIC_EXCHANGE_I8;
4978 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4979 opcode = OP_ATOMIC_EXCHANGE_I4;
4984 MONO_INST_NEW (cfg, ins, opcode);
4985 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4986 ins->inst_basereg = args [0]->dreg;
4987 ins->inst_offset = 0;
4988 ins->sreg2 = args [1]->dreg;
4989 MONO_ADD_INS (cfg->cbb, ins);
4991 switch (fsig->params [0]->type) {
4993 ins->type = STACK_I4;
4997 ins->type = STACK_I8;
4999 case MONO_TYPE_OBJECT:
5000 ins->type = STACK_OBJ;
5003 g_assert_not_reached ();
/* Exchanging a reference into a location needs a GC write barrier. */
5006 if (cfg->gen_write_barriers && is_ref)
5007 emit_write_barrier (cfg, args [0], args [1], -1);
5009 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5011 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5012 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5014 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5015 if (fsig->params [1]->type == MONO_TYPE_I4)
5017 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5018 size = sizeof (gpointer);
5019 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5022 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5023 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5024 ins->sreg1 = args [0]->dreg;
5025 ins->sreg2 = args [1]->dreg;
5026 ins->sreg3 = args [2]->dreg;
5027 ins->type = STACK_I4;
5028 MONO_ADD_INS (cfg->cbb, ins);
5029 } else if (size == 8) {
5030 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5031 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5032 ins->sreg1 = args [0]->dreg;
5033 ins->sreg2 = args [1]->dreg;
5034 ins->sreg3 = args [2]->dreg;
5035 ins->type = STACK_I8;
5036 MONO_ADD_INS (cfg->cbb, ins);
5038 /* g_assert_not_reached (); */
5040 if (cfg->gen_write_barriers && is_ref)
5041 emit_write_barrier (cfg, args [0], args [1], -1);
5043 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
5047 } else if (cmethod->klass->image == mono_defaults.corlib) {
5048 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5049 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5050 if (should_insert_brekpoint (cfg->method)) {
5051 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5053 MONO_INST_NEW (cfg, ins, OP_NOP);
5054 MONO_ADD_INS (cfg->cbb, ins);
5058 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5059 && strcmp (cmethod->klass->name, "Environment") == 0) {
5061 EMIT_NEW_ICONST (cfg, ins, 1);
5063 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math / SIMD ---- */
5067 } else if (cmethod->klass == mono_defaults.math_class) {
5069 * There is general branches code for Min/Max, but it does not work for
5071 * http://everything2.com/?node_id=1051618
5075 #ifdef MONO_ARCH_SIMD_INTRINSICS
5076 if (cfg->opt & MONO_OPT_SIMD) {
5077 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally, give the LLVM backend and the architecture a chance. */
5083 if (COMPILE_LLVM (cfg)) {
5084 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5089 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5093 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected calls to faster equivalents.  The visible case
 *   replaces String.InternalAllocateStr with the GC's managed allocator
 *   (when profiling allocations is off and MONO_OPT_SHARED is not set),
 *   passing the cached String vtable as the first argument.
 */
5096 inline static MonoInst*
5097 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5098 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5100 if (method->klass == mono_defaults.string_class) {
5101 /* managed string allocation support */
5102 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5103 MonoInst *iargs [2];
5104 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5105 MonoMethod *managed_alloc = NULL;
5107 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross-compile builds cannot resolve the managed allocator at JIT time. */
5108 #ifndef MONO_CROSS_COMPILE
5109 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
5113 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5114 iargs [1] = args [0];
5115 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   During inlining, copy the caller's stack values (sp) into fresh local
 *   variables which become the inlined method's arguments (cfg->args).
 *   The 'this' argument (i == 0 when sig->hasthis) gets its type from the
 *   stack value itself; the rest come from the signature.
 */
5122 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5124 MonoInst *store, *temp;
5127 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5128 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5131 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5132 * would be different than the MonoInst's used to represent arguments, and
5133 * the ldelema implementation can't deal with that.
5134 * Solution: When ldelema is used on an inline argument, create a var for
5135 * it, emit ldelema on that var, and emit the saving code below in
5136 * inline_method () if needed.
5138 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5139 cfg->args [i] = temp;
5140 /* This uses cfg->args [i] which is set by the preceding line */
5141 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5142 store->cil_code = sp [0]->cil_code;
5147 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5148 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5150 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The
 *   env value is read once and cached (empty string == no restriction).
 */
5152 check_inline_called_method_name_limit (MonoMethod *called_method)
5155 static char *limit = NULL;
5157 if (limit == NULL) {
5158 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5160 if (limit_string != NULL)
5161 limit = limit_string;
5163 limit = (char *) "";
5166 if (limit [0] != '\0') {
5167 char *called_method_name = mono_method_full_name (called_method, TRUE);
5169 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5170 g_free (called_method_name);
5172 //return (strncmp_result <= 0);
5173 return (strncmp_result == 0);
5180 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining inside callers whose full name
 *   starts with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 *   Same caching scheme as the called-method variant above.
 */
5182 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5185 static char *limit = NULL;
5187 if (limit == NULL) {
5188 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5189 if (limit_string != NULL) {
5190 limit = limit_string;
5192 limit = (char *) "";
5196 if (limit [0] != '\0') {
5197 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5199 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5200 g_free (caller_method_name);
5202 //return (strncmp_result <= 0);
5203 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Initialize an inline return variable to a dummy zero value, chosen by
 *   its stack type: integer/pointer/object constants, an R8 constant
 *   pointing at a static 0.0, or VZERO for value types.  Used when an
 *   inlined method leaves the return var unset on some paths.
 */
5211 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
5213 static double r8_0 = 0.0;
5216 switch (rvar->type) {
5218 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5221 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5226 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
/* R8 constants are loaded by address, hence the static 0.0. */
5229 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5230 ins->type = STACK_R8;
5231 ins->inst_p0 = (void*)&r8_0;
5232 ins->dreg = rvar->dreg;
5233 MONO_ADD_INS (cfg->cbb, ins);
5236 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5239 g_assert_not_reached ();
/*
 * inline_method:
 *   Try to inline cmethod at the call site ip.  Saves the parts of cfg
 *   state that mono_method_to_ir () mutates, runs it into fresh start/end
 *   basic blocks (sbblock/ebblock), and accepts the result if the cost is
 *   low enough (or inline_always).  On success the new blocks are linked
 *   and merged into the caller's CFG and the return value (if any) is made
 *   available via a temp load; on failure the state is rolled back.
 *   NOTE(review): this listing is elided; the early-return and failure
 *   paths are only partially visible.
 */
5244 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5245 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5247 MonoInst *ins, *rvar = NULL;
5248 MonoMethodHeader *cheader;
5249 MonoBasicBlock *ebblock, *sbblock;
5251 MonoMethod *prev_inlined_method;
5252 MonoInst **prev_locals, **prev_args;
5253 MonoType **prev_arg_types;
5254 guint prev_real_offset;
5255 GHashTable *prev_cbb_hash;
5256 MonoBasicBlock **prev_cil_offset_to_bb;
5257 MonoBasicBlock *prev_cbb;
5258 unsigned char* prev_cil_start;
5259 guint32 prev_cil_offset_to_bb_len;
5260 MonoMethod *prev_current_method;
5261 MonoGenericContext *prev_generic_context;
5262 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5264 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filters restricting which methods may inline / be inlined. */
5266 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5267 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5270 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5271 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5275 if (cfg->verbose_level > 2)
5276 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5278 if (!cmethod->inline_info) {
5279 cfg->stat_inlineable_methods++;
5280 cmethod->inline_info = 1;
5283 /* allocate local variables */
5284 cheader = mono_method_get_header (cmethod);
5286 if (cheader == NULL || mono_loader_get_last_error ()) {
5287 MonoLoaderError *error = mono_loader_get_last_error ();
5290 mono_metadata_free_mh (cheader);
5291 if (inline_always && error)
5292 mono_cfg_set_exception (cfg, error->exception_type);
5294 mono_loader_clear_error ();
5298 /*Must verify before creating locals as it can cause the JIT to assert.*/
5299 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5300 mono_metadata_free_mh (cheader);
5304 /* allocate space to store the return value */
5305 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5306 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5309 prev_locals = cfg->locals;
5310 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5311 for (i = 0; i < cheader->num_locals; ++i)
5312 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5314 /* allocate start and end blocks */
5315 /* This is needed so if the inline is aborted, we can clean up */
5316 NEW_BBLOCK (cfg, sbblock);
5317 sbblock->real_offset = real_offset;
5319 NEW_BBLOCK (cfg, ebblock);
5320 ebblock->block_num = cfg->num_bblocks++;
5321 ebblock->real_offset = real_offset;
/* Save every cfg field mono_method_to_ir () will overwrite for the callee. */
5323 prev_args = cfg->args;
5324 prev_arg_types = cfg->arg_types;
5325 prev_inlined_method = cfg->inlined_method;
5326 cfg->inlined_method = cmethod;
5327 cfg->ret_var_set = FALSE;
5328 cfg->inline_depth ++;
5329 prev_real_offset = cfg->real_offset;
5330 prev_cbb_hash = cfg->cbb_hash;
5331 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5332 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5333 prev_cil_start = cfg->cil_start;
5334 prev_cbb = cfg->cbb;
5335 prev_current_method = cfg->current_method;
5336 prev_generic_context = cfg->generic_context;
5337 prev_ret_var_set = cfg->ret_var_set;
5339 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5342 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5344 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of whether the inline succeeded. */
5346 cfg->inlined_method = prev_inlined_method;
5347 cfg->real_offset = prev_real_offset;
5348 cfg->cbb_hash = prev_cbb_hash;
5349 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5350 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5351 cfg->cil_start = prev_cil_start;
5352 cfg->locals = prev_locals;
5353 cfg->args = prev_args;
5354 cfg->arg_types = prev_arg_types;
5355 cfg->current_method = prev_current_method;
5356 cfg->generic_context = prev_generic_context;
5357 cfg->ret_var_set = prev_ret_var_set;
5358 cfg->inline_depth --;
/* Accept: cost below threshold, or the caller demands inlining (wrappers). */
5360 if ((costs >= 0 && costs < 60) || inline_always) {
5361 if (cfg->verbose_level > 2)
5362 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5364 cfg->stat_inlined_methods++;
5366 /* always add some code to avoid block split failures */
5367 MONO_INST_NEW (cfg, ins, OP_NOP);
5368 MONO_ADD_INS (prev_cbb, ins);
5370 prev_cbb->next_bb = sbblock;
5371 link_bblock (cfg, prev_cbb, sbblock);
5374 * Get rid of the begin and end bblocks if possible to aid local
5377 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5379 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5380 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5382 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5383 MonoBasicBlock *prev = ebblock->in_bb [0];
5384 mono_merge_basic_blocks (cfg, prev, ebblock);
5386 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5387 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5388 cfg->cbb = prev_cbb;
5392 * It's possible that the rvar is set in some prev bblock, but not in others.
5398 for (i = 0; i < ebblock->in_count; ++i) {
5399 bb = ebblock->in_bb [i];
5401 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5404 emit_init_rvar (cfg, rvar, fsig->ret);
5414 * If the inlined method contains only a throw, then the ret var is not
5415 * set, so set it to a dummy value.
5418 emit_init_rvar (cfg, rvar, fsig->ret);
5420 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5423 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Failure path: roll back, clear any loader/compile error recorded above. */
5426 if (cfg->verbose_level > 2)
5427 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5428 cfg->exception_type = MONO_EXCEPTION_NONE;
5429 mono_loader_clear_error ();
5431 /* This gets rid of the newly added bblocks */
5432 cfg->cbb = prev_cbb;
5434 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5439 * Some of these comments may well be out-of-date.
5440 * Design decisions: we do a single pass over the IL code (and we do bblock
5441 * splitting/merging in the few cases when it's required: a back jump to an IL
5442 * address that was not already seen as bblock starting point).
5443 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5444 * Complex operations are decomposed in simpler ones right away. We need to let the
5445 * arch-specific code peek and poke inside this process somehow (except when the
5446 * optimizations can take advantage of the full semantic info of coarse opcodes).
5447 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5448 * MonoInst->opcode initially is the IL opcode or some simplification of that
5449 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5450 * opcode with value bigger than OP_LAST.
5451 * At this point the IR can be handed over to an interpreter, a dumb code generator
5452 * or to the optimizing code generator that will translate it to SSA form.
5454 * Profiling directed optimizations.
5455 * We may compile by default with few or no optimizations and instrument the code
5456 * or the user may indicate what methods to optimize the most either in a config file
5457 * or through repeated runs where the compiler applies offline the optimizations to
5458 * each method and then decides if it was worth it.
5461 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5462 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5463 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5464 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5465 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5466 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5467 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5468 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5470 /* offset from br.s -> br like opcodes */
5471 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when the CIL address ip does not start a basic block other
 *   than bb, i.e. the instruction at ip still belongs to bb.
 */
5474 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5476 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5478 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [start, end): decode each opcode and,
 *   for every branch/switch target and fall-through point, register a
 *   basic block start via GET_BBLOCK.  Basic blocks whose last reachable
 *   instruction is a throw are marked out_of_line so they can be laid out
 *   cold.  Non-branch operand kinds only advance ip (advance amounts are
 *   elided from this listing).
 */
5482 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5484 unsigned char *ip = start;
5485 unsigned char *target;
5488 MonoBasicBlock *bblock;
5489 const MonoOpcode *opcode;
5492 cli_addr = ip - start;
5493 i = mono_opcode_value ((const guint8 **)&ip, end);
5496 opcode = &mono_opcodes [i];
5497 switch (opcode->argument) {
5498 case MonoInlineNone:
5501 case MonoInlineString:
5502 case MonoInlineType:
5503 case MonoInlineField:
5504 case MonoInlineMethod:
5507 case MonoShortInlineR:
5514 case MonoShortInlineVar:
5515 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
5518 case MonoShortInlineBrTarget:
5519 target = start + cli_addr + 2 + (signed char)ip [1];
5520 GET_BBLOCK (cfg, bblock, target);
5523 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement. */
5525 case MonoInlineBrTarget:
5526 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5527 GET_BBLOCK (cfg, bblock, target);
5530 GET_BBLOCK (cfg, bblock, ip);
/* Switch: n 4-byte targets, all relative to the end of the switch instruction. */
5532 case MonoInlineSwitch: {
5533 guint32 n = read32 (ip + 1);
5536 cli_addr += 5 + 4 * n;
5537 target = start + cli_addr;
5538 GET_BBLOCK (cfg, bblock, target);
5540 for (j = 0; j < n; ++j) {
5541 target = start + cli_addr + (gint32)read32 (ip);
5542 GET_BBLOCK (cfg, bblock, target);
5552 g_assert_not_reached ();
5555 if (i == CEE_THROW) {
5556 unsigned char *bb_start = ip - 1;
5558 /* Find the start of the bblock containing the throw */
5560 while ((bb_start >= start) && !bblock) {
5561 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5565 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method token in the context of method m.  For wrapper
 *   methods the target is stored as wrapper data (and is inflated with
 *   context if given); otherwise the token is looked up in m's image.
 *   "allow_open": open constructed types are not rejected here.
 */
5575 static inline MonoMethod *
5576 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5580 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5581 method = mono_method_get_wrapper_data (m, token);
5583 method = mono_class_inflate_generic_method (method, context);
5585 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing it additionally checks for methods on open constructed types
 *   (the handling of that case is elided from this listing).
 */
5591 static inline MonoMethod *
5592 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5594 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5596 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type token relative to method: wrapper data for wrappers,
 *   otherwise a full metadata lookup in the method's image; the class is
 *   initialized before being returned.
 */
5602 static inline MonoClass*
5603 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5607 if (method->wrapper_type != MONO_WRAPPER_NONE)
5608 klass = mono_method_get_wrapper_data (method, token);
5610 klass = mono_class_get_full (method->klass->image, token, context);
5612 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve a standalone-signature token: for wrappers the signature is
 *   stored as wrapper data and may need inflating with context; otherwise
 *   it is parsed from the method's image metadata.
 */
5616 static inline MonoMethodSignature*
5617 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5619 MonoMethodSignature *fsig;
5621 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5624 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5626 fsig = mono_inflate_generic_signature (fsig, context, &error);
5628 g_assert (mono_error_ok (&error));
5631 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS LinkDemand checks).
 * As a side effect, either emits a call that throws a SecurityException
 * at runtime (ECMA link demands) or records a linkdemand failure on CFG.
 */
5641 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate declarative security when we are inlining (cfg->method !=
 * caller) and the callee actually carries declsec attributes. */
5645 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5649 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5650 if (result == MONO_JIT_SECURITY_OK)
5653 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5654 /* Generate code to throw a SecurityException before the actual call/link */
5655 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args [0] = 4: meaning of the constant is defined by the managed
 * LinkDemandSecurityException helper — see SecurityManager sources. */
5658 NEW_ICONST (cfg, args [0], 4);
5659 NEW_METHODCONST (cfg, args [1], caller);
5660 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5661 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5662 /* don't hide previous results */
5663 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5664 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (1-arg) helper method,
 * resolving and caching it in a function-local static on first use.
 * NOTE(review): the lazy-init guard around the lookup is elided in this view.
 */
5672 throw_exception (void)
5674 static MonoMethod *method = NULL;
5677 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5678 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR into CFG that throws the pre-created exception object EX at
 * runtime, by calling the managed ThrowException helper with EX as a
 * pointer constant.
 */
5685 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5687 MonoMethod *thrower = throw_exception ();
5690 EMIT_NEW_PCONST (cfg, args [0], ex);
5691 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.  Used by the CoreCLR
 * security checks below, which need attribute data that wrappers lack.
 */
5699 get_original_method (MonoMethod *method)
/* Not a wrapper: the method itself carries its attributes. */
5701 if (method->wrapper_type == MONO_WRAPPER_NONE)
5704 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5705 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5708 /* in other cases we need to find the original method */
5709 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for field access: if CALLER (unwrapped, see
 * get_original_method) may not access FIELD, emit code that throws the
 * exception returned by the security layer at the current IL position.
 */
5713 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5714 MonoBasicBlock *bblock, unsigned char *ip)
5716 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5717 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
/* NOTE(review): the NULL-check on ex is elided in this view — presumably
 * only throws when the access is disallowed. */
5719 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for method calls: mirror of the field-access
 * check above, but validating that CALLER may invoke CALLEE.
 */
5723 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5724 MonoBasicBlock *bblock, unsigned char *ip)
5726 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5727 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
/* NOTE(review): the NULL-check on ex is elided in this view — presumably
 * only throws when the call is disallowed. */
5729 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * Recognizes the dup/ldtoken/call RuntimeHelpers.InitializeArray idiom that
 * compilers emit after newarr, so the JIT can replace the call with a direct
 * memcpy from the field's RVA data.  For AOT (aot != FALSE) the RVA itself
 * is returned (wrapped via GUINT_TO_POINTER) so the lookup happens at load
 * time.  Returns NULL when the pattern does not match or cannot be used.
 */
5737 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Expected IL shape:
 * newarr[System.Int32]
 * ldtoken field valuetype ...
 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 */
/* ip [5] == 0x4 checks the ldtoken operand's token table byte (0x04 =
 * Field) — TODO confirm against ECMA-335 token encoding. */
5745 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5746 guint32 token = read32 (ip + 7);
5747 guint32 field_token = read32 (ip + 2);
/* Low 24 bits of a metadata token are the row index. */
5748 guint32 field_index = field_token & 0xffffff;
5750 const char *data_ptr;
5752 MonoMethod *cmethod;
5753 MonoClass *dummy_class;
5754 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5760 *out_field_token = field_token;
5762 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the call target really is corlib's
 * RuntimeHelpers.InitializeArray. */
5765 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Determine the element size from the array element type; only types whose
 * raw data is endian-safe (or on little-endian targets) are handled. */
5767 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5768 case MONO_TYPE_BOOLEAN:
5772 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5773 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5774 case MONO_TYPE_CHAR:
5784 return NULL; /* stupid ARM FP swapped format */
/* Reject if the computed blob size exceeds the field's declared size. */
5794 if (size > mono_type_size (field->type, &dummy_align))
5797 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* Non-SRE image: read the data blob through the field's RVA. */
5798 if (!method->klass->image->dynamic) {
5799 field_index = read32 (ip + 2) & 0xffffff;
5800 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5801 data_ptr = mono_image_rva_map (method->klass->image, rva);
5802 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5803 /* for aot code we do the lookup on load */
5804 if (aot && data_ptr)
5805 return GUINT_TO_POINTER (rva);
5807 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) image: data is reachable directly. */
5809 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the invalid IL at IP,
 * building a message that includes the method's full name and either a
 * disassembly of the offending instruction or a note that the body is
 * empty.  The header is queued on cfg->headers_to_free so it is released
 * with the compile's mempool.
 */
5817 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5819 char *method_fname = mono_method_full_name (method, TRUE);
5821 MonoMethodHeader *header = mono_method_get_header (method);
5823 if (header->code_size == 0)
5824 method_code = g_strdup ("method body is empty.");
5826 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5827 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5828 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5829 g_free (method_fname);
5830 g_free (method_code);
5831 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG (exception type
 * OBJECT_SUPPLIED).  The pointer is registered as a GC root so the
 * exception object stays alive until the compile is finished.
 */
5835 set_exception_object (MonoCompile *cfg, MonoException *exception)
5837 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5838 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5839 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value sp [0] into local N.  When the store
 * would be a plain reg-reg move and the value on top of the stack is the
 * last emitted instruction and a constant load, the move is optimized away
 * by retargeting that instruction's destination register to the local.
 */
5843 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5846 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5847 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5848 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5849 /* Optimize reg-reg moves away */
/*
 * Can't optimize other opcodes, since sp[0] might point to
 * the last ins of a decomposed opcode.
 */
5854 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit an explicit local store. */
5856 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: here, an ldloca immediately followed by initobj in the same basic
 * block is rewritten into a direct NULL store (reference types) or vzero
 * (value types) on the local, avoiding taking the local's address at all.
 * Returns the new IP past the consumed instructions (per the signature);
 * the fall-through/return paths are elided in this view.
 */
5864 static inline unsigned char *
5865 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* NOTE(review): `size` selects between the short and fat ldloca encodings;
 * the size==1 path is elided here — confirm. */
5874 local = read16 (ip + 2);
/* Peek ahead: is the next instruction an initobj in the same bblock? */
5878 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5879 gboolean skip = FALSE;
5881 /* From the INITOBJ case */
5882 token = read32 (ip + 2);
5883 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5884 CHECK_TYPELOAD (klass);
/* initobj on a reference type == store NULL into the local. */
5885 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5886 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* initobj on a struct == zero the whole local. */
5887 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5888 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by
 * walking the parent chain up to mono_defaults.exception_class.
 */
5901 is_exception_class (MonoClass *class)
5904 if (class == mono_defaults.exception_class)
5906 class = class->parent;
/*
 * is_jit_optimizer_disabled:
 *
 * Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The result is cached per-assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited) with memory
 * barriers ordering the value store before the inited flag, so concurrent
 * readers never see an uninitialized cached value.
 */
5918 is_jit_optimizer_disabled (MonoMethod *m)
5920 MonoAssembly *ass = m->klass->image->assembly;
5921 MonoCustomAttrInfo* attrs;
/* Cached DebuggableAttribute class; looked up lazily below. */
5922 static MonoClass *klass;
5924 gboolean val = FALSE;
/* Fast path: answer already computed for this assembly. */
5927 if (ass->jit_optimizer_disabled_inited)
5928 return ass->jit_optimizer_disabled;
5931 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* NOTE(review): this early-out presumably fires when the attribute class
 * is missing (e.g. a trimmed corlib) — the guarding condition is elided. */
5934 ass->jit_optimizer_disabled = FALSE;
5935 mono_memory_barrier ();
5936 ass->jit_optimizer_disabled_inited = TRUE;
5940 attrs = mono_custom_attrs_from_assembly (ass);
5942 for (i = 0; i < attrs->num_attrs; ++i) {
5943 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5946 MonoMethodSignature *sig;
/* Only DebuggableAttribute instances are of interest. */
5948 if (!attr->ctor || attr->ctor->klass != klass)
5950 /* Decode the attribute. See reflection.c */
5951 len = attr->data_size;
5952 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335). */
5953 g_assert (read16 (p) == 0x0001);
5956 // FIXME: Support named parameters
/* Only the 2-bool ctor (isJITTrackingEnabled, isJITOptimizerDisabled)
 * is decoded here. */
5957 sig = mono_method_signature (attr->ctor);
5958 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5960 /* Two boolean arguments */
5964 mono_custom_attrs_free (attrs);
/* Publish the computed value before marking the cache as initialized. */
5967 ass->jit_optimizer_disabled = val;
5968 mono_memory_barrier ();
5969 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call to CMETHOD (with signature FSIG) from METHOD can
 * be compiled as a real tail call.  Starts from an arch-specific or
 * signature-equality baseline, then vetoes the tail call for anything that
 * could keep a pointer into the current frame alive or that requires
 * caller-frame bookkeeping (byref/ptr args, valuetype `this`, pinvoke,
 * save_lmf, most wrappers).
 */
5975 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5977 gboolean supported_tail_call;
5980 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5981 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback: identical signatures and no struct return. */
5983 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5986 for (i = 0; i < fsig->param_count; ++i) {
5987 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5988 /* These can point to the current method's stack */
5989 supported_tail_call = FALSE;
5991 if (fsig->hasthis && cmethod->klass->valuetype)
5992 /* this might point to the current method's stack */
5993 supported_tail_call = FALSE;
5994 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5995 supported_tail_call = FALSE;
/* The LMF is popped when the caller's frame unwinds, which a tail call
 * would skip. */
5996 if (cfg->method->save_lmf)
5997 supported_tail_call = FALSE;
5998 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5999 supported_tail_call = FALSE;
6001 /* Debugging support */
/* mono_debug_count () lets tail calls be bisected via an env var. */
6003 if (supported_tail_call) {
6004 if (!mono_debug_count ())
6005 supported_tail_call = FALSE;
6009 return supported_tail_call;
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
 */
/* Returns TRUE only for corlib's ThreadLocal`1.tlsdata field: name, parent
 * class name, and defining image must all match. */
6017 is_magic_tls_access (MonoClassField *field)
6019 if (strcmp (field->name, "tlsdata"))
6021 if (strcmp (field->parent->name, "ThreadLocal`1"))
6023 return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 *
 * The packed offset encodes the static-data chunk index in the top byte
 * (biased by 1) and the byte offset within the chunk in the low 24 bits.
 */
6032 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6035 int static_data_reg, array_reg, dreg;
6036 int offset2_reg, idx_reg;
6037 // inlined access to the tls data
6038 // idx = (offset >> 24) - 1;
6039 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data (array of chunk pointers). */
6040 static_data_reg = alloc_ireg (cfg);
6041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scale to pointer size (<<3 on 64-bit,
 * <<2 on 32-bit) to index the chunk-pointer array. */
6042 idx_reg = alloc_ireg (cfg);
6043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6046 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] (base of the chunk). */
6047 array_reg = alloc_ireg (cfg);
6048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff (byte offset inside the chunk). */
6049 offset2_reg = alloc_ireg (cfg);
6050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = chunk base + intra-chunk offset. */
6051 dreg = alloc_ireg (cfg);
6052 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
 * this address is cached per-method in cached_tls_addr.
 *
 *   THREAD_LOCAL is the ThreadLocal<T> instance whose tls_offset field holds
 * the packed offset; the resulting address is stored in a compiler temp so
 * subsequent accesses in the same method reuse it.
 */
6061 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6063 MonoInst *load, *addr, *temp, *store, *thread_ins;
6064 MonoClassField *offset_field;
/* Fast path: address was already computed for this method. */
6066 if (*cached_tls_addr) {
6067 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6070 thread_ins = mono_get_thread_intrinsic (cfg);
6071 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the packed tls offset out of the ThreadLocal<T> instance. */
6073 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6075 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No arch intrinsic for the current thread: call the icall instead.
 * NOTE(review): the if/else around these two paths is elided here. */
6077 MonoMethod *thread_method;
6078 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6079 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6081 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6082 addr->klass = mono_class_from_mono_type (tls_field->type);
6083 addr->type = STACK_MP;
/* Cache the computed address in a local temp for later reuse. */
6084 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6085 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6087 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6092 * mono_method_to_ir:
6094 * Translate the .net IL into linear IR.
6097 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6098 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6099 guint inline_offset, gboolean is_virtual_call)
6102 MonoInst *ins, **sp, **stack_start;
6103 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6104 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6105 MonoMethod *cmethod, *method_definition;
6106 MonoInst **arg_array;
6107 MonoMethodHeader *header;
6109 guint32 token, ins_flag;
6111 MonoClass *constrained_call = NULL;
6112 unsigned char *ip, *end, *target, *err_pos;
6113 static double r8_0 = 0.0;
6114 MonoMethodSignature *sig;
6115 MonoGenericContext *generic_context = NULL;
6116 MonoGenericContainer *generic_container = NULL;
6117 MonoType **param_types;
6118 int i, n, start_new_bblock, dreg;
6119 int num_calls = 0, inline_costs = 0;
6120 int breakpoint_id = 0;
6122 MonoBoolean security, pinvoke;
6123 MonoSecurityManager* secman = NULL;
6124 MonoDeclSecurityActions actions;
6125 GSList *class_inits = NULL;
6126 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6128 gboolean init_locals, seq_points, skip_dead_blocks;
6129 gboolean disable_inline, sym_seq_points = FALSE;
6130 MonoInst *cached_tls_addr = NULL;
6131 MonoDebugMethodInfo *minfo;
6132 MonoBitSet *seq_point_locs = NULL;
6134 disable_inline = is_jit_optimizer_disabled (method);
6136 /* serialization and xdomain stuff may need access to private fields and methods */
6137 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6138 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6139 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6140 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6141 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6142 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6144 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
6146 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6147 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6148 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6149 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6150 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6152 image = method->klass->image;
6153 header = mono_method_get_header (method);
6155 MonoLoaderError *error;
6157 if ((error = mono_loader_get_last_error ())) {
6158 mono_cfg_set_exception (cfg, error->exception_type);
6160 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6161 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6163 goto exception_exit;
6165 generic_container = mono_method_get_generic_container (method);
6166 sig = mono_method_signature (method);
6167 num_args = sig->hasthis + sig->param_count;
6168 ip = (unsigned char*)header->code;
6169 cfg->cil_start = ip;
6170 end = ip + header->code_size;
6171 cfg->stat_cil_code_size += header->code_size;
6172 init_locals = header->init_locals;
6174 seq_points = cfg->gen_seq_points && cfg->method == method;
6175 #ifdef PLATFORM_ANDROID
6176 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6179 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6180 /* We could hit a seq point before attaching to the JIT (#8338) */
6184 if (cfg->gen_seq_points && cfg->method == method) {
6185 minfo = mono_debug_lookup_method (method);
6187 int i, n_il_offsets;
6191 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6192 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6193 sym_seq_points = TRUE;
6194 for (i = 0; i < n_il_offsets; ++i) {
6195 if (il_offsets [i] < header->code_size)
6196 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6202 * Methods without init_locals set could cause asserts in various passes
6207 method_definition = method;
6208 while (method_definition->is_inflated) {
6209 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6210 method_definition = imethod->declaring;
6213 /* SkipVerification is not allowed if core-clr is enabled */
6214 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6216 dont_verify_stloc = TRUE;
6219 if (mono_debug_using_mono_debugger ())
6220 cfg->keep_cil_nops = TRUE;
6222 if (sig->is_inflated)
6223 generic_context = mono_method_get_context (method);
6224 else if (generic_container)
6225 generic_context = &generic_container->context;
6226 cfg->generic_context = generic_context;
6228 if (!cfg->generic_sharing_context)
6229 g_assert (!sig->has_type_parameters);
6231 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6232 g_assert (method->is_inflated);
6233 g_assert (mono_method_get_context (method)->method_inst);
6235 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6236 g_assert (sig->generic_param_count);
6238 if (cfg->method == method) {
6239 cfg->real_offset = 0;
6241 cfg->real_offset = inline_offset;
6244 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6245 cfg->cil_offset_to_bb_len = header->code_size;
6247 cfg->current_method = method;
6249 if (cfg->verbose_level > 2)
6250 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6252 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6254 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6255 for (n = 0; n < sig->param_count; ++n)
6256 param_types [n + sig->hasthis] = sig->params [n];
6257 cfg->arg_types = param_types;
6259 dont_inline = g_list_prepend (dont_inline, method);
6260 if (cfg->method == method) {
6262 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6263 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6266 NEW_BBLOCK (cfg, start_bblock);
6267 cfg->bb_entry = start_bblock;
6268 start_bblock->cil_code = NULL;
6269 start_bblock->cil_length = 0;
6270 #if defined(__native_client_codegen__)
6271 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6272 ins->dreg = alloc_dreg (cfg, STACK_I4);
6273 MONO_ADD_INS (start_bblock, ins);
6277 NEW_BBLOCK (cfg, end_bblock);
6278 cfg->bb_exit = end_bblock;
6279 end_bblock->cil_code = NULL;
6280 end_bblock->cil_length = 0;
6281 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6282 g_assert (cfg->num_bblocks == 2);
6284 arg_array = cfg->args;
6286 if (header->num_clauses) {
6287 cfg->spvars = g_hash_table_new (NULL, NULL);
6288 cfg->exvars = g_hash_table_new (NULL, NULL);
6290 /* handle exception clauses */
6291 for (i = 0; i < header->num_clauses; ++i) {
6292 MonoBasicBlock *try_bb;
6293 MonoExceptionClause *clause = &header->clauses [i];
6294 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6295 try_bb->real_offset = clause->try_offset;
6296 try_bb->try_start = TRUE;
6297 try_bb->region = ((i + 1) << 8) | clause->flags;
6298 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6299 tblock->real_offset = clause->handler_offset;
6300 tblock->flags |= BB_EXCEPTION_HANDLER;
6302 link_bblock (cfg, try_bb, tblock);
6304 if (*(ip + clause->handler_offset) == CEE_POP)
6305 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6307 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6308 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6309 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6310 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6311 MONO_ADD_INS (tblock, ins);
6313 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6314 /* finally clauses already have a seq point */
6315 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6316 MONO_ADD_INS (tblock, ins);
6319 /* todo: is a fault block unsafe to optimize? */
6320 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6321 tblock->flags |= BB_EXCEPTION_UNSAFE;
6325 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6327 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6329 /* catch and filter blocks get the exception object on the stack */
6330 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6331 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6332 MonoInst *dummy_use;
6334 /* mostly like handle_stack_args (), but just sets the input args */
6335 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6336 tblock->in_scount = 1;
6337 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6338 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6341 * Add a dummy use for the exvar so its liveness info will be
6345 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6347 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6348 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6349 tblock->flags |= BB_EXCEPTION_HANDLER;
6350 tblock->real_offset = clause->data.filter_offset;
6351 tblock->in_scount = 1;
6352 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6353 /* The filter block shares the exvar with the handler block */
6354 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6355 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6356 MONO_ADD_INS (tblock, ins);
6360 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6361 clause->data.catch_class &&
6362 cfg->generic_sharing_context &&
6363 mono_class_check_context_used (clause->data.catch_class)) {
6365 * In shared generic code with catch
6366 * clauses containing type variables
6367 * the exception handling code has to
6368 * be able to get to the rgctx.
6369 * Therefore we have to make sure that
6370 * the vtable/mrgctx argument (for
6371 * static or generic methods) or the
6372 * "this" argument (for non-static
6373 * methods) are live.
6375 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6376 mini_method_get_context (method)->method_inst ||
6377 method->klass->valuetype) {
6378 mono_get_vtable_var (cfg);
6380 MonoInst *dummy_use;
6382 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6387 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6388 cfg->cbb = start_bblock;
6389 cfg->args = arg_array;
6390 mono_save_args (cfg, sig, inline_args);
6393 /* FIRST CODE BLOCK */
6394 NEW_BBLOCK (cfg, bblock);
6395 bblock->cil_code = ip;
6399 ADD_BBLOCK (cfg, bblock);
6401 if (cfg->method == method) {
6402 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6403 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6404 MONO_INST_NEW (cfg, ins, OP_BREAK);
6405 MONO_ADD_INS (bblock, ins);
6409 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6410 secman = mono_security_manager_get_methods ();
6412 security = (secman && mono_method_has_declsec (method));
6413 /* at this point having security doesn't mean we have any code to generate */
6414 if (security && (cfg->method == method)) {
6415 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6416 * And we do not want to enter the next section (with allocation) if we
6417 * have nothing to generate */
6418 security = mono_declsec_get_demands (method, &actions);
6421 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6422 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6424 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6425 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6426 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6428 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6429 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6433 mono_custom_attrs_free (custom);
6436 custom = mono_custom_attrs_from_class (wrapped->klass);
6437 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6441 mono_custom_attrs_free (custom);
6444 /* not a P/Invoke after all */
6449 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6450 /* we use a separate basic block for the initialization code */
6451 NEW_BBLOCK (cfg, init_localsbb);
6452 cfg->bb_init = init_localsbb;
6453 init_localsbb->real_offset = cfg->real_offset;
6454 start_bblock->next_bb = init_localsbb;
6455 init_localsbb->next_bb = bblock;
6456 link_bblock (cfg, start_bblock, init_localsbb);
6457 link_bblock (cfg, init_localsbb, bblock);
6459 cfg->cbb = init_localsbb;
6461 start_bblock->next_bb = bblock;
6462 link_bblock (cfg, start_bblock, bblock);
6465 /* at this point we know, if security is TRUE, that some code needs to be generated */
6466 if (security && (cfg->method == method)) {
6469 cfg->stat_cas_demand_generation++;
6471 if (actions.demand.blob) {
6472 /* Add code for SecurityAction.Demand */
6473 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6474 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6475 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6476 mono_emit_method_call (cfg, secman->demand, args, NULL);
6478 if (actions.noncasdemand.blob) {
6479 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6480 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6481 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6482 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6483 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6484 mono_emit_method_call (cfg, secman->demand, args, NULL);
6486 if (actions.demandchoice.blob) {
6487 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6488 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6489 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6490 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6491 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6495 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6497 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6500 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6501 /* check if this is native code, e.g. an icall or a p/invoke */
6502 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6503 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6505 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6506 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6508 /* if this ia a native call then it can only be JITted from platform code */
6509 if ((icall || pinvk) && method->klass && method->klass->image) {
6510 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6511 MonoException *ex = icall ? mono_get_exception_security () :
6512 mono_get_exception_method_access ();
6513 emit_throw_exception (cfg, ex);
6520 if (header->code_size == 0)
6523 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6528 if (cfg->method == method)
6529 mono_debug_init_method (cfg, bblock, breakpoint_id);
6531 for (n = 0; n < header->num_locals; ++n) {
6532 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6537 /* We force the vtable variable here for all shared methods
6538 for the possibility that they might show up in a stack
6539 trace where their exact instantiation is needed. */
6540 if (cfg->generic_sharing_context && method == cfg->method) {
6541 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6542 mini_method_get_context (method)->method_inst ||
6543 method->klass->valuetype) {
6544 mono_get_vtable_var (cfg);
6546 /* FIXME: Is there a better way to do this?
6547 We need the variable live for the duration
6548 of the whole method. */
6549 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6553 /* add a check for this != NULL to inlined methods */
6554 if (is_virtual_call) {
6557 NEW_ARGLOAD (cfg, arg_ins, 0);
6558 MONO_ADD_INS (cfg->cbb, arg_ins);
6559 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6562 skip_dead_blocks = !dont_verify;
6563 if (skip_dead_blocks) {
6564 original_bb = bb = mono_basic_block_split (method, &error);
6565 if (!mono_error_ok (&error)) {
6566 mono_error_cleanup (&error);
6572 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6573 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6576 start_new_bblock = 0;
6579 if (cfg->method == method)
6580 cfg->real_offset = ip - header->code;
6582 cfg->real_offset = inline_offset;
6587 if (start_new_bblock) {
6588 bblock->cil_length = ip - bblock->cil_code;
6589 if (start_new_bblock == 2) {
6590 g_assert (ip == tblock->cil_code);
6592 GET_BBLOCK (cfg, tblock, ip);
6594 bblock->next_bb = tblock;
6597 start_new_bblock = 0;
6598 for (i = 0; i < bblock->in_scount; ++i) {
6599 if (cfg->verbose_level > 3)
6600 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6601 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6605 g_slist_free (class_inits);
6608 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6609 link_bblock (cfg, bblock, tblock);
6610 if (sp != stack_start) {
6611 handle_stack_args (cfg, stack_start, sp - stack_start);
6613 CHECK_UNVERIFIABLE (cfg);
6615 bblock->next_bb = tblock;
6618 for (i = 0; i < bblock->in_scount; ++i) {
6619 if (cfg->verbose_level > 3)
6620 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6621 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6624 g_slist_free (class_inits);
6629 if (skip_dead_blocks) {
6630 int ip_offset = ip - header->code;
6632 if (ip_offset == bb->end)
6636 int op_size = mono_opcode_size (ip, end);
6637 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6639 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6641 if (ip_offset + op_size == bb->end) {
6642 MONO_INST_NEW (cfg, ins, OP_NOP);
6643 MONO_ADD_INS (bblock, ins);
6644 start_new_bblock = 1;
6652 * Sequence points are points where the debugger can place a breakpoint.
6653 * Currently, we generate these automatically at points where the IL
6656 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6658 * Make methods interruptable at the beginning, and at the targets of
6659 * backward branches.
6660 * Also, do this at the start of every bblock in methods with clauses too,
6661 * to be able to handle instructions with imprecise control flow like
6663 * Backward branches are handled at the end of method-to-ir ().
6665 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6667 /* Avoid sequence points on empty IL like .volatile */
6668 // FIXME: Enable this
6669 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6670 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6671 MONO_ADD_INS (cfg->cbb, ins);
6674 bblock->real_offset = cfg->real_offset;
6676 if ((cfg->method == method) && cfg->coverage_info) {
6677 guint32 cil_offset = ip - header->code;
6678 cfg->coverage_info->data [cil_offset].cil_code = ip;
6680 /* TODO: Use an increment here */
6681 #if defined(TARGET_X86)
6682 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6683 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6685 MONO_ADD_INS (cfg->cbb, ins);
6687 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6692 if (cfg->verbose_level > 3)
6693 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6697 if (seq_points && !sym_seq_points && sp != stack_start) {
6699 * The C# compiler uses these nops to notify the JIT that it should
6700 * insert seq points.
6702 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6703 MONO_ADD_INS (cfg->cbb, ins);
6705 if (cfg->keep_cil_nops)
6706 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6708 MONO_INST_NEW (cfg, ins, OP_NOP);
6710 MONO_ADD_INS (bblock, ins);
6713 if (should_insert_brekpoint (cfg->method)) {
6714 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6716 MONO_INST_NEW (cfg, ins, OP_NOP);
6719 MONO_ADD_INS (bblock, ins);
6725 CHECK_STACK_OVF (1);
6726 n = (*ip)-CEE_LDARG_0;
6728 EMIT_NEW_ARGLOAD (cfg, ins, n);
6736 CHECK_STACK_OVF (1);
6737 n = (*ip)-CEE_LDLOC_0;
6739 EMIT_NEW_LOCLOAD (cfg, ins, n);
6748 n = (*ip)-CEE_STLOC_0;
6751 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6753 emit_stloc_ir (cfg, sp, header, n);
6760 CHECK_STACK_OVF (1);
6763 EMIT_NEW_ARGLOAD (cfg, ins, n);
6769 CHECK_STACK_OVF (1);
6772 NEW_ARGLOADA (cfg, ins, n);
6773 MONO_ADD_INS (cfg->cbb, ins);
6783 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6785 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6790 CHECK_STACK_OVF (1);
6793 EMIT_NEW_LOCLOAD (cfg, ins, n);
6797 case CEE_LDLOCA_S: {
6798 unsigned char *tmp_ip;
6800 CHECK_STACK_OVF (1);
6801 CHECK_LOCAL (ip [1]);
6803 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6809 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6818 CHECK_LOCAL (ip [1]);
6819 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6821 emit_stloc_ir (cfg, sp, header, ip [1]);
6826 CHECK_STACK_OVF (1);
6827 EMIT_NEW_PCONST (cfg, ins, NULL);
6828 ins->type = STACK_OBJ;
6833 CHECK_STACK_OVF (1);
6834 EMIT_NEW_ICONST (cfg, ins, -1);
6847 CHECK_STACK_OVF (1);
6848 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6854 CHECK_STACK_OVF (1);
6856 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6862 CHECK_STACK_OVF (1);
6863 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6869 CHECK_STACK_OVF (1);
6870 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6871 ins->type = STACK_I8;
6872 ins->dreg = alloc_dreg (cfg, STACK_I8);
6874 ins->inst_l = (gint64)read64 (ip);
6875 MONO_ADD_INS (bblock, ins);
6881 gboolean use_aotconst = FALSE;
6883 #ifdef TARGET_POWERPC
6884 /* FIXME: Clean this up */
6885 if (cfg->compile_aot)
6886 use_aotconst = TRUE;
6889 /* FIXME: we should really allocate this only late in the compilation process */
6890 f = mono_domain_alloc (cfg->domain, sizeof (float));
6892 CHECK_STACK_OVF (1);
6898 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6900 dreg = alloc_freg (cfg);
6901 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6902 ins->type = STACK_R8;
6904 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6905 ins->type = STACK_R8;
6906 ins->dreg = alloc_dreg (cfg, STACK_R8);
6908 MONO_ADD_INS (bblock, ins);
6918 gboolean use_aotconst = FALSE;
6920 #ifdef TARGET_POWERPC
6921 /* FIXME: Clean this up */
6922 if (cfg->compile_aot)
6923 use_aotconst = TRUE;
6926 /* FIXME: we should really allocate this only late in the compilation process */
6927 d = mono_domain_alloc (cfg->domain, sizeof (double));
6929 CHECK_STACK_OVF (1);
6935 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6937 dreg = alloc_freg (cfg);
6938 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6939 ins->type = STACK_R8;
6941 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6942 ins->type = STACK_R8;
6943 ins->dreg = alloc_dreg (cfg, STACK_R8);
6945 MONO_ADD_INS (bblock, ins);
6954 MonoInst *temp, *store;
6956 CHECK_STACK_OVF (1);
6960 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6961 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6963 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6966 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6979 if (sp [0]->type == STACK_R8)
6980 /* we need to pop the value from the x86 FP stack */
6981 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6987 INLINE_FAILURE ("jmp");
6988 GSHAREDVT_FAILURE (*ip);
6991 if (stack_start != sp)
6993 token = read32 (ip + 1);
6994 /* FIXME: check the signature matches */
6995 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6997 if (!cmethod || mono_loader_get_last_error ())
7000 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7001 GENERIC_SHARING_FAILURE (CEE_JMP);
7003 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
7004 CHECK_CFG_EXCEPTION;
7006 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7008 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7011 /* Handle tail calls similarly to calls */
7012 n = fsig->param_count + fsig->hasthis;
7014 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7015 call->method = cmethod;
7016 call->tail_call = TRUE;
7017 call->signature = mono_method_signature (cmethod);
7018 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7019 call->inst.inst_p0 = cmethod;
7020 for (i = 0; i < n; ++i)
7021 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7023 mono_arch_emit_call (cfg, call);
7024 MONO_ADD_INS (bblock, (MonoInst*)call);
7027 for (i = 0; i < num_args; ++i)
7028 /* Prevent arguments from being optimized away */
7029 arg_array [i]->flags |= MONO_INST_VOLATILE;
7031 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7032 ins = (MonoInst*)call;
7033 ins->inst_p0 = cmethod;
7034 MONO_ADD_INS (bblock, ins);
7038 start_new_bblock = 1;
7043 case CEE_CALLVIRT: {
7044 MonoInst *addr = NULL;
7045 MonoMethodSignature *fsig = NULL;
7047 int virtual = *ip == CEE_CALLVIRT;
7048 int calli = *ip == CEE_CALLI;
7049 gboolean pass_imt_from_rgctx = FALSE;
7050 MonoInst *imt_arg = NULL;
7051 gboolean pass_vtable = FALSE;
7052 gboolean pass_mrgctx = FALSE;
7053 MonoInst *vtable_arg = NULL;
7054 gboolean check_this = FALSE;
7055 gboolean supported_tail_call = FALSE;
7056 gboolean need_seq_point = FALSE;
7057 guint32 call_opcode = *ip;
7058 gboolean emit_widen = TRUE;
7059 gboolean push_res = TRUE;
7060 gboolean skip_ret = FALSE;
7063 token = read32 (ip + 1);
7068 //GSHAREDVT_FAILURE (*ip);
7073 fsig = mini_get_signature (method, token, generic_context);
7074 n = fsig->param_count + fsig->hasthis;
7076 if (method->dynamic && fsig->pinvoke) {
7080 * This is a call through a function pointer using a pinvoke
7081 * signature. Have to create a wrapper and call that instead.
7082 * FIXME: This is very slow, need to create a wrapper at JIT time
7083 * instead based on the signature.
7085 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7086 EMIT_NEW_PCONST (cfg, args [1], fsig);
7088 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7091 MonoMethod *cil_method;
7093 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7094 cil_method = cmethod;
7096 if (constrained_call) {
7097 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7098 if (cfg->verbose_level > 2)
7099 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7100 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7101 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7102 cfg->generic_sharing_context)) {
7103 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7106 if (cfg->verbose_level > 2)
7107 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7109 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7111 * This is needed since get_method_constrained can't find
7112 * the method in klass representing a type var.
7113 * The type var is guaranteed to be a reference type in this
7116 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7117 g_assert (!cmethod->klass->valuetype);
7119 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7124 if (!cmethod || mono_loader_get_last_error ())
7126 if (!dont_verify && !cfg->skip_visibility) {
7127 MonoMethod *target_method = cil_method;
7128 if (method->is_inflated) {
7129 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7131 if (!mono_method_can_access_method (method_definition, target_method) &&
7132 !mono_method_can_access_method (method, cil_method))
7133 METHOD_ACCESS_FAILURE;
7136 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7137 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7139 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7140 /* MS.NET seems to silently convert this to a callvirt */
7145 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7146 * converts to a callvirt.
7148 * tests/bug-515884.il is an example of this behavior
7150 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7151 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7152 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7156 if (!cmethod->klass->inited)
7157 if (!mono_class_init (cmethod->klass))
7158 TYPE_LOAD_ERROR (cmethod->klass);
7160 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7161 mini_class_is_system_array (cmethod->klass)) {
7162 array_rank = cmethod->klass->rank;
7163 fsig = mono_method_signature (cmethod);
7165 fsig = mono_method_signature (cmethod);
7170 if (fsig->pinvoke) {
7171 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7172 check_for_pending_exc, FALSE);
7173 fsig = mono_method_signature (wrapper);
7174 } else if (constrained_call) {
7175 fsig = mono_method_signature (cmethod);
7177 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7181 mono_save_token_info (cfg, image, token, cil_method);
7183 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7185 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7186 * foo (bar (), baz ())
7187 * works correctly. MS does this also:
7188 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7189 * The problem with this approach is that the debugger will stop after all calls returning a value,
7190 * even for simple cases, like:
7193 /* Special case a few common successor opcodes */
7194 if (!(ip + 5 < end && ip [5] == CEE_POP))
7195 need_seq_point = TRUE;
7198 n = fsig->param_count + fsig->hasthis;
7200 /* Don't support calls made using type arguments for now */
7202 if (cfg->gsharedvt) {
7203 if (mini_is_gsharedvt_signature (cfg, fsig))
7204 GSHAREDVT_FAILURE (*ip);
7208 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7209 if (check_linkdemand (cfg, method, cmethod))
7210 INLINE_FAILURE ("linkdemand");
7211 CHECK_CFG_EXCEPTION;
7214 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7215 g_assert_not_reached ();
7218 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7221 if (!cfg->generic_sharing_context && cmethod)
7222 g_assert (!mono_method_check_context_used (cmethod));
7226 //g_assert (!virtual || fsig->hasthis);
7230 if (constrained_call) {
7231 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7233 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7235 /* Special case Object:ToString () as its easy to implement */
7236 if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "ToString")) {
7240 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7241 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7242 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7244 } else if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "GetHashCode")) {
7248 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7249 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7250 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7252 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7253 /* The 'Own method' case below */
7255 GSHAREDVT_FAILURE (*ip);
7259 * We have the `constrained.' prefix opcode.
7261 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7263 * The type parameter is instantiated as a valuetype,
7264 * but that type doesn't override the method we're
7265 * calling, so we need to box `this'.
7267 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7268 ins->klass = constrained_call;
7269 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7271 CHECK_CFG_EXCEPTION;
7272 } else if (!constrained_call->valuetype) {
7273 int dreg = alloc_ireg_ref (cfg);
7276 * The type parameter is instantiated as a reference
7277 * type. We have a managed pointer on the stack, so
7278 * we need to dereference it here.
7280 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7281 ins->type = STACK_OBJ;
7284 if (cmethod->klass->valuetype) {
7287 /* Interface method */
7290 mono_class_setup_vtable (constrained_call);
7291 CHECK_TYPELOAD (constrained_call);
7292 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7294 TYPE_LOAD_ERROR (constrained_call);
7295 slot = mono_method_get_vtable_slot (cmethod);
7297 TYPE_LOAD_ERROR (cmethod->klass);
7298 cmethod = constrained_call->vtable [ioffset + slot];
7300 if (cmethod->klass == mono_defaults.enum_class) {
7301 /* Enum implements some interfaces, so treat this as the first case */
7302 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7303 ins->klass = constrained_call;
7304 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7306 CHECK_CFG_EXCEPTION;
7311 constrained_call = NULL;
7314 if (!calli && check_call_signature (cfg, fsig, sp))
7317 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7319 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7320 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7328 * If the callee is a shared method, then its static cctor
7329 * might not get called after the call was patched.
7331 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7332 emit_generic_class_init (cfg, cmethod->klass);
7333 CHECK_TYPELOAD (cmethod->klass);
7336 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
7337 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
7338 gboolean sharable = FALSE;
7340 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7343 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7344 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
7345 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7347 sharable = sharing_enabled && context_sharable;
7351 * Pass vtable iff target method might
7352 * be shared, which means that sharing
7353 * is enabled for its class and its
7354 * context is sharable (and it's not a
7357 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
7361 if (cmethod && mini_method_get_context (cmethod) &&
7362 mini_method_get_context (cmethod)->method_inst) {
7363 g_assert (!pass_vtable);
7365 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7368 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7369 MonoGenericContext *context = mini_method_get_context (cmethod);
7370 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7372 if (sharing_enabled && context_sharable)
7374 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7379 if (cfg->generic_sharing_context && cmethod) {
7380 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7382 context_used = mini_method_check_context_used (cfg, cmethod);
7384 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7385 /* Generic method interface
7386 calls are resolved via a
7387 helper function and don't
7389 if (!cmethod_context || !cmethod_context->method_inst)
7390 pass_imt_from_rgctx = TRUE;
7394 * If a shared method calls another
7395 * shared method then the caller must
7396 * have a generic sharing context
7397 * because the magic trampoline
7398 * requires it. FIXME: We shouldn't
7399 * have to force the vtable/mrgctx
7400 * variable here. Instead there
7401 * should be a flag in the cfg to
7402 * request a generic sharing context.
7405 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7406 mono_get_vtable_var (cfg);
7411 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7413 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7415 CHECK_TYPELOAD (cmethod->klass);
7416 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7421 g_assert (!vtable_arg);
7423 if (!cfg->compile_aot) {
7425 * emit_get_rgctx_method () calls mono_class_vtable () so check
7426 * for type load errors before.
7428 mono_class_setup_vtable (cmethod->klass);
7429 CHECK_TYPELOAD (cmethod->klass);
7432 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7434 /* !marshalbyref is needed to properly handle generic methods + remoting */
7435 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7436 MONO_METHOD_IS_FINAL (cmethod)) &&
7437 !cmethod->klass->marshalbyref) {
7444 if (pass_imt_from_rgctx) {
7445 g_assert (!pass_vtable);
7448 imt_arg = emit_get_rgctx_method (cfg, context_used,
7449 cmethod, MONO_RGCTX_INFO_METHOD);
7453 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7455 /* Calling virtual generic methods */
7456 if (cmethod && virtual &&
7457 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7458 !(MONO_METHOD_IS_FINAL (cmethod) &&
7459 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7460 fsig->generic_param_count &&
7461 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7462 MonoInst *this_temp, *this_arg_temp, *store;
7463 MonoInst *iargs [4];
7464 gboolean use_imt = FALSE;
7466 g_assert (fsig->is_inflated);
7468 /* Prevent inlining of methods that contain indirect calls */
7469 INLINE_FAILURE ("virtual generic call");
7471 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7472 GSHAREDVT_FAILURE (*ip);
7474 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7475 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7480 g_assert (!imt_arg);
7482 g_assert (cmethod->is_inflated);
7483 imt_arg = emit_get_rgctx_method (cfg, context_used,
7484 cmethod, MONO_RGCTX_INFO_METHOD);
7485 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7487 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7488 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7489 MONO_ADD_INS (bblock, store);
7491 /* FIXME: This should be a managed pointer */
7492 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7494 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7495 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7496 cmethod, MONO_RGCTX_INFO_METHOD);
7497 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7498 addr = mono_emit_jit_icall (cfg,
7499 mono_helper_compile_generic_method, iargs);
7501 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7503 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7510 * Implement a workaround for the inherent races involved in locking:
7516 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7517 * try block, the Exit () won't be executed, see:
7518 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7519 * To work around this, we extend such try blocks to include the last x bytes
7520 * of the Monitor.Enter () call.
7522 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7523 MonoBasicBlock *tbb;
7525 GET_BBLOCK (cfg, tbb, ip + 5);
7527 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7528 * from Monitor.Enter like ArgumentNullException.
7530 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7531 /* Mark this bblock as needing to be extended */
7532 tbb->extend_try_block = TRUE;
7536 /* Conversion to a JIT intrinsic */
7537 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7539 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7540 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7547 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7548 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7549 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7550 !g_list_find (dont_inline, cmethod)) {
7552 gboolean always = FALSE;
7554 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7555 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7556 /* Prevent inlining of methods that call wrappers */
7557 INLINE_FAILURE ("wrapper call");
7558 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7562 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7564 cfg->real_offset += 5;
7567 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7568 /* *sp is already set by inline_method */
7573 inline_costs += costs;
7579 /* Tail recursion elimination */
7580 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7581 gboolean has_vtargs = FALSE;
7584 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7585 INLINE_FAILURE ("tail call");
7587 /* keep it simple */
7588 for (i = fsig->param_count - 1; i >= 0; i--) {
7589 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7594 for (i = 0; i < n; ++i)
7595 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7596 MONO_INST_NEW (cfg, ins, OP_BR);
7597 MONO_ADD_INS (bblock, ins);
7598 tblock = start_bblock->out_bb [0];
7599 link_bblock (cfg, bblock, tblock);
7600 ins->inst_target_bb = tblock;
7601 start_new_bblock = 1;
7603 /* skip the CEE_RET, too */
7604 if (ip_in_bb (cfg, bblock, ip + 5))
7611 inline_costs += 10 * num_calls++;
7614 * Making generic calls out of gsharedvt methods.
7616 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7617 MonoRgctxInfoType info_type;
7620 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7621 //GSHAREDVT_FAILURE (*ip);
7622 // disable for possible remoting calls
7623 if (fsig->hasthis && (method->klass->marshalbyref || method->klass == mono_defaults.object_class))
7624 GSHAREDVT_FAILURE (*ip);
7625 if (fsig->generic_param_count) {
7626 /* virtual generic call */
7627 g_assert (mono_use_imt);
7628 g_assert (!imt_arg);
7629 /* Same as the virtual generic case above */
7630 imt_arg = emit_get_rgctx_method (cfg, context_used,
7631 cmethod, MONO_RGCTX_INFO_METHOD);
7632 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7637 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7638 /* test_0_multi_dim_arrays () in gshared.cs */
7639 GSHAREDVT_FAILURE (*ip);
7641 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7642 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7644 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7645 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7647 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7649 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7651 * We pass the address to the gsharedvt trampoline in the rgctx reg
7653 MonoInst *callee = addr;
7655 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
7657 GSHAREDVT_FAILURE (*ip);
7659 addr = emit_get_rgctx_sig (cfg, context_used,
7660 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
7661 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
7665 /* Generic sharing */
7666 /* FIXME: only do this for generic methods if
7667 they are not shared! */
7668 if (context_used && !imt_arg && !array_rank &&
7669 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7670 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7671 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7672 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7673 INLINE_FAILURE ("gshared");
7675 g_assert (cfg->generic_sharing_context && cmethod);
7679 * We are compiling a call to a
7680 * generic method from shared code,
7681 * which means that we have to look up
7682 * the method in the rgctx and do an
7685 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7686 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7690 /* Indirect calls */
7692 if (call_opcode == CEE_CALL)
7693 g_assert (context_used);
7694 else if (call_opcode == CEE_CALLI)
7695 g_assert (!vtable_arg);
7697 /* FIXME: what the hell is this??? */
7698 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7699 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7701 /* Prevent inlining of methods with indirect calls */
7702 INLINE_FAILURE ("indirect call");
7704 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7706 * Instead of emitting an indirect call, emit a direct call
7707 * with the contents of the aotconst as the patch info.
7709 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7711 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7712 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7715 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7725 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7726 MonoInst *val = sp [fsig->param_count];
7728 if (val->type == STACK_OBJ) {
7729 MonoInst *iargs [2];
7734 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7737 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7738 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7739 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7740 emit_write_barrier (cfg, addr, val, 0);
7741 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7742 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7745 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7746 if (!cmethod->klass->element_class->valuetype && !readonly)
7747 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7748 CHECK_TYPELOAD (cmethod->klass);
7751 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7754 g_assert_not_reached ();
7761 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7765 /* Tail prefix / tail call optimization */
7767 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7768 /* FIXME: runtime generic context pointer for jumps? */
7769 /* FIXME: handle this for generic sharing eventually */
7771 ((((ins_flag & MONO_INST_TAILCALL) && (call_opcode == CEE_CALL))
7772 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7773 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
7774 supported_tail_call = TRUE;
7775 if (supported_tail_call) {
7778 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7779 INLINE_FAILURE ("tail call");
7781 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7783 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7784 /* Handle tail calls similarly to calls */
7785 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
7787 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7788 call->tail_call = TRUE;
7789 call->method = cmethod;
7790 call->signature = mono_method_signature (cmethod);
7793 * We implement tail calls by storing the actual arguments into the
7794 * argument variables, then emitting a CEE_JMP.
7796 for (i = 0; i < n; ++i) {
7797 /* Prevent argument from being register allocated */
7798 arg_array [i]->flags |= MONO_INST_VOLATILE;
7799 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7803 ins = (MonoInst*)call;
7804 ins->inst_p0 = cmethod;
7805 ins->inst_p1 = arg_array [0];
7806 MONO_ADD_INS (bblock, ins);
7807 link_bblock (cfg, bblock, end_bblock);
7808 start_new_bblock = 1;
7810 // FIXME: Eliminate unreachable epilogs
7813 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7814 * only reachable from this call.
7816 GET_BBLOCK (cfg, tblock, ip + 5);
7817 if (tblock == bblock || tblock->in_count == 0)
7825 * Synchronized wrappers.
7826 * It's hard to determine where to replace a method with its synchronized
7827 * wrapper without causing an infinite recursion. The current solution is
7828 * to add the synchronized wrapper in the trampolines, and to
7829 * change the called method to a dummy wrapper, and resolve that wrapper
7830 * to the real method in mono_jit_compile_method ().
7832 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
7833 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7836 INLINE_FAILURE ("call");
7837 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7838 imt_arg, vtable_arg);
7842 /* End of call, INS should contain the result of the call, if any */
7844 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7847 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7852 CHECK_CFG_EXCEPTION;
7856 g_assert (*ip == CEE_RET);
7860 constrained_call = NULL;
7862 emit_seq_point (cfg, method, ip, FALSE);
7866 if (cfg->method != method) {
7867 /* return from inlined method */
7869 * If in_count == 0, that means the ret is unreachable due to
7870 * being preceded by a throw. In that case, inline_method () will
7871 * handle setting the return value
7872 * (test case: test_0_inline_throw ()).
7874 if (return_var && cfg->cbb->in_count) {
7875 MonoType *ret_type = mono_method_signature (method)->ret;
7881 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7884 //g_assert (returnvar != -1);
7885 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7886 cfg->ret_var_set = TRUE;
7890 MonoType *ret_type = mono_method_signature (method)->ret;
7892 if (seq_points && !sym_seq_points) {
7894 * Place a seq point here too even though the IL stack is not
7895 * empty, so a step over on
7898 * will work correctly.
7900 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7901 MONO_ADD_INS (cfg->cbb, ins);
7904 g_assert (!return_var);
7908 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7911 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7914 if (!cfg->vret_addr) {
7917 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7919 EMIT_NEW_RETLOADA (cfg, ret_addr);
7921 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7922 ins->klass = mono_class_from_mono_type (ret_type);
7925 #ifdef MONO_ARCH_SOFT_FLOAT
7926 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7927 MonoInst *iargs [1];
7931 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7932 mono_arch_emit_setret (cfg, method, conv);
7934 mono_arch_emit_setret (cfg, method, *sp);
7937 mono_arch_emit_setret (cfg, method, *sp);
7942 if (sp != stack_start)
7944 MONO_INST_NEW (cfg, ins, OP_BR);
7946 ins->inst_target_bb = end_bblock;
7947 MONO_ADD_INS (bblock, ins);
7948 link_bblock (cfg, bblock, end_bblock);
7949 start_new_bblock = 1;
7953 MONO_INST_NEW (cfg, ins, OP_BR);
7955 target = ip + 1 + (signed char)(*ip);
7957 GET_BBLOCK (cfg, tblock, target);
7958 link_bblock (cfg, bblock, tblock);
7959 ins->inst_target_bb = tblock;
7960 if (sp != stack_start) {
7961 handle_stack_args (cfg, stack_start, sp - stack_start);
7963 CHECK_UNVERIFIABLE (cfg);
7965 MONO_ADD_INS (bblock, ins);
7966 start_new_bblock = 1;
7967 inline_costs += BRANCH_COST;
7981 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7983 target = ip + 1 + *(signed char*)ip;
7989 inline_costs += BRANCH_COST;
7993 MONO_INST_NEW (cfg, ins, OP_BR);
7996 target = ip + 4 + (gint32)read32(ip);
7998 GET_BBLOCK (cfg, tblock, target);
7999 link_bblock (cfg, bblock, tblock);
8000 ins->inst_target_bb = tblock;
8001 if (sp != stack_start) {
8002 handle_stack_args (cfg, stack_start, sp - stack_start);
8004 CHECK_UNVERIFIABLE (cfg);
8007 MONO_ADD_INS (bblock, ins);
8009 start_new_bblock = 1;
8010 inline_costs += BRANCH_COST;
8017 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8018 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8019 guint32 opsize = is_short ? 1 : 4;
8021 CHECK_OPSIZE (opsize);
8023 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8026 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8031 GET_BBLOCK (cfg, tblock, target);
8032 link_bblock (cfg, bblock, tblock);
8033 GET_BBLOCK (cfg, tblock, ip);
8034 link_bblock (cfg, bblock, tblock);
8036 if (sp != stack_start) {
8037 handle_stack_args (cfg, stack_start, sp - stack_start);
8038 CHECK_UNVERIFIABLE (cfg);
8041 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8042 cmp->sreg1 = sp [0]->dreg;
8043 type_from_op (cmp, sp [0], NULL);
8046 #if SIZEOF_REGISTER == 4
8047 if (cmp->opcode == OP_LCOMPARE_IMM) {
8048 /* Convert it to OP_LCOMPARE */
8049 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8050 ins->type = STACK_I8;
8051 ins->dreg = alloc_dreg (cfg, STACK_I8);
8053 MONO_ADD_INS (bblock, ins);
8054 cmp->opcode = OP_LCOMPARE;
8055 cmp->sreg2 = ins->dreg;
8058 MONO_ADD_INS (bblock, cmp);
8060 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8061 type_from_op (ins, sp [0], NULL);
8062 MONO_ADD_INS (bblock, ins);
8063 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8064 GET_BBLOCK (cfg, tblock, target);
8065 ins->inst_true_bb = tblock;
8066 GET_BBLOCK (cfg, tblock, ip);
8067 ins->inst_false_bb = tblock;
8068 start_new_bblock = 2;
8071 inline_costs += BRANCH_COST;
8086 MONO_INST_NEW (cfg, ins, *ip);
8088 target = ip + 4 + (gint32)read32(ip);
8094 inline_costs += BRANCH_COST;
8098 MonoBasicBlock **targets;
8099 MonoBasicBlock *default_bblock;
8100 MonoJumpInfoBBTable *table;
8101 int offset_reg = alloc_preg (cfg);
8102 int target_reg = alloc_preg (cfg);
8103 int table_reg = alloc_preg (cfg);
8104 int sum_reg = alloc_preg (cfg);
8105 gboolean use_op_switch;
8109 n = read32 (ip + 1);
8112 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8116 CHECK_OPSIZE (n * sizeof (guint32));
8117 target = ip + n * sizeof (guint32);
8119 GET_BBLOCK (cfg, default_bblock, target);
8120 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8122 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8123 for (i = 0; i < n; ++i) {
8124 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8125 targets [i] = tblock;
8126 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8130 if (sp != stack_start) {
8132 * Link the current bb with the targets as well, so handle_stack_args
8133 * will set their in_stack correctly.
8135 link_bblock (cfg, bblock, default_bblock);
8136 for (i = 0; i < n; ++i)
8137 link_bblock (cfg, bblock, targets [i]);
8139 handle_stack_args (cfg, stack_start, sp - stack_start);
8141 CHECK_UNVERIFIABLE (cfg);
8144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8148 for (i = 0; i < n; ++i)
8149 link_bblock (cfg, bblock, targets [i]);
8151 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8152 table->table = targets;
8153 table->table_size = n;
8155 use_op_switch = FALSE;
8157 /* ARM implements SWITCH statements differently */
8158 /* FIXME: Make it use the generic implementation */
8159 if (!cfg->compile_aot)
8160 use_op_switch = TRUE;
8163 if (COMPILE_LLVM (cfg))
8164 use_op_switch = TRUE;
8166 cfg->cbb->has_jump_table = 1;
8168 if (use_op_switch) {
8169 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8170 ins->sreg1 = src1->dreg;
8171 ins->inst_p0 = table;
8172 ins->inst_many_bb = targets;
8173 ins->klass = GUINT_TO_POINTER (n);
8174 MONO_ADD_INS (cfg->cbb, ins);
8176 if (sizeof (gpointer) == 8)
8177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8181 #if SIZEOF_REGISTER == 8
8182 /* The upper word might not be zero, and we add it to a 64 bit address later */
8183 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8186 if (cfg->compile_aot) {
8187 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8189 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8190 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8191 ins->inst_p0 = table;
8192 ins->dreg = table_reg;
8193 MONO_ADD_INS (cfg->cbb, ins);
8196 /* FIXME: Use load_memindex */
8197 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8199 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8201 start_new_bblock = 1;
8202 inline_costs += (BRANCH_COST * 2);
8222 dreg = alloc_freg (cfg);
8225 dreg = alloc_lreg (cfg);
8228 dreg = alloc_ireg_ref (cfg);
8231 dreg = alloc_preg (cfg);
8234 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8235 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8236 ins->flags |= ins_flag;
8238 MONO_ADD_INS (bblock, ins);
8240 if (ins->flags & MONO_INST_VOLATILE) {
8241 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8242 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8243 emit_memory_barrier (cfg, FullBarrier);
8258 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8259 ins->flags |= ins_flag;
8262 if (ins->flags & MONO_INST_VOLATILE) {
8263 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8264 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
8265 emit_memory_barrier (cfg, FullBarrier);
8268 MONO_ADD_INS (bblock, ins);
8270 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8271 emit_write_barrier (cfg, sp [0], sp [1], -1);
8280 MONO_INST_NEW (cfg, ins, (*ip));
8282 ins->sreg1 = sp [0]->dreg;
8283 ins->sreg2 = sp [1]->dreg;
8284 type_from_op (ins, sp [0], sp [1]);
8286 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8288 /* Use the immediate opcodes if possible */
8289 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8290 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8291 if (imm_opcode != -1) {
8292 ins->opcode = imm_opcode;
8293 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8296 sp [1]->opcode = OP_NOP;
8300 MONO_ADD_INS ((cfg)->cbb, (ins));
8302 *sp++ = mono_decompose_opcode (cfg, ins);
8319 MONO_INST_NEW (cfg, ins, (*ip));
8321 ins->sreg1 = sp [0]->dreg;
8322 ins->sreg2 = sp [1]->dreg;
8323 type_from_op (ins, sp [0], sp [1]);
8325 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8326 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8328 /* FIXME: Pass opcode to is_inst_imm */
8330 /* Use the immediate opcodes if possible */
8331 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8334 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8335 if (imm_opcode != -1) {
8336 ins->opcode = imm_opcode;
8337 if (sp [1]->opcode == OP_I8CONST) {
8338 #if SIZEOF_REGISTER == 8
8339 ins->inst_imm = sp [1]->inst_l;
8341 ins->inst_ls_word = sp [1]->inst_ls_word;
8342 ins->inst_ms_word = sp [1]->inst_ms_word;
8346 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8349 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8350 if (sp [1]->next == NULL)
8351 sp [1]->opcode = OP_NOP;
8354 MONO_ADD_INS ((cfg)->cbb, (ins));
8356 *sp++ = mono_decompose_opcode (cfg, ins);
8369 case CEE_CONV_OVF_I8:
8370 case CEE_CONV_OVF_U8:
8374 /* Special case this earlier so we have long constants in the IR */
8375 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8376 int data = sp [-1]->inst_c0;
8377 sp [-1]->opcode = OP_I8CONST;
8378 sp [-1]->type = STACK_I8;
8379 #if SIZEOF_REGISTER == 8
8380 if ((*ip) == CEE_CONV_U8)
8381 sp [-1]->inst_c0 = (guint32)data;
8383 sp [-1]->inst_c0 = data;
8385 sp [-1]->inst_ls_word = data;
8386 if ((*ip) == CEE_CONV_U8)
8387 sp [-1]->inst_ms_word = 0;
8389 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8391 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8398 case CEE_CONV_OVF_I4:
8399 case CEE_CONV_OVF_I1:
8400 case CEE_CONV_OVF_I2:
8401 case CEE_CONV_OVF_I:
8402 case CEE_CONV_OVF_U:
8405 if (sp [-1]->type == STACK_R8) {
8406 ADD_UNOP (CEE_CONV_OVF_I8);
8413 case CEE_CONV_OVF_U1:
8414 case CEE_CONV_OVF_U2:
8415 case CEE_CONV_OVF_U4:
8418 if (sp [-1]->type == STACK_R8) {
8419 ADD_UNOP (CEE_CONV_OVF_U8);
8426 case CEE_CONV_OVF_I1_UN:
8427 case CEE_CONV_OVF_I2_UN:
8428 case CEE_CONV_OVF_I4_UN:
8429 case CEE_CONV_OVF_I8_UN:
8430 case CEE_CONV_OVF_U1_UN:
8431 case CEE_CONV_OVF_U2_UN:
8432 case CEE_CONV_OVF_U4_UN:
8433 case CEE_CONV_OVF_U8_UN:
8434 case CEE_CONV_OVF_I_UN:
8435 case CEE_CONV_OVF_U_UN:
8442 CHECK_CFG_EXCEPTION;
8446 case CEE_ADD_OVF_UN:
8448 case CEE_MUL_OVF_UN:
8450 case CEE_SUB_OVF_UN:
8456 GSHAREDVT_FAILURE (*ip);
8459 token = read32 (ip + 1);
8460 klass = mini_get_class (method, token, generic_context);
8461 CHECK_TYPELOAD (klass);
8463 if (generic_class_is_reference_type (cfg, klass)) {
8464 MonoInst *store, *load;
8465 int dreg = alloc_ireg_ref (cfg);
8467 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8468 load->flags |= ins_flag;
8469 MONO_ADD_INS (cfg->cbb, load);
8471 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8472 store->flags |= ins_flag;
8473 MONO_ADD_INS (cfg->cbb, store);
8475 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8476 emit_write_barrier (cfg, sp [0], sp [1], -1);
8478 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8490 token = read32 (ip + 1);
8491 klass = mini_get_class (method, token, generic_context);
8492 CHECK_TYPELOAD (klass);
8494 /* Optimize the common ldobj+stloc combination */
8504 loc_index = ip [5] - CEE_STLOC_0;
8511 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8512 CHECK_LOCAL (loc_index);
8514 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8515 ins->dreg = cfg->locals [loc_index]->dreg;
8521 /* Optimize the ldobj+stobj combination */
8522 /* The reference case ends up being a load+store anyway */
8523 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8528 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8535 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8544 CHECK_STACK_OVF (1);
8546 n = read32 (ip + 1);
8548 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8549 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8550 ins->type = STACK_OBJ;
8553 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8554 MonoInst *iargs [1];
8556 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8557 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8559 if (cfg->opt & MONO_OPT_SHARED) {
8560 MonoInst *iargs [3];
8562 if (cfg->compile_aot) {
8563 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8565 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8566 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8567 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8568 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8569 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8571 if (bblock->out_of_line) {
8572 MonoInst *iargs [2];
8574 if (image == mono_defaults.corlib) {
8576 * Avoid relocations in AOT and save some space by using a
8577 * version of helper_ldstr specialized to mscorlib.
8579 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8580 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8582 /* Avoid creating the string object */
8583 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8584 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8585 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8589 if (cfg->compile_aot) {
8590 NEW_LDSTRCONST (cfg, ins, image, n);
8592 MONO_ADD_INS (bblock, ins);
8595 NEW_PCONST (cfg, ins, NULL);
8596 ins->type = STACK_OBJ;
8597 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8599 OUT_OF_MEMORY_FAILURE;
8602 MONO_ADD_INS (bblock, ins);
8611 MonoInst *iargs [2];
8612 MonoMethodSignature *fsig;
8615 MonoInst *vtable_arg = NULL;
8618 token = read32 (ip + 1);
8619 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8620 if (!cmethod || mono_loader_get_last_error ())
8622 fsig = mono_method_get_signature (cmethod, image, token);
8626 mono_save_token_info (cfg, image, token, cmethod);
8628 if (!mono_class_init (cmethod->klass))
8629 TYPE_LOAD_ERROR (cmethod->klass);
8631 context_used = mini_method_check_context_used (cfg, cmethod);
8633 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8634 if (check_linkdemand (cfg, method, cmethod))
8635 INLINE_FAILURE ("linkdemand");
8636 CHECK_CFG_EXCEPTION;
8637 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8638 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8641 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8642 emit_generic_class_init (cfg, cmethod->klass);
8643 CHECK_TYPELOAD (cmethod->klass);
8647 if (cfg->gsharedvt) {
8648 if (mini_is_gsharedvt_variable_signature (sig))
8649 GSHAREDVT_FAILURE (*ip);
8653 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8654 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8655 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8656 mono_class_vtable (cfg->domain, cmethod->klass);
8657 CHECK_TYPELOAD (cmethod->klass);
8659 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8660 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8663 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8664 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8666 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8668 CHECK_TYPELOAD (cmethod->klass);
8669 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8674 n = fsig->param_count;
8678 * Generate smaller code for the common newobj <exception> instruction in
8679 * argument checking code.
8681 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8682 is_exception_class (cmethod->klass) && n <= 2 &&
8683 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8684 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8685 MonoInst *iargs [3];
8687 g_assert (!vtable_arg);
8691 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8694 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8698 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8703 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8706 g_assert_not_reached ();
8714 /* move the args to allow room for 'this' in the first position */
8720 /* check_call_signature () requires sp[0] to be set */
8721 this_ins.type = STACK_OBJ;
8723 if (check_call_signature (cfg, fsig, sp))
8728 if (mini_class_is_system_array (cmethod->klass)) {
8729 g_assert (!vtable_arg);
8731 *sp = emit_get_rgctx_method (cfg, context_used,
8732 cmethod, MONO_RGCTX_INFO_METHOD);
8734 /* Avoid varargs in the common case */
8735 if (fsig->param_count == 1)
8736 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8737 else if (fsig->param_count == 2)
8738 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8739 else if (fsig->param_count == 3)
8740 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8741 else if (fsig->param_count == 4)
8742 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
8744 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8745 } else if (cmethod->string_ctor) {
8746 g_assert (!context_used);
8747 g_assert (!vtable_arg);
8748 /* we simply pass a null pointer */
8749 EMIT_NEW_PCONST (cfg, *sp, NULL);
8750 /* now call the string ctor */
8751 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8753 MonoInst* callvirt_this_arg = NULL;
8755 if (cmethod->klass->valuetype) {
8756 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8757 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8758 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8763 * The code generated by mini_emit_virtual_call () expects
8764 * iargs [0] to be a boxed instance, but luckily the vcall
8765 * will be transformed into a normal call there.
8767 } else if (context_used) {
8768 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8771 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8773 CHECK_TYPELOAD (cmethod->klass);
8776 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8777 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8778 * As a workaround, we call class cctors before allocating objects.
8780 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8781 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8782 if (cfg->verbose_level > 2)
8783 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8784 class_inits = g_slist_prepend (class_inits, vtable);
8787 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8790 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8793 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8795 /* Now call the actual ctor */
8796 /* Avoid virtual calls to ctors if possible */
8797 if (cmethod->klass->marshalbyref)
8798 callvirt_this_arg = sp [0];
8801 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8802 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8803 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8808 CHECK_CFG_EXCEPTION;
8809 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8810 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8811 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8812 !g_list_find (dont_inline, cmethod)) {
8815 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8816 cfg->real_offset += 5;
8819 inline_costs += costs - 5;
8821 INLINE_FAILURE ("inline failure");
8822 // FIXME-VT: Clean this up
8823 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8824 GSHAREDVT_FAILURE(*ip);
8825 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8827 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8830 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8831 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8832 } else if (context_used &&
8833 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8834 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8835 MonoInst *cmethod_addr;
8837 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8838 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8840 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
8842 INLINE_FAILURE ("ctor call");
8843 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8844 callvirt_this_arg, NULL, vtable_arg);
8848 if (alloc == NULL) {
8850 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8851 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8865 token = read32 (ip + 1);
8866 klass = mini_get_class (method, token, generic_context);
8867 CHECK_TYPELOAD (klass);
8868 if (sp [0]->type != STACK_OBJ)
8871 context_used = mini_class_check_context_used (cfg, klass);
8873 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8874 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8881 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8884 if (cfg->compile_aot)
8885 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8887 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8889 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8890 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8893 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8894 MonoMethod *mono_castclass;
8895 MonoInst *iargs [1];
8898 mono_castclass = mono_marshal_get_castclass (klass);
8901 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8902 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8903 CHECK_CFG_EXCEPTION;
8904 g_assert (costs > 0);
8907 cfg->real_offset += 5;
8912 inline_costs += costs;
8915 ins = handle_castclass (cfg, klass, *sp, context_used);
8916 CHECK_CFG_EXCEPTION;
8926 token = read32 (ip + 1);
8927 klass = mini_get_class (method, token, generic_context);
8928 CHECK_TYPELOAD (klass);
8929 if (sp [0]->type != STACK_OBJ)
8932 context_used = mini_class_check_context_used (cfg, klass);
8934 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8935 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8942 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8945 if (cfg->compile_aot)
8946 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8948 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8950 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8953 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8954 MonoMethod *mono_isinst;
8955 MonoInst *iargs [1];
8958 mono_isinst = mono_marshal_get_isinst (klass);
8961 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8962 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8963 CHECK_CFG_EXCEPTION;
8964 g_assert (costs > 0);
8967 cfg->real_offset += 5;
8972 inline_costs += costs;
8975 ins = handle_isinst (cfg, klass, *sp, context_used);
8976 CHECK_CFG_EXCEPTION;
8983 case CEE_UNBOX_ANY: {
8987 token = read32 (ip + 1);
8988 klass = mini_get_class (method, token, generic_context);
8989 CHECK_TYPELOAD (klass);
8991 mono_save_token_info (cfg, image, token, klass);
8993 context_used = mini_class_check_context_used (cfg, klass);
8995 if (mini_is_gsharedvt_klass (cfg, klass)) {
8996 MonoInst *obj, *addr, *klass_inst, *args[16];
8999 /* Need to check for nullable types at runtime, but those are disabled in mini_is_gsharedvt_sharable_method*/
9000 if (mono_class_is_nullable (klass))
9001 GSHAREDVT_FAILURE (*ip);
9005 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9011 args [1] = klass_inst;
9014 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
9017 dreg = alloc_dreg (cfg, STACK_MP);
9018 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, dreg, obj->dreg, sizeof (MonoObject));
9019 MONO_ADD_INS (cfg->cbb, addr);
9022 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9030 if (generic_class_is_reference_type (cfg, klass)) {
9031 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9032 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9033 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9040 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9043 /*FIXME AOT support*/
9044 if (cfg->compile_aot)
9045 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9047 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9049 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9050 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9053 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9054 MonoMethod *mono_castclass;
9055 MonoInst *iargs [1];
9058 mono_castclass = mono_marshal_get_castclass (klass);
9061 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9062 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9063 CHECK_CFG_EXCEPTION;
9064 g_assert (costs > 0);
9067 cfg->real_offset += 5;
9071 inline_costs += costs;
9073 ins = handle_castclass (cfg, klass, *sp, context_used);
9074 CHECK_CFG_EXCEPTION;
9082 if (mono_class_is_nullable (klass)) {
9083 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9090 ins = handle_unbox (cfg, klass, sp, context_used);
9096 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9109 token = read32 (ip + 1);
9110 klass = mini_get_class (method, token, generic_context);
9111 CHECK_TYPELOAD (klass);
9113 mono_save_token_info (cfg, image, token, klass);
9115 context_used = mini_class_check_context_used (cfg, klass);
9117 if (generic_class_is_reference_type (cfg, klass)) {
9123 if (klass == mono_defaults.void_class)
9125 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9127 /* frequent check in generic code: box (struct), brtrue */
9129 // FIXME: LLVM can't handle the inconsistent bb linking
9130 if (!mono_class_is_nullable (klass) &&
9131 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9132 (ip [5] == CEE_BRTRUE ||
9133 ip [5] == CEE_BRTRUE_S ||
9134 ip [5] == CEE_BRFALSE ||
9135 ip [5] == CEE_BRFALSE_S)) {
9136 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9138 MonoBasicBlock *true_bb, *false_bb;
9142 if (cfg->verbose_level > 3) {
9143 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9144 printf ("<box+brtrue opt>\n");
9152 target = ip + 1 + (signed char)(*ip);
9159 target = ip + 4 + (gint)(read32 (ip));
9163 g_assert_not_reached ();
9167 * We need to link both bblocks, since it is needed for handling stack
9168 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9169 * Branching to only one of them would lead to inconsistencies, so
9170 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9172 GET_BBLOCK (cfg, true_bb, target);
9173 GET_BBLOCK (cfg, false_bb, ip);
9175 mono_link_bblock (cfg, cfg->cbb, true_bb);
9176 mono_link_bblock (cfg, cfg->cbb, false_bb);
9178 if (sp != stack_start) {
9179 handle_stack_args (cfg, stack_start, sp - stack_start);
9181 CHECK_UNVERIFIABLE (cfg);
9184 if (COMPILE_LLVM (cfg)) {
9185 dreg = alloc_ireg (cfg);
9186 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9187 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9189 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9191 /* The JIT can't eliminate the iconst+compare */
9192 MONO_INST_NEW (cfg, ins, OP_BR);
9193 ins->inst_target_bb = is_true ? true_bb : false_bb;
9194 MONO_ADD_INS (cfg->cbb, ins);
9197 start_new_bblock = 1;
9201 *sp++ = handle_box (cfg, val, klass, context_used);
9204 CHECK_CFG_EXCEPTION;
9213 token = read32 (ip + 1);
9214 klass = mini_get_class (method, token, generic_context);
9215 CHECK_TYPELOAD (klass);
9217 mono_save_token_info (cfg, image, token, klass);
9219 context_used = mini_class_check_context_used (cfg, klass);
9221 if (mono_class_is_nullable (klass)) {
9224 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9225 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9229 ins = handle_unbox (cfg, klass, sp, context_used);
9242 MonoClassField *field;
9245 gboolean is_instance;
9247 gpointer addr = NULL;
9248 gboolean is_special_static;
9250 MonoInst *store_val = NULL;
9253 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9255 if (op == CEE_STFLD) {
9263 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9265 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9268 if (op == CEE_STSFLD) {
9276 token = read32 (ip + 1);
9277 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9278 field = mono_method_get_wrapper_data (method, token);
9279 klass = field->parent;
9282 field = mono_field_from_token (image, token, &klass, generic_context);
9286 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9287 FIELD_ACCESS_FAILURE;
9288 mono_class_init (klass);
9290 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9293 /* if the class is Critical then transparent code cannot access it's fields */
9294 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9295 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9297 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9298 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9299 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9300 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9304 * LDFLD etc. is usable on static fields as well, so convert those cases to
9307 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9319 g_assert_not_reached ();
9321 is_instance = FALSE;
9324 context_used = mini_class_check_context_used (cfg, klass);
9328 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9329 if (op == CEE_STFLD) {
9330 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9332 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
9333 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9334 MonoInst *iargs [5];
9336 GSHAREDVT_FAILURE (op);
9339 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9340 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9341 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9345 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9346 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9347 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9348 CHECK_CFG_EXCEPTION;
9349 g_assert (costs > 0);
9351 cfg->real_offset += 5;
9354 inline_costs += costs;
9356 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9361 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9363 if (mini_is_gsharedvt_klass (cfg, klass)) {
9364 MonoInst *offset_ins;
9366 context_used = mini_class_check_context_used (cfg, klass);
9368 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9369 dreg = alloc_ireg_mp (cfg);
9370 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9371 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9372 // FIXME-VT: wbarriers ?
9374 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9376 if (sp [0]->opcode != OP_LDADDR)
9377 store->flags |= MONO_INST_FAULT;
9379 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9380 /* insert call to write barrier */
9384 dreg = alloc_ireg_mp (cfg);
9385 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9386 emit_write_barrier (cfg, ptr, sp [1], -1);
9389 store->flags |= ins_flag;
9396 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
9397 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9398 MonoInst *iargs [4];
9400 GSHAREDVT_FAILURE (op);
9403 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9404 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9405 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9406 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9407 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9408 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9409 CHECK_CFG_EXCEPTION;
9411 g_assert (costs > 0);
9413 cfg->real_offset += 5;
9417 inline_costs += costs;
9419 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9422 } else if (is_instance) {
9423 if (sp [0]->type == STACK_VTYPE) {
9426 /* Have to compute the address of the variable */
9428 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9430 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9432 g_assert (var->klass == klass);
9434 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9438 if (op == CEE_LDFLDA) {
9439 if (is_magic_tls_access (field)) {
9440 GSHAREDVT_FAILURE (*ip);
9442 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9444 if (sp [0]->type == STACK_OBJ) {
9445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9446 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9449 dreg = alloc_ireg_mp (cfg);
9451 if (mini_is_gsharedvt_klass (cfg, klass)) {
9452 MonoInst *offset_ins;
9454 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9455 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9457 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9459 ins->klass = mono_class_from_mono_type (field->type);
9460 ins->type = STACK_MP;
9466 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9468 if (mini_is_gsharedvt_klass (cfg, klass)) {
9469 MonoInst *offset_ins;
9471 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9472 dreg = alloc_ireg_mp (cfg);
9473 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9474 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9476 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9478 load->flags |= ins_flag;
9479 if (sp [0]->opcode != OP_LDADDR)
9480 load->flags |= MONO_INST_FAULT;
9494 * We can only support shared generic static
9495 * field access on architectures where the
9496 * trampoline code has been extended to handle
9497 * the generic class init.
9499 #ifndef MONO_ARCH_VTABLE_REG
9500 GENERIC_SHARING_FAILURE (op);
9503 context_used = mini_class_check_context_used (cfg, klass);
9505 ftype = mono_field_get_type (field);
9507 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9510 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9511 * to be called here.
9513 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9514 mono_class_vtable (cfg->domain, klass);
9515 CHECK_TYPELOAD (klass);
9517 mono_domain_lock (cfg->domain);
9518 if (cfg->domain->special_static_fields)
9519 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9520 mono_domain_unlock (cfg->domain);
9522 is_special_static = mono_class_field_is_special_static (field);
9524 /* Generate IR to compute the field address */
9525 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9527 * Fast access to TLS data
9528 * Inline version of get_thread_static_data () in
9532 int idx, static_data_reg, array_reg, dreg;
9533 MonoInst *thread_ins;
9535 GSHAREDVT_FAILURE (op);
9537 // offset &= 0x7fffffff;
9538 // idx = (offset >> 24) - 1;
9539 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9541 thread_ins = mono_get_thread_intrinsic (cfg);
9542 MONO_ADD_INS (cfg->cbb, thread_ins);
9543 static_data_reg = alloc_ireg (cfg);
9544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9546 if (cfg->compile_aot) {
9547 int offset_reg, offset2_reg, idx_reg;
9549 /* For TLS variables, this will return the TLS offset */
9550 EMIT_NEW_SFLDACONST (cfg, ins, field);
9551 offset_reg = ins->dreg;
9552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9553 idx_reg = alloc_ireg (cfg);
9554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9558 array_reg = alloc_ireg (cfg);
9559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9560 offset2_reg = alloc_ireg (cfg);
9561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9562 dreg = alloc_ireg (cfg);
9563 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9565 offset = (gsize)addr & 0x7fffffff;
9566 idx = (offset >> 24) - 1;
9568 array_reg = alloc_ireg (cfg);
9569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9570 dreg = alloc_ireg (cfg);
9571 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9573 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9574 (cfg->compile_aot && is_special_static) ||
9575 (context_used && is_special_static)) {
9576 MonoInst *iargs [2];
9578 g_assert (field->parent);
9579 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9581 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9582 field, MONO_RGCTX_INFO_CLASS_FIELD);
9584 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9586 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9587 } else if (context_used) {
9588 MonoInst *static_data;
9591 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9592 method->klass->name_space, method->klass->name, method->name,
9593 depth, field->offset);
9596 if (mono_class_needs_cctor_run (klass, method))
9597 emit_generic_class_init (cfg, klass);
9600 * The pointer we're computing here is
9602 * super_info.static_data + field->offset
9604 static_data = emit_get_rgctx_klass (cfg, context_used,
9605 klass, MONO_RGCTX_INFO_STATIC_DATA);
9607 if (mini_is_gsharedvt_klass (cfg, klass)) {
9608 MonoInst *offset_ins;
9610 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9611 dreg = alloc_ireg_mp (cfg);
9612 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9613 } else if (field->offset == 0) {
9616 int addr_reg = mono_alloc_preg (cfg);
9617 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9619 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9620 MonoInst *iargs [2];
9622 g_assert (field->parent);
9623 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9624 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9625 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9627 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9629 CHECK_TYPELOAD (klass);
9631 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9632 if (!(g_slist_find (class_inits, vtable))) {
9633 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9634 if (cfg->verbose_level > 2)
9635 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9636 class_inits = g_slist_prepend (class_inits, vtable);
9639 if (cfg->run_cctors) {
9641 /* This makes so that inline cannot trigger */
9642 /* .cctors: too many apps depend on them */
9643 /* running with a specific order... */
9644 if (! vtable->initialized)
9645 INLINE_FAILURE ("class init");
9646 ex = mono_runtime_class_init_full (vtable, FALSE);
9648 set_exception_object (cfg, ex);
9649 goto exception_exit;
9653 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9655 if (cfg->compile_aot)
9656 EMIT_NEW_SFLDACONST (cfg, ins, field);
9658 EMIT_NEW_PCONST (cfg, ins, addr);
9660 MonoInst *iargs [1];
9661 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9662 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9666 /* Generate IR to do the actual load/store operation */
9668 if (op == CEE_LDSFLDA) {
9669 ins->klass = mono_class_from_mono_type (ftype);
9670 ins->type = STACK_PTR;
9672 } else if (op == CEE_STSFLD) {
9675 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9676 store->flags |= ins_flag;
9678 gboolean is_const = FALSE;
9679 MonoVTable *vtable = NULL;
9680 gpointer addr = NULL;
9682 if (!context_used) {
9683 vtable = mono_class_vtable (cfg->domain, klass);
9684 CHECK_TYPELOAD (klass);
9686 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9687 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9688 int ro_type = ftype->type;
9690 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9691 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9692 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9695 GSHAREDVT_FAILURE (op);
9697 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9700 case MONO_TYPE_BOOLEAN:
9702 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9706 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9709 case MONO_TYPE_CHAR:
9711 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9715 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9720 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9724 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9730 case MONO_TYPE_FNPTR:
9731 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9732 type_to_eval_stack_type ((cfg), field->type, *sp);
9735 case MONO_TYPE_STRING:
9736 case MONO_TYPE_OBJECT:
9737 case MONO_TYPE_CLASS:
9738 case MONO_TYPE_SZARRAY:
9739 case MONO_TYPE_ARRAY:
9740 if (!mono_gc_is_moving ()) {
9741 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9742 type_to_eval_stack_type ((cfg), field->type, *sp);
9750 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9755 case MONO_TYPE_VALUETYPE:
9765 CHECK_STACK_OVF (1);
9767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9768 load->flags |= ins_flag;
9781 token = read32 (ip + 1);
9782 klass = mini_get_class (method, token, generic_context);
9783 CHECK_TYPELOAD (klass);
9784 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9785 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9786 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9787 generic_class_is_reference_type (cfg, klass)) {
9788 /* insert call to write barrier */
9789 emit_write_barrier (cfg, sp [0], sp [1], -1);
9801 const char *data_ptr;
9803 guint32 field_token;
9809 token = read32 (ip + 1);
9811 klass = mini_get_class (method, token, generic_context);
9812 CHECK_TYPELOAD (klass);
9814 context_used = mini_class_check_context_used (cfg, klass);
9816 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9817 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9818 ins->sreg1 = sp [0]->dreg;
9819 ins->type = STACK_I4;
9820 ins->dreg = alloc_ireg (cfg);
9821 MONO_ADD_INS (cfg->cbb, ins);
9822 *sp = mono_decompose_opcode (cfg, ins);
9827 MonoClass *array_class = mono_array_class_get (klass, 1);
9828 /* FIXME: we cannot get a managed
9829 allocator because we can't get the
9830 open generic class's vtable. We
9831 have the same problem in
9832 handle_alloc(). This
9833 needs to be solved so that we can
9834 have managed allocs of shared
9837 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9838 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9840 MonoMethod *managed_alloc = NULL;
9842 /* FIXME: Decompose later to help abcrem */
9845 args [0] = emit_get_rgctx_klass (cfg, context_used,
9846 array_class, MONO_RGCTX_INFO_VTABLE);
9851 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9853 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9855 if (cfg->opt & MONO_OPT_SHARED) {
9856 /* Decompose now to avoid problems with references to the domainvar */
9857 MonoInst *iargs [3];
9859 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9860 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9863 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9865 /* Decompose later since it is needed by abcrem */
9866 MonoClass *array_type = mono_array_class_get (klass, 1);
9867 mono_class_vtable (cfg->domain, array_type);
9868 CHECK_TYPELOAD (array_type);
9870 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9871 ins->dreg = alloc_ireg_ref (cfg);
9872 ins->sreg1 = sp [0]->dreg;
9873 ins->inst_newa_class = klass;
9874 ins->type = STACK_OBJ;
9875 ins->klass = array_type;
9876 MONO_ADD_INS (cfg->cbb, ins);
9877 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9878 cfg->cbb->has_array_access = TRUE;
9880 /* Needed so mono_emit_load_get_addr () gets called */
9881 mono_get_got_var (cfg);
9891 * we inline/optimize the initialization sequence if possible.
9892 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9893 * for small sizes open code the memcpy
9894 * ensure the rva field is big enough
9896 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9897 MonoMethod *memcpy_method = get_memcpy_method ();
9898 MonoInst *iargs [3];
9899 int add_reg = alloc_ireg_mp (cfg);
9901 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9902 if (cfg->compile_aot) {
9903 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9905 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9907 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9908 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9917 if (sp [0]->type != STACK_OBJ)
9920 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9921 ins->dreg = alloc_preg (cfg);
9922 ins->sreg1 = sp [0]->dreg;
9923 ins->type = STACK_I4;
9924 /* This flag will be inherited by the decomposition */
9925 ins->flags |= MONO_INST_FAULT;
9926 MONO_ADD_INS (cfg->cbb, ins);
9927 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9928 cfg->cbb->has_array_access = TRUE;
9936 if (sp [0]->type != STACK_OBJ)
9939 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9941 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9942 CHECK_TYPELOAD (klass);
9943 /* we need to make sure that this array is exactly the type it needs
9944 * to be for correctness. the wrappers are lax with their usage
9945 * so we need to ignore them here
9947 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9948 MonoClass *array_class = mono_array_class_get (klass, 1);
9949 mini_emit_check_array_type (cfg, sp [0], array_class);
9950 CHECK_TYPELOAD (array_class);
9954 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9969 case CEE_LDELEM_REF: {
9975 if (*ip == CEE_LDELEM) {
9977 token = read32 (ip + 1);
9978 klass = mini_get_class (method, token, generic_context);
9979 CHECK_TYPELOAD (klass);
9980 mono_class_init (klass);
9983 klass = array_access_to_klass (*ip);
9985 if (sp [0]->type != STACK_OBJ)
9988 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9990 if (mini_is_gsharedvt_klass (cfg, klass)) {
9991 // FIXME-VT: OP_ICONST optimization
9992 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9993 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9994 ins->opcode = OP_LOADV_MEMBASE;
9995 } else if (sp [1]->opcode == OP_ICONST) {
9996 int array_reg = sp [0]->dreg;
9997 int index_reg = sp [1]->dreg;
9998 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10000 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10001 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10003 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10004 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10007 if (*ip == CEE_LDELEM)
10014 case CEE_STELEM_I1:
10015 case CEE_STELEM_I2:
10016 case CEE_STELEM_I4:
10017 case CEE_STELEM_I8:
10018 case CEE_STELEM_R4:
10019 case CEE_STELEM_R8:
10020 case CEE_STELEM_REF:
10025 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10027 if (*ip == CEE_STELEM) {
10029 token = read32 (ip + 1);
10030 klass = mini_get_class (method, token, generic_context);
10031 CHECK_TYPELOAD (klass);
10032 mono_class_init (klass);
10035 klass = array_access_to_klass (*ip);
10037 if (sp [0]->type != STACK_OBJ)
10040 emit_array_store (cfg, klass, sp, TRUE);
10042 if (*ip == CEE_STELEM)
10049 case CEE_CKFINITE: {
10053 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10054 ins->sreg1 = sp [0]->dreg;
10055 ins->dreg = alloc_freg (cfg);
10056 ins->type = STACK_R8;
10057 MONO_ADD_INS (bblock, ins);
10059 *sp++ = mono_decompose_opcode (cfg, ins);
10064 case CEE_REFANYVAL: {
10065 MonoInst *src_var, *src;
10067 int klass_reg = alloc_preg (cfg);
10068 int dreg = alloc_preg (cfg);
10070 GSHAREDVT_FAILURE (*ip);
10073 MONO_INST_NEW (cfg, ins, *ip);
10076 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10077 CHECK_TYPELOAD (klass);
10078 mono_class_init (klass);
10080 context_used = mini_class_check_context_used (cfg, klass);
10083 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10085 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10086 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10089 if (context_used) {
10090 MonoInst *klass_ins;
10092 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10093 klass, MONO_RGCTX_INFO_KLASS);
10096 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10097 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10099 mini_emit_class_check (cfg, klass_reg, klass);
10101 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10102 ins->type = STACK_MP;
10107 case CEE_MKREFANY: {
10108 MonoInst *loc, *addr;
10110 GSHAREDVT_FAILURE (*ip);
10113 MONO_INST_NEW (cfg, ins, *ip);
10116 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10117 CHECK_TYPELOAD (klass);
10118 mono_class_init (klass);
10120 context_used = mini_class_check_context_used (cfg, klass);
10122 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10123 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10125 if (context_used) {
10126 MonoInst *const_ins;
10127 int type_reg = alloc_preg (cfg);
10129 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10133 } else if (cfg->compile_aot) {
10134 int const_reg = alloc_preg (cfg);
10135 int type_reg = alloc_preg (cfg);
10137 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10138 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10142 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10143 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10147 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10148 ins->type = STACK_VTYPE;
10149 ins->klass = mono_defaults.typed_reference_class;
10154 case CEE_LDTOKEN: {
10156 MonoClass *handle_class;
10158 CHECK_STACK_OVF (1);
10161 n = read32 (ip + 1);
10163 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10164 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10165 handle = mono_method_get_wrapper_data (method, n);
10166 handle_class = mono_method_get_wrapper_data (method, n + 1);
10167 if (handle_class == mono_defaults.typehandle_class)
10168 handle = &((MonoClass*)handle)->byval_arg;
10171 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10175 mono_class_init (handle_class);
10176 if (cfg->generic_sharing_context) {
10177 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10178 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10179 /* This case handles ldtoken
10180 of an open type, like for
10183 } else if (handle_class == mono_defaults.typehandle_class) {
10184 /* If we get a MONO_TYPE_CLASS
10185 then we need to provide the
10187 instantiation of it. */
10188 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10191 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10192 } else if (handle_class == mono_defaults.fieldhandle_class)
10193 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10194 else if (handle_class == mono_defaults.methodhandle_class)
10195 context_used = mini_method_check_context_used (cfg, handle);
10197 g_assert_not_reached ();
10200 if ((cfg->opt & MONO_OPT_SHARED) &&
10201 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10202 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10203 MonoInst *addr, *vtvar, *iargs [3];
10204 int method_context_used;
10206 method_context_used = mini_method_check_context_used (cfg, method);
10208 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10210 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10211 EMIT_NEW_ICONST (cfg, iargs [1], n);
10212 if (method_context_used) {
10213 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10214 method, MONO_RGCTX_INFO_METHOD);
10215 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10217 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10218 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10220 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10224 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10226 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10227 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10228 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10229 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10230 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10231 MonoClass *tclass = mono_class_from_mono_type (handle);
10233 mono_class_init (tclass);
10234 if (context_used) {
10235 ins = emit_get_rgctx_klass (cfg, context_used,
10236 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10237 } else if (cfg->compile_aot) {
10238 if (method->wrapper_type) {
10239 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10240 /* Special case for static synchronized wrappers */
10241 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10243 /* FIXME: n is not a normal token */
10244 cfg->disable_aot = TRUE;
10245 EMIT_NEW_PCONST (cfg, ins, NULL);
10248 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10251 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10253 ins->type = STACK_OBJ;
10254 ins->klass = cmethod->klass;
10257 MonoInst *addr, *vtvar;
10259 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10261 if (context_used) {
10262 if (handle_class == mono_defaults.typehandle_class) {
10263 ins = emit_get_rgctx_klass (cfg, context_used,
10264 mono_class_from_mono_type (handle),
10265 MONO_RGCTX_INFO_TYPE);
10266 } else if (handle_class == mono_defaults.methodhandle_class) {
10267 ins = emit_get_rgctx_method (cfg, context_used,
10268 handle, MONO_RGCTX_INFO_METHOD);
10269 } else if (handle_class == mono_defaults.fieldhandle_class) {
10270 ins = emit_get_rgctx_field (cfg, context_used,
10271 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10273 g_assert_not_reached ();
10275 } else if (cfg->compile_aot) {
10276 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
10278 EMIT_NEW_PCONST (cfg, ins, handle);
10280 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10281 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10282 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10292 MONO_INST_NEW (cfg, ins, OP_THROW);
10294 ins->sreg1 = sp [0]->dreg;
10296 bblock->out_of_line = TRUE;
10297 MONO_ADD_INS (bblock, ins);
10298 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10299 MONO_ADD_INS (bblock, ins);
10302 link_bblock (cfg, bblock, end_bblock);
10303 start_new_bblock = 1;
10305 case CEE_ENDFINALLY:
10306 /* mono_save_seq_point_info () depends on this */
10307 if (sp != stack_start)
10308 emit_seq_point (cfg, method, ip, FALSE);
10309 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10310 MONO_ADD_INS (bblock, ins);
10312 start_new_bblock = 1;
10315 * Control will leave the method so empty the stack, otherwise
10316 * the next basic block will start with a nonempty stack.
10318 while (sp != stack_start) {
10323 case CEE_LEAVE_S: {
10326 if (*ip == CEE_LEAVE) {
10328 target = ip + 5 + (gint32)read32(ip + 1);
10331 target = ip + 2 + (signed char)(ip [1]);
10334 /* empty the stack */
10335 while (sp != stack_start) {
10340 * If this leave statement is in a catch block, check for a
10341 * pending exception, and rethrow it if necessary.
10342 * We avoid doing this in runtime invoke wrappers, since those are called
10343 * by native code which excepts the wrapper to catch all exceptions.
10345 for (i = 0; i < header->num_clauses; ++i) {
10346 MonoExceptionClause *clause = &header->clauses [i];
10349 * Use <= in the final comparison to handle clauses with multiple
10350 * leave statements, like in bug #78024.
10351 * The ordering of the exception clauses guarantees that we find the
10352 * innermost clause.
10354 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10356 MonoBasicBlock *dont_throw;
10361 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10364 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10366 NEW_BBLOCK (cfg, dont_throw);
10369 * Currently, we always rethrow the abort exception, despite the
10370 * fact that this is not correct. See thread6.cs for an example.
10371 * But propagating the abort exception is more important than
10372 * getting the sematics right.
10374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10376 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10378 MONO_START_BB (cfg, dont_throw);
10383 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10385 MonoExceptionClause *clause;
10387 for (tmp = handlers; tmp; tmp = tmp->next) {
10388 clause = tmp->data;
10389 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10391 link_bblock (cfg, bblock, tblock);
10392 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10393 ins->inst_target_bb = tblock;
10394 ins->inst_eh_block = clause;
10395 MONO_ADD_INS (bblock, ins);
10396 bblock->has_call_handler = 1;
10397 if (COMPILE_LLVM (cfg)) {
10398 MonoBasicBlock *target_bb;
10401 * Link the finally bblock with the target, since it will
10402 * conceptually branch there.
10403 * FIXME: Have to link the bblock containing the endfinally.
10405 GET_BBLOCK (cfg, target_bb, target);
10406 link_bblock (cfg, tblock, target_bb);
10409 g_list_free (handlers);
10412 MONO_INST_NEW (cfg, ins, OP_BR);
10413 MONO_ADD_INS (bblock, ins);
10414 GET_BBLOCK (cfg, tblock, target);
10415 link_bblock (cfg, bblock, tblock);
10416 ins->inst_target_bb = tblock;
10417 start_new_bblock = 1;
10419 if (*ip == CEE_LEAVE)
10428 * Mono specific opcodes
10430 case MONO_CUSTOM_PREFIX: {
10432 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10436 case CEE_MONO_ICALL: {
10438 MonoJitICallInfo *info;
10440 token = read32 (ip + 2);
10441 func = mono_method_get_wrapper_data (method, token);
10442 info = mono_find_jit_icall_by_addr (func);
10444 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10447 CHECK_STACK (info->sig->param_count);
10448 sp -= info->sig->param_count;
10450 ins = mono_emit_jit_icall (cfg, info->func, sp);
10451 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10455 inline_costs += 10 * num_calls++;
10459 case CEE_MONO_LDPTR: {
10462 CHECK_STACK_OVF (1);
10464 token = read32 (ip + 2);
10466 ptr = mono_method_get_wrapper_data (method, token);
10467 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
10468 MonoJitICallInfo *callinfo;
10469 const char *icall_name;
10471 icall_name = method->name + strlen ("__icall_wrapper_");
10472 g_assert (icall_name);
10473 callinfo = mono_find_jit_icall_by_name (icall_name);
10474 g_assert (callinfo);
10476 if (ptr == callinfo->func) {
10477 /* Will be transformed into an AOTCONST later */
10478 EMIT_NEW_PCONST (cfg, ins, ptr);
10484 /* FIXME: Generalize this */
10485 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10486 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10491 EMIT_NEW_PCONST (cfg, ins, ptr);
10494 inline_costs += 10 * num_calls++;
10495 /* Can't embed random pointers into AOT code */
10496 cfg->disable_aot = 1;
10499 case CEE_MONO_ICALL_ADDR: {
10500 MonoMethod *cmethod;
10503 CHECK_STACK_OVF (1);
10505 token = read32 (ip + 2);
10507 cmethod = mono_method_get_wrapper_data (method, token);
10509 if (cfg->compile_aot) {
10510 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10512 ptr = mono_lookup_internal_call (cmethod);
10514 EMIT_NEW_PCONST (cfg, ins, ptr);
10520 case CEE_MONO_VTADDR: {
10521 MonoInst *src_var, *src;
10527 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10528 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10533 case CEE_MONO_NEWOBJ: {
10534 MonoInst *iargs [2];
10536 CHECK_STACK_OVF (1);
10538 token = read32 (ip + 2);
10539 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10540 mono_class_init (klass);
10541 NEW_DOMAINCONST (cfg, iargs [0]);
10542 MONO_ADD_INS (cfg->cbb, iargs [0]);
10543 NEW_CLASSCONST (cfg, iargs [1], klass);
10544 MONO_ADD_INS (cfg->cbb, iargs [1]);
10545 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10547 inline_costs += 10 * num_calls++;
10550 case CEE_MONO_OBJADDR:
10553 MONO_INST_NEW (cfg, ins, OP_MOVE);
10554 ins->dreg = alloc_ireg_mp (cfg);
10555 ins->sreg1 = sp [0]->dreg;
10556 ins->type = STACK_MP;
10557 MONO_ADD_INS (cfg->cbb, ins);
10561 case CEE_MONO_LDNATIVEOBJ:
10563 * Similar to LDOBJ, but instead load the unmanaged
10564 * representation of the vtype to the stack.
10569 token = read32 (ip + 2);
10570 klass = mono_method_get_wrapper_data (method, token);
10571 g_assert (klass->valuetype);
10572 mono_class_init (klass);
10575 MonoInst *src, *dest, *temp;
10578 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10579 temp->backend.is_pinvoke = 1;
10580 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10581 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10583 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10584 dest->type = STACK_VTYPE;
10585 dest->klass = klass;
10591 case CEE_MONO_RETOBJ: {
10593 * Same as RET, but return the native representation of a vtype
10596 g_assert (cfg->ret);
10597 g_assert (mono_method_signature (method)->pinvoke);
10602 token = read32 (ip + 2);
10603 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10605 if (!cfg->vret_addr) {
10606 g_assert (cfg->ret_var_is_local);
10608 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10610 EMIT_NEW_RETLOADA (cfg, ins);
10612 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10614 if (sp != stack_start)
10617 MONO_INST_NEW (cfg, ins, OP_BR);
10618 ins->inst_target_bb = end_bblock;
10619 MONO_ADD_INS (bblock, ins);
10620 link_bblock (cfg, bblock, end_bblock);
10621 start_new_bblock = 1;
10625 case CEE_MONO_CISINST:
10626 case CEE_MONO_CCASTCLASS: {
10631 token = read32 (ip + 2);
10632 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10633 if (ip [1] == CEE_MONO_CISINST)
10634 ins = handle_cisinst (cfg, klass, sp [0]);
10636 ins = handle_ccastclass (cfg, klass, sp [0]);
10642 case CEE_MONO_SAVE_LMF:
10643 case CEE_MONO_RESTORE_LMF:
10644 #ifdef MONO_ARCH_HAVE_LMF_OPS
10645 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10646 MONO_ADD_INS (bblock, ins);
10647 cfg->need_lmf_area = TRUE;
10651 case CEE_MONO_CLASSCONST:
10652 CHECK_STACK_OVF (1);
10654 token = read32 (ip + 2);
10655 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10658 inline_costs += 10 * num_calls++;
10660 case CEE_MONO_NOT_TAKEN:
10661 bblock->out_of_line = TRUE;
10665 CHECK_STACK_OVF (1);
10667 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10668 ins->dreg = alloc_preg (cfg);
10669 ins->inst_offset = (gint32)read32 (ip + 2);
10670 ins->type = STACK_PTR;
10671 MONO_ADD_INS (bblock, ins);
10675 case CEE_MONO_DYN_CALL: {
10676 MonoCallInst *call;
10678 /* It would be easier to call a trampoline, but that would put an
10679 * extra frame on the stack, confusing exception handling. So
10680 * implement it inline using an opcode for now.
10683 if (!cfg->dyn_call_var) {
10684 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10685 /* prevent it from being register allocated */
10686 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10689 /* Has to use a call inst since it local regalloc expects it */
10690 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10691 ins = (MonoInst*)call;
10693 ins->sreg1 = sp [0]->dreg;
10694 ins->sreg2 = sp [1]->dreg;
10695 MONO_ADD_INS (bblock, ins);
10697 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10698 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10702 inline_costs += 10 * num_calls++;
10706 case CEE_MONO_MEMORY_BARRIER: {
10708 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10712 case CEE_MONO_JIT_ATTACH: {
10713 MonoInst *args [16];
10714 MonoInst *ad_ins, *lmf_ins;
10715 MonoBasicBlock *next_bb = NULL;
10717 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10719 EMIT_NEW_PCONST (cfg, ins, NULL);
10720 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10726 ad_ins = mono_get_domain_intrinsic (cfg);
10727 lmf_ins = mono_get_lmf_intrinsic (cfg);
10730 #ifdef MONO_ARCH_HAVE_TLS_GET
10731 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10732 NEW_BBLOCK (cfg, next_bb);
10734 MONO_ADD_INS (cfg->cbb, ad_ins);
10735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10736 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10738 MONO_ADD_INS (cfg->cbb, lmf_ins);
10739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10744 if (cfg->compile_aot) {
10745 /* AOT code is only used in the root domain */
10746 EMIT_NEW_PCONST (cfg, args [0], NULL);
10748 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10750 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10751 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10754 MONO_START_BB (cfg, next_bb);
10760 case CEE_MONO_JIT_DETACH: {
10761 MonoInst *args [16];
10763 /* Restore the original domain */
10764 dreg = alloc_ireg (cfg);
10765 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10766 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10771 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10777 case CEE_PREFIX1: {
10780 case CEE_ARGLIST: {
10781 /* somewhat similar to LDTOKEN */
10782 MonoInst *addr, *vtvar;
10783 CHECK_STACK_OVF (1);
10784 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10786 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10787 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10789 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10790 ins->type = STACK_VTYPE;
10791 ins->klass = mono_defaults.argumenthandle_class;
10804 * The following transforms:
10805 * CEE_CEQ into OP_CEQ
10806 * CEE_CGT into OP_CGT
10807 * CEE_CGT_UN into OP_CGT_UN
10808 * CEE_CLT into OP_CLT
10809 * CEE_CLT_UN into OP_CLT_UN
10811 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10813 MONO_INST_NEW (cfg, ins, cmp->opcode);
10815 cmp->sreg1 = sp [0]->dreg;
10816 cmp->sreg2 = sp [1]->dreg;
10817 type_from_op (cmp, sp [0], sp [1]);
10819 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10820 cmp->opcode = OP_LCOMPARE;
10821 else if (sp [0]->type == STACK_R8)
10822 cmp->opcode = OP_FCOMPARE;
10824 cmp->opcode = OP_ICOMPARE;
10825 MONO_ADD_INS (bblock, cmp);
10826 ins->type = STACK_I4;
10827 ins->dreg = alloc_dreg (cfg, ins->type);
10828 type_from_op (ins, sp [0], sp [1]);
10830 if (cmp->opcode == OP_FCOMPARE) {
10832 * The backends expect the fceq opcodes to do the
10835 cmp->opcode = OP_NOP;
10836 ins->sreg1 = cmp->sreg1;
10837 ins->sreg2 = cmp->sreg2;
10839 MONO_ADD_INS (bblock, ins);
10845 MonoInst *argconst;
10846 MonoMethod *cil_method;
10848 CHECK_STACK_OVF (1);
10850 n = read32 (ip + 2);
10851 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10852 if (!cmethod || mono_loader_get_last_error ())
10854 mono_class_init (cmethod->klass);
10856 mono_save_token_info (cfg, image, n, cmethod);
10858 context_used = mini_method_check_context_used (cfg, cmethod);
10860 cil_method = cmethod;
10861 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10862 METHOD_ACCESS_FAILURE;
10864 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10865 if (check_linkdemand (cfg, method, cmethod))
10866 INLINE_FAILURE ("linkdemand");
10867 CHECK_CFG_EXCEPTION;
10868 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10869 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10873 * Optimize the common case of ldftn+delegate creation
10875 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10876 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10877 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10878 MonoInst *target_ins;
10879 MonoMethod *invoke;
10880 int invoke_context_used;
10882 invoke = mono_get_delegate_invoke (ctor_method->klass);
10883 if (!invoke || !mono_method_signature (invoke))
10886 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10888 target_ins = sp [-1];
10890 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10891 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10893 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10894 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10895 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10897 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10901 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10902 /* FIXME: SGEN support */
10903 if (invoke_context_used == 0) {
10905 if (cfg->verbose_level > 3)
10906 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10908 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10909 CHECK_CFG_EXCEPTION;
10918 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10919 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10923 inline_costs += 10 * num_calls++;
10926 case CEE_LDVIRTFTN: {
10927 MonoInst *args [2];
10931 n = read32 (ip + 2);
10932 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10933 if (!cmethod || mono_loader_get_last_error ())
10935 mono_class_init (cmethod->klass);
10937 context_used = mini_method_check_context_used (cfg, cmethod);
10939 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10940 if (check_linkdemand (cfg, method, cmethod))
10941 INLINE_FAILURE ("linkdemand");
10942 CHECK_CFG_EXCEPTION;
10943 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10944 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10950 args [1] = emit_get_rgctx_method (cfg, context_used,
10951 cmethod, MONO_RGCTX_INFO_METHOD);
10954 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10956 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10959 inline_costs += 10 * num_calls++;
10963 CHECK_STACK_OVF (1);
10965 n = read16 (ip + 2);
10967 EMIT_NEW_ARGLOAD (cfg, ins, n);
10972 CHECK_STACK_OVF (1);
10974 n = read16 (ip + 2);
10976 NEW_ARGLOADA (cfg, ins, n);
10977 MONO_ADD_INS (cfg->cbb, ins);
10985 n = read16 (ip + 2);
10987 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10989 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10993 CHECK_STACK_OVF (1);
10995 n = read16 (ip + 2);
10997 EMIT_NEW_LOCLOAD (cfg, ins, n);
11002 unsigned char *tmp_ip;
11003 CHECK_STACK_OVF (1);
11005 n = read16 (ip + 2);
11008 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11014 EMIT_NEW_LOCLOADA (cfg, ins, n);
11023 n = read16 (ip + 2);
11025 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11027 emit_stloc_ir (cfg, sp, header, n);
11034 if (sp != stack_start)
11036 if (cfg->method != method)
11038 * Inlining this into a loop in a parent could lead to
11039 * stack overflows which is different behavior than the
11040 * non-inlined case, thus disable inlining in this case.
11042 goto inline_failure;
11044 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11045 ins->dreg = alloc_preg (cfg);
11046 ins->sreg1 = sp [0]->dreg;
11047 ins->type = STACK_PTR;
11048 MONO_ADD_INS (cfg->cbb, ins);
11050 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11052 ins->flags |= MONO_INST_INIT;
11057 case CEE_ENDFILTER: {
11058 MonoExceptionClause *clause, *nearest;
11059 int cc, nearest_num;
11063 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11065 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11066 ins->sreg1 = (*sp)->dreg;
11067 MONO_ADD_INS (bblock, ins);
11068 start_new_bblock = 1;
11073 for (cc = 0; cc < header->num_clauses; ++cc) {
11074 clause = &header->clauses [cc];
11075 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11076 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11077 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11082 g_assert (nearest);
11083 if ((ip - header->code) != nearest->handler_offset)
11088 case CEE_UNALIGNED_:
11089 ins_flag |= MONO_INST_UNALIGNED;
11090 /* FIXME: record alignment? we can assume 1 for now */
11094 case CEE_VOLATILE_:
11095 ins_flag |= MONO_INST_VOLATILE;
11099 ins_flag |= MONO_INST_TAILCALL;
11100 cfg->flags |= MONO_CFG_HAS_TAIL;
11101 /* Can't inline tail calls at this time */
11102 inline_costs += 100000;
11109 token = read32 (ip + 2);
11110 klass = mini_get_class (method, token, generic_context);
11111 CHECK_TYPELOAD (klass);
11112 if (generic_class_is_reference_type (cfg, klass))
11113 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11115 mini_emit_initobj (cfg, *sp, NULL, klass);
11119 case CEE_CONSTRAINED_:
11121 token = read32 (ip + 2);
11122 constrained_call = mini_get_class (method, token, generic_context);
11123 CHECK_TYPELOAD (constrained_call);
11127 case CEE_INITBLK: {
11128 MonoInst *iargs [3];
11132 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11133 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11134 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11135 /* emit_memset only works when val == 0 */
11136 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11138 iargs [0] = sp [0];
11139 iargs [1] = sp [1];
11140 iargs [2] = sp [2];
11141 if (ip [1] == CEE_CPBLK) {
11142 MonoMethod *memcpy_method = get_memcpy_method ();
11143 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11145 MonoMethod *memset_method = get_memset_method ();
11146 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11156 ins_flag |= MONO_INST_NOTYPECHECK;
11158 ins_flag |= MONO_INST_NORANGECHECK;
11159 /* we ignore the no-nullcheck for now since we
11160 * really do it explicitly only when doing callvirt->call
11164 case CEE_RETHROW: {
11166 int handler_offset = -1;
11168 for (i = 0; i < header->num_clauses; ++i) {
11169 MonoExceptionClause *clause = &header->clauses [i];
11170 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11171 handler_offset = clause->handler_offset;
11176 bblock->flags |= BB_EXCEPTION_UNSAFE;
11178 g_assert (handler_offset != -1);
11180 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11181 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11182 ins->sreg1 = load->dreg;
11183 MONO_ADD_INS (bblock, ins);
11185 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11186 MONO_ADD_INS (bblock, ins);
11189 link_bblock (cfg, bblock, end_bblock);
11190 start_new_bblock = 1;
11198 GSHAREDVT_FAILURE (*ip);
11200 CHECK_STACK_OVF (1);
11202 token = read32 (ip + 2);
11203 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11204 MonoType *type = mono_type_create_from_typespec (image, token);
11205 val = mono_type_size (type, &ialign);
11207 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11208 CHECK_TYPELOAD (klass);
11209 mono_class_init (klass);
11210 val = mono_type_size (&klass->byval_arg, &ialign);
11212 EMIT_NEW_ICONST (cfg, ins, val);
11217 case CEE_REFANYTYPE: {
11218 MonoInst *src_var, *src;
11220 GSHAREDVT_FAILURE (*ip);
11226 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11228 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11229 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11230 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11235 case CEE_READONLY_:
11248 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11258 g_warning ("opcode 0x%02x not handled", *ip);
11262 if (start_new_bblock != 1)
11265 bblock->cil_length = ip - bblock->cil_code;
11266 if (bblock->next_bb) {
11267 /* This could already be set because of inlining, #693905 */
11268 MonoBasicBlock *bb = bblock;
11270 while (bb->next_bb)
11272 bb->next_bb = end_bblock;
11274 bblock->next_bb = end_bblock;
11277 if (cfg->method == method && cfg->domainvar) {
11279 MonoInst *get_domain;
11281 cfg->cbb = init_localsbb;
11283 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11284 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11287 get_domain->dreg = alloc_preg (cfg);
11288 MONO_ADD_INS (cfg->cbb, get_domain);
11290 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11291 MONO_ADD_INS (cfg->cbb, store);
11294 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11295 if (cfg->compile_aot)
11296 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11297 mono_get_got_var (cfg);
11300 if (cfg->method == method && cfg->got_var)
11301 mono_emit_load_got_addr (cfg);
11306 cfg->cbb = init_localsbb;
11308 for (i = 0; i < header->num_locals; ++i) {
11309 MonoType *ptype = header->locals [i];
11310 int t = ptype->type;
11311 dreg = cfg->locals [i]->dreg;
11313 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11314 t = mono_class_enum_basetype (ptype->data.klass)->type;
11315 if (ptype->byref) {
11316 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11317 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11318 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11319 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11320 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11321 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11322 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11323 ins->type = STACK_R8;
11324 ins->inst_p0 = (void*)&r8_0;
11325 ins->dreg = alloc_dreg (cfg, STACK_R8);
11326 MONO_ADD_INS (init_localsbb, ins);
11327 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11328 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11329 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11330 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11331 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11332 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11334 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11339 if (cfg->init_ref_vars && cfg->method == method) {
11340 /* Emit initialization for ref vars */
11341 // FIXME: Avoid duplication initialization for IL locals.
11342 for (i = 0; i < cfg->num_varinfo; ++i) {
11343 MonoInst *ins = cfg->varinfo [i];
11345 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11346 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11351 MonoBasicBlock *bb;
11354 * Make seq points at backward branch targets interruptable.
11356 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11357 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11358 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11361 /* Add a sequence point for method entry/exit events */
11363 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11364 MONO_ADD_INS (init_localsbb, ins);
11365 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11366 MONO_ADD_INS (cfg->bb_exit, ins);
11371 if (cfg->method == method) {
11372 MonoBasicBlock *bb;
11373 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11374 bb->region = mono_find_block_region (cfg, bb->real_offset);
11376 mono_create_spvar_for_region (cfg, bb->region);
11377 if (cfg->verbose_level > 2)
11378 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11382 g_slist_free (class_inits);
11383 dont_inline = g_list_remove (dont_inline, method);
11385 if (inline_costs < 0) {
11388 /* Method is too large */
11389 mname = mono_method_full_name (method, TRUE);
11390 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11391 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11393 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11394 mono_basic_block_free (original_bb);
11398 if ((cfg->verbose_level > 2) && (cfg->method == method))
11399 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11401 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11402 mono_basic_block_free (original_bb);
11403 return inline_costs;
11406 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11417 set_exception_type_from_invalid_il (cfg, method, ip);
11421 g_slist_free (class_inits);
11422 mono_basic_block_free (original_bb);
11423 dont_inline = g_list_remove (dont_inline, method);
11424 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11429 store_membase_reg_to_store_membase_imm (int opcode)
11432 case OP_STORE_MEMBASE_REG:
11433 return OP_STORE_MEMBASE_IMM;
11434 case OP_STOREI1_MEMBASE_REG:
11435 return OP_STOREI1_MEMBASE_IMM;
11436 case OP_STOREI2_MEMBASE_REG:
11437 return OP_STOREI2_MEMBASE_IMM;
11438 case OP_STOREI4_MEMBASE_REG:
11439 return OP_STOREI4_MEMBASE_IMM;
11440 case OP_STOREI8_MEMBASE_REG:
11441 return OP_STOREI8_MEMBASE_IMM;
11443 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register second operand to its immediate-operand
 * (_IMM) variant, e.g. integer/long ALU ops, compares, shifts, membase
 * stores, and a few x86/amd64-specific forms.
 * NOTE(review): the case labels and the default path are elided in this
 * extraction; only the return statements are visible.
 */
11450 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
11454 return OP_IADD_IMM;
11456 return OP_ISUB_IMM;
11458 return OP_IDIV_IMM;
11460 return OP_IDIV_UN_IMM;
11462 return OP_IREM_IMM;
11464 return OP_IREM_UN_IMM;
11466 return OP_IMUL_IMM;
11468 return OP_IAND_IMM;
11472 return OP_IXOR_IMM;
11474 return OP_ISHL_IMM;
11476 return OP_ISHR_IMM;
11478 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU ops */
11481 return OP_LADD_IMM;
11483 return OP_LSUB_IMM;
11485 return OP_LAND_IMM;
11489 return OP_LXOR_IMM;
11491 return OP_LSHL_IMM;
11493 return OP_LSHR_IMM;
11495 return OP_LSHR_UN_IMM;
/* compares */
11498 return OP_COMPARE_IMM;
11500 return OP_ICOMPARE_IMM;
11502 return OP_LCOMPARE_IMM;
/* membase stores: switch to the immediate-source store form */
11504 case OP_STORE_MEMBASE_REG:
11505 return OP_STORE_MEMBASE_IMM;
11506 case OP_STOREI1_MEMBASE_REG:
11507 return OP_STOREI1_MEMBASE_IMM;
11508 case OP_STOREI2_MEMBASE_REG:
11509 return OP_STOREI2_MEMBASE_IMM;
11510 case OP_STOREI4_MEMBASE_REG:
11511 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific opcodes with immediate variants */
11513 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11515 return OP_X86_PUSH_IMM;
11516 case OP_X86_COMPARE_MEMBASE_REG:
11517 return OP_X86_COMPARE_MEMBASE_IMM;
11519 #if defined(TARGET_AMD64)
11520 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11521 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* a call through a constant address becomes a direct call */
11523 case OP_VOIDCALL_REG:
11524 return OP_VOIDCALL;
11532 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the JIT's
 * base+offset load opcode (OP_LOAD*_MEMBASE) of the matching width and
 * signedness.  LDIND_REF and the native-int case both map to the
 * pointer-sized OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 * NOTE(review): most case labels are elided in this extraction.
 */
11539 ldind_to_load_membase (int opcode)
11543 return OP_LOADI1_MEMBASE;
11545 return OP_LOADU1_MEMBASE;
11547 return OP_LOADI2_MEMBASE;
11549 return OP_LOADU2_MEMBASE;
11551 return OP_LOADI4_MEMBASE;
11553 return OP_LOADU4_MEMBASE;
11555 return OP_LOAD_MEMBASE;
11556 case CEE_LDIND_REF:
11557 return OP_LOAD_MEMBASE;
11559 return OP_LOADI8_MEMBASE;
11561 return OP_LOADR4_MEMBASE;
11563 return OP_LOADR8_MEMBASE;
11565 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the JIT's
 * register-source base+offset store opcode (OP_STORE*_MEMBASE_REG) of the
 * matching width.  STIND_REF maps to the pointer-sized store.  Asserts on
 * unknown opcodes.
 * NOTE(review): most case labels are elided in this extraction.
 */
11572 stind_to_store_membase (int opcode)
11576 return OP_STOREI1_MEMBASE_REG;
11578 return OP_STOREI2_MEMBASE_REG;
11580 return OP_STOREI4_MEMBASE_REG;
11582 case CEE_STIND_REF:
11583 return OP_STORE_MEMBASE_REG;
11585 return OP_STOREI8_MEMBASE_REG;
11587 return OP_STORER4_MEMBASE_REG;
11589 return OP_STORER8_MEMBASE_REG;
11591 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base-register-relative load (*_MEMBASE) to its absolute-address
 * (*_MEM) counterpart, used when the base address is a constant.  Only
 * enabled on x86/amd64; the 8-byte case additionally requires a 64 bit
 * register size.
 * NOTE(review): the switch header, fall-through/default path and the
 * non-x86 return are elided in this extraction.
 */
11598 mono_load_membase_to_load_mem (int opcode)
11600 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11601 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11603 case OP_LOAD_MEMBASE:
11604 return OP_LOAD_MEM;
11605 case OP_LOADU1_MEMBASE:
11606 return OP_LOADU1_MEM;
11607 case OP_LOADU2_MEMBASE:
11608 return OP_LOADU2_MEM;
11609 case OP_LOADI4_MEMBASE:
11610 return OP_LOADI4_MEM;
11611 case OP_LOADU4_MEMBASE:
11612 return OP_LOADU4_MEM;
11613 #if SIZEOF_REGISTER == 8
11614 case OP_LOADI8_MEMBASE:
11615 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   On x86/amd64, try to fuse an ALU opcode whose result is immediately
 * stored via STORE_OPCODE into a single read-modify-write instruction that
 * operates directly on memory (the *_MEMBASE_REG / *_MEMBASE_IMM forms).
 * Fusion is only attempted when the store width matches the operation width
 * (pointer/4-byte stores on x86; also 8-byte stores on amd64).
 * NOTE(review): case labels, the failure-path return value and closing
 * braces are elided in this extraction — presumably a sentinel is returned
 * when no fused form applies; confirm against the full source.
 */
11624 op_to_op_dest_membase (int store_opcode, int opcode)
11626 #if defined(TARGET_X86)
/* only pointer-sized / 32 bit stores can be fused on x86 */
11627 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11632 return OP_X86_ADD_MEMBASE_REG;
11634 return OP_X86_SUB_MEMBASE_REG;
11636 return OP_X86_AND_MEMBASE_REG;
11638 return OP_X86_OR_MEMBASE_REG;
11640 return OP_X86_XOR_MEMBASE_REG;
11643 return OP_X86_ADD_MEMBASE_IMM;
11646 return OP_X86_SUB_MEMBASE_IMM;
11649 return OP_X86_AND_MEMBASE_IMM;
11652 return OP_X86_OR_MEMBASE_IMM;
11655 return OP_X86_XOR_MEMBASE_IMM;
11661 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 8 byte stores */
11662 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops */
11667 return OP_X86_ADD_MEMBASE_REG;
11669 return OP_X86_SUB_MEMBASE_REG;
11671 return OP_X86_AND_MEMBASE_REG;
11673 return OP_X86_OR_MEMBASE_REG;
11675 return OP_X86_XOR_MEMBASE_REG;
11677 return OP_X86_ADD_MEMBASE_IMM;
11679 return OP_X86_SUB_MEMBASE_IMM;
11681 return OP_X86_AND_MEMBASE_IMM;
11683 return OP_X86_OR_MEMBASE_IMM;
11685 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops */
11687 return OP_AMD64_ADD_MEMBASE_REG;
11689 return OP_AMD64_SUB_MEMBASE_REG;
11691 return OP_AMD64_AND_MEMBASE_REG;
11693 return OP_AMD64_OR_MEMBASE_REG;
11695 return OP_AMD64_XOR_MEMBASE_REG;
11698 return OP_AMD64_ADD_MEMBASE_IMM;
11701 return OP_AMD64_SUB_MEMBASE_IMM;
11704 return OP_AMD64_AND_MEMBASE_IMM;
11707 return OP_AMD64_OR_MEMBASE_IMM;
11710 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, fuse a condition-materializing opcode with the 1 byte
 * store that consumes it into a single SETcc-to-memory instruction
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 * NOTE(review): the case labels and the no-fusion return path are elided
 * in this extraction; confirm against the full source.
 */
11720 op_to_op_store_membase (int store_opcode, int opcode)
11722 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11725 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11726 return OP_X86_SETEQ_MEMBASE;
11728 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11729 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   On x86/amd64, try to fold a load feeding OPCODE's first source register
 * into the instruction itself, yielding a memory-operand form (push/compare
 * with a membase operand).  The fold is only legal when LOAD_OPCODE's width
 * matches the operation width; under __mono_ilp32__ pointer and 8 byte
 * loads are distinguished.
 * NOTE(review): case labels, default paths and closing braces are elided
 * in this extraction; a block of commented-out amd64 compare-imm folds is
 * visible (disabled due to 32 bit immediate limits).
 */
11737 op_to_op_src1_membase (int load_opcode, int opcode)
11740 /* FIXME: This has sign extension issues */
/* special-case: unsigned byte load feeding an icompare-with-imm */
11742 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11743 return OP_X86_COMPARE_MEMBASE8_IMM;
11746 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11751 return OP_X86_PUSH_MEMBASE;
11752 case OP_COMPARE_IMM:
11753 case OP_ICOMPARE_IMM:
11754 return OP_X86_COMPARE_MEMBASE_IMM;
11757 return OP_X86_COMPARE_MEMBASE_REG;
11761 #ifdef TARGET_AMD64
11762 /* FIXME: This has sign extension issues */
11764 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11765 return OP_X86_COMPARE_MEMBASE8_IMM;
11770 #ifdef __mono_ilp32__
11771 if (load_opcode == OP_LOADI8_MEMBASE)
11773 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11775 return OP_X86_PUSH_MEMBASE;
11777 /* FIXME: This only works for 32 bit immediates
11778 case OP_COMPARE_IMM:
11779 case OP_LCOMPARE_IMM:
11780 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11781 return OP_AMD64_COMPARE_MEMBASE_IMM;
11783 case OP_ICOMPARE_IMM:
11784 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11785 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11789 #ifdef __mono_ilp32__
/* under ILP32, pointer-sized loads use the 32 bit compare form */
11790 if (load_opcode == OP_LOAD_MEMBASE)
11791 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11792 if (load_opcode == OP_LOADI8_MEMBASE)
11794 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11796 return OP_AMD64_COMPARE_MEMBASE_REG;
11799 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11800 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11809 op_to_op_src2_membase (int load_opcode, int opcode)
11812 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11818 return OP_X86_COMPARE_REG_MEMBASE;
11820 return OP_X86_ADD_REG_MEMBASE;
11822 return OP_X86_SUB_REG_MEMBASE;
11824 return OP_X86_AND_REG_MEMBASE;
11826 return OP_X86_OR_REG_MEMBASE;
11828 return OP_X86_XOR_REG_MEMBASE;
11832 #ifdef TARGET_AMD64
11833 #ifdef __mono_ilp32__
11834 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11836 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11840 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11842 return OP_X86_ADD_REG_MEMBASE;
11844 return OP_X86_SUB_REG_MEMBASE;
11846 return OP_X86_AND_REG_MEMBASE;
11848 return OP_X86_OR_REG_MEMBASE;
11850 return OP_X86_XOR_REG_MEMBASE;
11852 #ifdef __mono_ilp32__
11853 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11855 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
11860 return OP_AMD64_COMPARE_REG_MEMBASE;
11862 return OP_AMD64_ADD_REG_MEMBASE;
11864 return OP_AMD64_SUB_REG_MEMBASE;
11866 return OP_AMD64_AND_REG_MEMBASE;
11868 return OP_AMD64_OR_REG_MEMBASE;
11870 return OP_AMD64_XOR_REG_MEMBASE;
11879 mono_op_to_op_imm_noemul (int opcode)
11882 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11888 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11895 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
11900 return mono_op_to_op_imm (opcode);
11905 * mono_handle_global_vregs:
11907 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11911 mono_handle_global_vregs (MonoCompile *cfg)
11913 gint32 *vreg_to_bb;
11914 MonoBasicBlock *bb;
11917 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11919 #ifdef MONO_ARCH_SIMD_INTRINSICS
11920 if (cfg->uses_simd_intrinsics)
11921 mono_simd_simplify_indirection (cfg);
11924 /* Find local vregs used in more than one bb */
11925 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11926 MonoInst *ins = bb->code;
11927 int block_num = bb->block_num;
11929 if (cfg->verbose_level > 2)
11930 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11933 for (; ins; ins = ins->next) {
11934 const char *spec = INS_INFO (ins->opcode);
11935 int regtype = 0, regindex;
11938 if (G_UNLIKELY (cfg->verbose_level > 2))
11939 mono_print_ins (ins);
11941 g_assert (ins->opcode >= MONO_CEE_LAST);
11943 for (regindex = 0; regindex < 4; regindex ++) {
11946 if (regindex == 0) {
11947 regtype = spec [MONO_INST_DEST];
11948 if (regtype == ' ')
11951 } else if (regindex == 1) {
11952 regtype = spec [MONO_INST_SRC1];
11953 if (regtype == ' ')
11956 } else if (regindex == 2) {
11957 regtype = spec [MONO_INST_SRC2];
11958 if (regtype == ' ')
11961 } else if (regindex == 3) {
11962 regtype = spec [MONO_INST_SRC3];
11963 if (regtype == ' ')
11968 #if SIZEOF_REGISTER == 4
11969 /* In the LLVM case, the long opcodes are not decomposed */
11970 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11972 * Since some instructions reference the original long vreg,
11973 * and some reference the two component vregs, it is quite hard
11974 * to determine when it needs to be global. So be conservative.
11976 if (!get_vreg_to_inst (cfg, vreg)) {
11977 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11979 if (cfg->verbose_level > 2)
11980 printf ("LONG VREG R%d made global.\n", vreg);
11984 * Make the component vregs volatile since the optimizations can
11985 * get confused otherwise.
11987 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11988 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11992 g_assert (vreg != -1);
11994 prev_bb = vreg_to_bb [vreg];
11995 if (prev_bb == 0) {
11996 /* 0 is a valid block num */
11997 vreg_to_bb [vreg] = block_num + 1;
11998 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11999 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12002 if (!get_vreg_to_inst (cfg, vreg)) {
12003 if (G_UNLIKELY (cfg->verbose_level > 2))
12004 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
12008 if (vreg_is_ref (cfg, vreg))
12009 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12011 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12014 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12017 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12020 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12023 g_assert_not_reached ();
12027 /* Flag as having been used in more than one bb */
12028 vreg_to_bb [vreg] = -1;
12034 /* If a variable is used in only one bblock, convert it into a local vreg */
12035 for (i = 0; i < cfg->num_varinfo; i++) {
12036 MonoInst *var = cfg->varinfo [i];
12037 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12039 switch (var->type) {
12045 #if SIZEOF_REGISTER == 8
12048 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
12049 /* Enabling this screws up the fp stack on x86 */
12052 /* Arguments are implicitly global */
12053 /* Putting R4 vars into registers doesn't work currently */
12054 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
12056					 * Make sure that the variable's liveness interval doesn't contain a call, since
12057 * that would cause the lvreg to be spilled, making the whole optimization
12060 /* This is too slow for JIT compilation */
12062 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12064 int def_index, call_index, ins_index;
12065 gboolean spilled = FALSE;
12070 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12071 const char *spec = INS_INFO (ins->opcode);
12073 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12074 def_index = ins_index;
12076 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12077 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12078 if (call_index > def_index) {
12084 if (MONO_IS_CALL (ins))
12085 call_index = ins_index;
12095 if (G_UNLIKELY (cfg->verbose_level > 2))
12096 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12097 var->flags |= MONO_INST_IS_DEAD;
12098 cfg->vreg_to_inst [var->dreg] = NULL;
12105 * Compress the varinfo and vars tables so the liveness computation is faster and
12106 * takes up less space.
12109 for (i = 0; i < cfg->num_varinfo; ++i) {
12110 MonoInst *var = cfg->varinfo [i];
12111 if (pos < i && cfg->locals_start == i)
12112 cfg->locals_start = pos;
12113 if (!(var->flags & MONO_INST_IS_DEAD)) {
12115 cfg->varinfo [pos] = cfg->varinfo [i];
12116 cfg->varinfo [pos]->inst_c0 = pos;
12117 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12118 cfg->vars [pos].idx = pos;
12119 #if SIZEOF_REGISTER == 4
12120 if (cfg->varinfo [pos]->type == STACK_I8) {
12121 /* Modify the two component vars too */
12124 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12125 var1->inst_c0 = pos;
12126 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12127 var1->inst_c0 = pos;
12134 cfg->num_varinfo = pos;
12135 if (cfg->locals_start > cfg->num_varinfo)
12136 cfg->locals_start = cfg->num_varinfo;
12140 * mono_spill_global_vars:
12142 * Generate spill code for variables which are not allocated to registers,
12143 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12144 * code is generated which could be optimized by the local optimization passes.
12147 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12149 MonoBasicBlock *bb;
12151 int orig_next_vreg;
12152 guint32 *vreg_to_lvreg;
12154 guint32 i, lvregs_len;
12155 gboolean dest_has_lvreg = FALSE;
12156 guint32 stacktypes [128];
12157 MonoInst **live_range_start, **live_range_end;
12158 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12160 *need_local_opts = FALSE;
12162 memset (spec2, 0, sizeof (spec2));
12164 /* FIXME: Move this function to mini.c */
12165 stacktypes ['i'] = STACK_PTR;
12166 stacktypes ['l'] = STACK_I8;
12167 stacktypes ['f'] = STACK_R8;
12168 #ifdef MONO_ARCH_SIMD_INTRINSICS
12169 stacktypes ['x'] = STACK_VTYPE;
12172 #if SIZEOF_REGISTER == 4
12173 /* Create MonoInsts for longs */
12174 for (i = 0; i < cfg->num_varinfo; i++) {
12175 MonoInst *ins = cfg->varinfo [i];
12177 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12178 switch (ins->type) {
12183 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12186 g_assert (ins->opcode == OP_REGOFFSET);
12188 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12190 tree->opcode = OP_REGOFFSET;
12191 tree->inst_basereg = ins->inst_basereg;
12192 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12194 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12196 tree->opcode = OP_REGOFFSET;
12197 tree->inst_basereg = ins->inst_basereg;
12198 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12208 if (cfg->compute_gc_maps) {
12209 /* registers need liveness info even for !non refs */
12210 for (i = 0; i < cfg->num_varinfo; i++) {
12211 MonoInst *ins = cfg->varinfo [i];
12213 if (ins->opcode == OP_REGVAR)
12214 ins->flags |= MONO_INST_GC_TRACK;
12218 /* FIXME: widening and truncation */
12221 * As an optimization, when a variable allocated to the stack is first loaded into
12222 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12223 * the variable again.
12225 orig_next_vreg = cfg->next_vreg;
12226 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12227 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12231 * These arrays contain the first and last instructions accessing a given
12233 * Since we emit bblocks in the same order we process them here, and we
12234 * don't split live ranges, these will precisely describe the live range of
12235 * the variable, i.e. the instruction range where a valid value can be found
12236 * in the variables location.
12237 * The live range is computed using the liveness info computed by the liveness pass.
12238 * We can't use vmv->range, since that is an abstract live range, and we need
12239 * one which is instruction precise.
12240 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12242 /* FIXME: Only do this if debugging info is requested */
12243 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12244 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12245 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12246 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12248 /* Add spill loads/stores */
12249 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12252 if (cfg->verbose_level > 2)
12253 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12255 /* Clear vreg_to_lvreg array */
12256 for (i = 0; i < lvregs_len; i++)
12257 vreg_to_lvreg [lvregs [i]] = 0;
12261 MONO_BB_FOR_EACH_INS (bb, ins) {
12262 const char *spec = INS_INFO (ins->opcode);
12263 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12264 gboolean store, no_lvreg;
12265 int sregs [MONO_MAX_SRC_REGS];
12267 if (G_UNLIKELY (cfg->verbose_level > 2))
12268 mono_print_ins (ins);
12270 if (ins->opcode == OP_NOP)
12274 * We handle LDADDR here as well, since it can only be decomposed
12275 * when variable addresses are known.
12277 if (ins->opcode == OP_LDADDR) {
12278 MonoInst *var = ins->inst_p0;
12280 if (var->opcode == OP_VTARG_ADDR) {
12281 /* Happens on SPARC/S390 where vtypes are passed by reference */
12282 MonoInst *vtaddr = var->inst_left;
12283 if (vtaddr->opcode == OP_REGVAR) {
12284 ins->opcode = OP_MOVE;
12285 ins->sreg1 = vtaddr->dreg;
12287 else if (var->inst_left->opcode == OP_REGOFFSET) {
12288 ins->opcode = OP_LOAD_MEMBASE;
12289 ins->inst_basereg = vtaddr->inst_basereg;
12290 ins->inst_offset = vtaddr->inst_offset;
12294 g_assert (var->opcode == OP_REGOFFSET);
12296 ins->opcode = OP_ADD_IMM;
12297 ins->sreg1 = var->inst_basereg;
12298 ins->inst_imm = var->inst_offset;
12301 *need_local_opts = TRUE;
12302 spec = INS_INFO (ins->opcode);
12305 if (ins->opcode < MONO_CEE_LAST) {
12306 mono_print_ins (ins);
12307 g_assert_not_reached ();
12311 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12315 if (MONO_IS_STORE_MEMBASE (ins)) {
12316 tmp_reg = ins->dreg;
12317 ins->dreg = ins->sreg2;
12318 ins->sreg2 = tmp_reg;
12321 spec2 [MONO_INST_DEST] = ' ';
12322 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12323 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12324 spec2 [MONO_INST_SRC3] = ' ';
12326 } else if (MONO_IS_STORE_MEMINDEX (ins))
12327 g_assert_not_reached ();
12332 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12333 printf ("\t %.3s %d", spec, ins->dreg);
12334 num_sregs = mono_inst_get_src_registers (ins, sregs);
12335 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12336 printf (" %d", sregs [srcindex]);
12343 regtype = spec [MONO_INST_DEST];
12344 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12347 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12348 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12349 MonoInst *store_ins;
12351 MonoInst *def_ins = ins;
12352 int dreg = ins->dreg; /* The original vreg */
12354 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12356 if (var->opcode == OP_REGVAR) {
12357 ins->dreg = var->dreg;
12358 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12360 * Instead of emitting a load+store, use a _membase opcode.
12362 g_assert (var->opcode == OP_REGOFFSET);
12363 if (ins->opcode == OP_MOVE) {
12367 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12368 ins->inst_basereg = var->inst_basereg;
12369 ins->inst_offset = var->inst_offset;
12372 spec = INS_INFO (ins->opcode);
12376 g_assert (var->opcode == OP_REGOFFSET);
12378 prev_dreg = ins->dreg;
12380 /* Invalidate any previous lvreg for this vreg */
12381 vreg_to_lvreg [ins->dreg] = 0;
12385 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12387 store_opcode = OP_STOREI8_MEMBASE_REG;
12390 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12392 if (regtype == 'l') {
12393 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12394 mono_bblock_insert_after_ins (bb, ins, store_ins);
12395 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12396 mono_bblock_insert_after_ins (bb, ins, store_ins);
12397 def_ins = store_ins;
12400 g_assert (store_opcode != OP_STOREV_MEMBASE);
12402 /* Try to fuse the store into the instruction itself */
12403 /* FIXME: Add more instructions */
12404 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12405 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12406 ins->inst_imm = ins->inst_c0;
12407 ins->inst_destbasereg = var->inst_basereg;
12408 ins->inst_offset = var->inst_offset;
12409 spec = INS_INFO (ins->opcode);
12410 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12411 ins->opcode = store_opcode;
12412 ins->inst_destbasereg = var->inst_basereg;
12413 ins->inst_offset = var->inst_offset;
12417 tmp_reg = ins->dreg;
12418 ins->dreg = ins->sreg2;
12419 ins->sreg2 = tmp_reg;
12422 spec2 [MONO_INST_DEST] = ' ';
12423 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12424 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12425 spec2 [MONO_INST_SRC3] = ' ';
12427 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12428 // FIXME: The backends expect the base reg to be in inst_basereg
12429 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12431 ins->inst_basereg = var->inst_basereg;
12432 ins->inst_offset = var->inst_offset;
12433 spec = INS_INFO (ins->opcode);
12435 /* printf ("INS: "); mono_print_ins (ins); */
12436 /* Create a store instruction */
12437 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12439 /* Insert it after the instruction */
12440 mono_bblock_insert_after_ins (bb, ins, store_ins);
12442 def_ins = store_ins;
12445 * We can't assign ins->dreg to var->dreg here, since the
12446 * sregs could use it. So set a flag, and do it after
12449 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12450 dest_has_lvreg = TRUE;
12455 if (def_ins && !live_range_start [dreg]) {
12456 live_range_start [dreg] = def_ins;
12457 live_range_start_bb [dreg] = bb;
12460 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12463 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12464 tmp->inst_c1 = dreg;
12465 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12472 num_sregs = mono_inst_get_src_registers (ins, sregs);
12473 for (srcindex = 0; srcindex < 3; ++srcindex) {
12474 regtype = spec [MONO_INST_SRC1 + srcindex];
12475 sreg = sregs [srcindex];
12477 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12478 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12479 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12480 MonoInst *use_ins = ins;
12481 MonoInst *load_ins;
12482 guint32 load_opcode;
12484 if (var->opcode == OP_REGVAR) {
12485 sregs [srcindex] = var->dreg;
12486 //mono_inst_set_src_registers (ins, sregs);
12487 live_range_end [sreg] = use_ins;
12488 live_range_end_bb [sreg] = bb;
12490 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12493 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12494 /* var->dreg is a hreg */
12495 tmp->inst_c1 = sreg;
12496 mono_bblock_insert_after_ins (bb, ins, tmp);
12502 g_assert (var->opcode == OP_REGOFFSET);
12504 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12506 g_assert (load_opcode != OP_LOADV_MEMBASE);
12508 if (vreg_to_lvreg [sreg]) {
12509 g_assert (vreg_to_lvreg [sreg] != -1);
12511 /* The variable is already loaded to an lvreg */
12512 if (G_UNLIKELY (cfg->verbose_level > 2))
12513 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12514 sregs [srcindex] = vreg_to_lvreg [sreg];
12515 //mono_inst_set_src_registers (ins, sregs);
12519 /* Try to fuse the load into the instruction */
12520 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12521 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12522 sregs [0] = var->inst_basereg;
12523 //mono_inst_set_src_registers (ins, sregs);
12524 ins->inst_offset = var->inst_offset;
12525 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12526 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12527 sregs [1] = var->inst_basereg;
12528 //mono_inst_set_src_registers (ins, sregs);
12529 ins->inst_offset = var->inst_offset;
12531 if (MONO_IS_REAL_MOVE (ins)) {
12532 ins->opcode = OP_NOP;
12535 //printf ("%d ", srcindex); mono_print_ins (ins);
12537 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12539 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12540 if (var->dreg == prev_dreg) {
12542 * sreg refers to the value loaded by the load
12543 * emitted below, but we need to use ins->dreg
12544 * since it refers to the store emitted earlier.
12548 g_assert (sreg != -1);
12549 vreg_to_lvreg [var->dreg] = sreg;
12550 g_assert (lvregs_len < 1024);
12551 lvregs [lvregs_len ++] = var->dreg;
12555 sregs [srcindex] = sreg;
12556 //mono_inst_set_src_registers (ins, sregs);
12558 if (regtype == 'l') {
12559 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12560 mono_bblock_insert_before_ins (bb, ins, load_ins);
12561 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12562 mono_bblock_insert_before_ins (bb, ins, load_ins);
12563 use_ins = load_ins;
12566 #if SIZEOF_REGISTER == 4
12567 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12569 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12570 mono_bblock_insert_before_ins (bb, ins, load_ins);
12571 use_ins = load_ins;
12575 if (var->dreg < orig_next_vreg) {
12576 live_range_end [var->dreg] = use_ins;
12577 live_range_end_bb [var->dreg] = bb;
12580 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12583 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12584 tmp->inst_c1 = var->dreg;
12585 mono_bblock_insert_after_ins (bb, ins, tmp);
12589 mono_inst_set_src_registers (ins, sregs);
12591 if (dest_has_lvreg) {
12592 g_assert (ins->dreg != -1);
12593 vreg_to_lvreg [prev_dreg] = ins->dreg;
12594 g_assert (lvregs_len < 1024);
12595 lvregs [lvregs_len ++] = prev_dreg;
12596 dest_has_lvreg = FALSE;
12600 tmp_reg = ins->dreg;
12601 ins->dreg = ins->sreg2;
12602 ins->sreg2 = tmp_reg;
12605 if (MONO_IS_CALL (ins)) {
12606 /* Clear vreg_to_lvreg array */
12607 for (i = 0; i < lvregs_len; i++)
12608 vreg_to_lvreg [lvregs [i]] = 0;
12610 } else if (ins->opcode == OP_NOP) {
12612 MONO_INST_NULLIFY_SREGS (ins);
12615 if (cfg->verbose_level > 2)
12616 mono_print_ins_index (1, ins);
12619 /* Extend the live range based on the liveness info */
12620 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12621 for (i = 0; i < cfg->num_varinfo; i ++) {
12622 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12624 if (vreg_is_volatile (cfg, vi->vreg))
12625 /* The liveness info is incomplete */
12628 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12629 /* Live from at least the first ins of this bb */
12630 live_range_start [vi->vreg] = bb->code;
12631 live_range_start_bb [vi->vreg] = bb;
12634 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12635 /* Live at least until the last ins of this bb */
12636 live_range_end [vi->vreg] = bb->last_ins;
12637 live_range_end_bb [vi->vreg] = bb;
12643 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12645 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12646 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12648 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12649 for (i = 0; i < cfg->num_varinfo; ++i) {
12650 int vreg = MONO_VARINFO (cfg, i)->vreg;
12653 if (live_range_start [vreg]) {
12654 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12656 ins->inst_c1 = vreg;
12657 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12659 if (live_range_end [vreg]) {
12660 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12662 ins->inst_c1 = vreg;
12663 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12664 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12666 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12672 g_free (live_range_start);
12673 g_free (live_range_end);
12674 g_free (live_range_start_bb);
12675 g_free (live_range_end_bb);
12680 * - use 'iadd' instead of 'int_add'
12681 * - handling ovf opcodes: decompose in method_to_ir.
12682 * - unify iregs/fregs
12683 * -> partly done, the missing parts are:
12684 * - a more complete unification would involve unifying the hregs as well, so
12685 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12686 * would no longer map to the machine hregs, so the code generators would need to
12687 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12688 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12689 * fp/non-fp branches speeds it up by about 15%.
12690 * - use sext/zext opcodes instead of shifts
12692 * - get rid of TEMPLOADs if possible and use vregs instead
12693 * - clean up usage of OP_P/OP_ opcodes
12694 * - cleanup usage of DUMMY_USE
12695 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12697 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12698 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12699 * - make sure handle_stack_args () is called before the branch is emitted
12700 * - when the new IR is done, get rid of all unused stuff
12701 * - COMPARE/BEQ as separate instructions or unify them ?
12702 * - keeping them separate allows specialized compare instructions like
12703 * compare_imm, compare_membase
12704 * - most back ends unify fp compare+branch, fp compare+ceq
12705 * - integrate mono_save_args into inline_method
12706 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
12707 * - handle long shift opts on 32 bit platforms somehow: they require
12708 * 3 sregs (2 for arg1 and 1 for arg2)
12709 * - make byref a 'normal' type.
12710 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12711 * variable if needed.
12712 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12713 * like inline_method.
12714 * - remove inlining restrictions
12715 * - fix LNEG and enable cfold of INEG
12716 * - generalize x86 optimizations like ldelema as a peephole optimization
12717 * - add store_mem_imm for amd64
12718 * - optimize the loading of the interruption flag in the managed->native wrappers
12719 * - avoid special handling of OP_NOP in passes
12720 * - move code inserting instructions into one function/macro.
12721 * - try a coalescing phase after liveness analysis
12722 * - add float -> vreg conversion + local optimizations on !x86
12723 * - figure out how to handle decomposed branches during optimizations, ie.
12724 * compare+branch, op_jump_table+op_br etc.
12725 * - promote RuntimeXHandles to vregs
12726 * - vtype cleanups:
12727 * - add a NEW_VARLOADA_VREG macro
12728 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12729 * accessing vtype fields.
12730 * - get rid of I8CONST on 64 bit platforms
12731 * - dealing with the increase in code size due to branches created during opcode
12733 * - use extended basic blocks
12734 * - all parts of the JIT
12735 * - handle_global_vregs () && local regalloc
12736 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12737 * - sources of increase in code size:
12740 * - isinst and castclass
12741 * - lvregs not allocated to global registers even if used multiple times
12742 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12744 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12745 * - add all micro optimizations from the old JIT
12746 * - put tree optimizations into the deadce pass
12747 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12748 * specific function.
12749 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12750 * fcompare + branchCC.
12751 * - create a helper function for allocating a stack slot, taking into account
12752 * MONO_CFG_HAS_SPILLUP.
12754 * - merge the ia64 switch changes.
12755 * - optimize mono_regstate2_alloc_int/float.
12756 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12757 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12758 * parts of the tree could be separated by other instructions, killing the tree
12759 * arguments, or stores killing loads etc. Also, should we fold loads into other
12760 * instructions if the result of the load is used multiple times ?
12761 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12762 * - LAST MERGE: 108395.
12763 * - when returning vtypes in registers, generate IR and append it to the end of the
12764 * last bb instead of doing it in the epilog.
12765 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12773 - When to decompose opcodes:
12774 - earlier: this makes some optimizations hard to implement, since the low level IR
12775    no longer contains the necessary information. But it is easier to do.
12776 - later: harder to implement, enables more optimizations.
12777 - Branches inside bblocks:
12778 - created when decomposing complex opcodes.
12779 - branches to another bblock: harmless, but not tracked by the branch
12780 optimizations, so need to branch to a label at the start of the bblock.
12781 - branches to inside the same bblock: very problematic, trips up the local
12782      reg allocator. Can be fixed by splitting the current bblock, but that is a
12783 complex operation, since some local vregs can become global vregs etc.
12784 - Local/global vregs:
12785 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12786 local register allocator.
12787 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12788 structure, created by mono_create_var (). Assigned to hregs or the stack by
12789 the global register allocator.
12790 - When to do optimizations like alu->alu_imm:
12791 - earlier -> saves work later on since the IR will be smaller/simpler
12792 - later -> can work on more instructions
12793 - Handling of valuetypes:
12794 - When a vtype is pushed on the stack, a new temporary is created, an
12795 instruction computing its address (LDADDR) is emitted and pushed on
12796 the stack. Need to optimize cases when the vtype is used immediately as in
12797 argument passing, stloc etc.
12798 - Instead of the to_end stuff in the old JIT, simply call the function handling
12799 the values on the stack before emitting the last instruction of the bb.
12802 #endif /* DISABLE_JIT */