2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* NOTE(review): this extract has lines elided inside most definitions; the
 * macro bodies below are incomplete as shown (closing `} } while (0)` lines
 * are missing from this view). */
/* Inliner tunables: per-branch cost estimate and the CIL-size limit below
 * which a callee is considered for inlining. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Log (at verbosity >= 2) why inlining failed and abort inlining of the
 * current callee by jumping to the inline_failure label. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Bail out of compilation if an exception was already recorded on the cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException naming both caller and callee, free the
 * temporary name strings, then bail out via exception_exit. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE but for an inaccessible field. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Abort generic sharing for OPCODE when compiling with a sharing context:
 * record MONO_EXCEPTION_GENERIC_SHARING_FAILED and bail out. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Analogous failure path for gsharedvt (value-type sharing) compilations;
 * additionally records the failing opcode and source location in the
 * exception message. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on the compile and bail out. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whenever 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for the ldind/stind -> membase opcode mappers defined
 * later in this file, and for the immediate-form opcode converters. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Lazily created icall signatures shared by trampoline call emission;
 * populated once by mono_create_helper_signatures () below. */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* Per-opcode register metadata generated by X-macro expansion of mini-ops.h:
 * first the dreg/sreg kind characters, then (after redefining MINI_OP /
 * MINI_OP3) the source-register counts. Surrounding array declarations are
 * elided in this extract. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion: compute how many source registers each opcode uses. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* Initialize liveness info for a variable: mark its first use as unseen
 * (0xffff sentinel basic-block id). */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set all three source registers of INS from the REGS array (REGS must hold
 * at least 3 entries). */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Thin public wrappers over the vreg allocators: integer, float,
 * pointer-sized, and allocation driven by an eval-stack type. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind of VREG (GC ref / managed pointer / plain
 * integer) to the newly allocated register. */
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/* Map TYPE to the register-move opcode used when copying a value of that
 * type between vregs. NOTE(review): many case labels and the returned
 * opcode constants are elided in this extract. */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get their own move handling. */
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are resolved through the container's open type. */
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only appear here under generic sharing; a variable that
 * stands for a value type is handled separately. */
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print BB's number, its incoming and outgoing CFG edges
 * (block number and depth-first number), and every instruction it holds. */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/* One-time initialization of the shared icall signatures declared above;
 * each string describes "ret-type arg-types" for mono_create_icall_signature. */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the current method unverifiable. Under gsharedvt this falls back to
 * compiling the concrete instantiation instead of failing the method. */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* Loader-error handling: trap into the debugger when break_on_unverified is
 * set, otherwise jump to the shared load_error label. TYPE_LOAD_ERROR also
 * records the offending klass in cfg->exception_ptr. */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Fetch the bblock starting at IL address IP from the offset->bb map, lazily
 * creating and registering it; an IP outside the method body is unverified. */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated managed-pointer register. */
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit registers, sign-extend an I4 operand that is mixed with a PTR
 * operand before a binop; compiles to nothing on 32-bit targets. */
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop the two top eval-stack values, type-check the combination via
 * type_from_op, widen if needed, emit the op and push the (possibly
 * decomposed) result back. */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP. */
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch: resolve the true target from the
 * branch's IL offset, the false target from NEXT_BLOCK (if given) or the
 * fall-through IP, link the CFG edges, and flush any values still on the
 * eval stack to locals before ending the bblock. */
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
/* Optional edge tracing; the guard around these printfs is elided in this
 * extract. */
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* De-duplicate, then grow from->out_bb by one slot (mempool arrays are
 * immutable, so a new array is allocated and the old entries copied). */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Mirror the same de-dup-and-grow logic for to->in_bb. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
/* Scan every EH clause; handler regions are checked before the protected
 * (try) range so nested handlers win. Region id is (clause index + 1) << 8
 * OR'ed with the region kind and the clause flags. */
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* A filter expression runs in [filter_offset, handler_offset). */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Not inside any handler: check the protected (try) range itself. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the EH clauses of kind TYPE whose protected range
 * contains IP but not TARGET — i.e. the handlers that a branch from IP to
 * TARGET would exit and whose finally/fault bodies must be run. */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer spill variable for an EH region.
 * The variable is flagged MONO_INST_INDIRECT so the register allocator
 * leaves it on the stack. */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler offset (NULL if it
 * has not been created yet). */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for a handler offset;
 * same stack-allocation treatment as the spvar above. */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets both inst->type (eval-stack category) and inst->klass. NOTE(review):
 * several case labels and `return` / `break` lines are elided in this
 * extract. */
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing; a gsharedvt variable
 * may stand for a value type, otherwise it is a reference. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed by the two operand types;
 * STACK_INV marks invalid IL combinations. Row/column order follows the
 * STACK_* enum. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation, indexed by the single operand type. */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid; non-zero values encode allowed (and in
 * some cases merely tolerated/unverifiable) operand combinations — see the
 * consumers in type_from_op (). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* Result type of shift ops, indexed by [value type][shift-amount type]. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
/* Each entry is the delta added to the generic CEE_/OP_ opcode to obtain
 * the type-specific opcode, indexed by the operand's stack type. */
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels, `break`s and fall-through lines of this
 * switch are elided in this extract; the visible lines are kept verbatim. */
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
/* Numeric binops: validate via bin_num_table, then specialize the opcode. */
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops. */
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Shift ops. */
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the long/float/int compare variant from the operand type;
 * on 64-bit, pointers/objects/managed pointers compare as longs. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
/* Immediate compare: only one real operand, so src1 is used twice. */
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-condition. */
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
/* Ordered compares only allow combinations flagged with bit 0. */
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
/* Unops. */
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
/* Narrowing int conversions always yield STACK_I4. */
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: pick the unsigned int->float conversion by source width. */
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is pointer-sized; opcode depends on source width. */
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit results. */
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are invalid here. */
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
/* Loads from memory: the stack type follows the load width. */
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack get object_class as a placeholder klass. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Table mapping (elided index) to the eval-stack type of each kind. */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the argument instructions are compatible with SIG; the return
 * convention (0/1) is not visible in this extract — elided lines include
 * the early returns. */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
/* Managed pointers on the stack require a byref parameter, and vice versa. */
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
/* R8 stack entries must match a floating-point parameter. */
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT-address variable; only meaningful when the target
 * defines MONO_ARCH_NEED_GOT_VAR and we are compiling AOT. */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
/* Lazily create the runtime-generic-context (rgctx) variable; requires a
 * generic sharing context and is forced onto the stack so the unwinder can
 * find it. */
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType*; for MP the klass's this_arg
 * (byref) type is used, for VTYPE the klass's byval type. */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of type_from_stack_type: map a MonoType to its STACK_* category.
 * NOTE(review): the return statements between case labels are elided in
 * this extract. */
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element class it accesses.
 * NOTE(review): the case labels preceding each return are elided in this
 * extract. */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/* Return the local used to carry eval-stack slot SLOT across basic-block
 * boundaries, reusing a cached one (keyed by slot and stack type) when the
 * slot fits in the method's declared max-stack. */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* For AOT compiles, remember the (image, token) pair under KEY so the AOT
 * compiler can later resolve the item without a generic context. */
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args: (purpose described in the block comment below)
 * NOTE(review): this extract elides lines; declarations, some loop bodies,
 * `continue`/`break` statements and closing braces are not visible here.
 */
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick (or allocate) the out_stack variable set for this bblock */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
/* a successor already has an in_stack: share its variables */
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* no successor had one: allocate fresh variables for each slot */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate our out_stack into each successor's in_stack,
 * flagging the cfg as unverifiable on a stack-depth mismatch */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
/* Phase 3: store each stack item into its variable and replace the
 * stack entries with the variables themselves */
1398 locals = bb->out_stack;
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *
 * Emit IR that loads interface_offsets [klass->interface_id] into INTF_REG,
 * given the vtable in VTABLE_REG. Under AOT the (negatively scaled) slot
 * offset comes from a MONO_PATCH_INFO_ADJUSTED_IID patch since the iid is
 * not known at compile time; the JIT path folds it into the load offset.
 */
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: iid is a known constant, so index directly below the vtable */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit IR that sets INTF_BIT_REG to a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap found at
 * BASE_REG + OFFSET. With COMPRESSED_INTERFACE_BITMAP the test is done by
 * calling the mono_class_interface_match icall; otherwise the relevant
 * bitmap byte is loaded and masked inline (AOT computes the mask at
 * runtime from a MONO_PATCH_INFO_IID patch, the JIT bakes it in).
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* AOT: iid is only known at load time, so compute
 * bitmap [iid >> 3] & (1 << (iid & 7)) with emitted IR */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: bitmap check with the bitmap taken from a MonoClass. */
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: bitmap check with the bitmap taken from a MonoVTable. */
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 *
 * Emit IR comparing MAX_IID_REG against klass->interface_id. If a
 * FALSE_TARGET bblock is given, branch there when max_iid < iid; otherwise
 * throw InvalidCastException on that condition.
 * NOTE(review): the branch/exception lines below appear to be the two arms
 * of an `if (false_target)` whose condition is elided in this extract.
 */
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Load max_interface_id from the vtable in VTABLE_REG, then do the check. */
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Load max_interface_id from the MonoClass in KLASS_REG, then do the check. */
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 * Emit an "is instance" subclass test: load
 * supertypes [klass->idepth - 1] from the MonoClass in KLASS_REG and
 * compare it against KLASS (given as KLASS_INS, an AOT class const, or an
 * immediate). Branch to TRUE_TARGET on match; when klass->idepth exceeds
 * the fixed-size supertable, first branch to FALSE_TARGET if the candidate
 * class's idepth is too small to contain the slot.
 * NOTE(review): lines are elided; the fall-through to FALSE_TARGET after
 * the equality branch is not visible in this extract.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
/* make sure klass->supertypes/idepth are initialized before we read them */
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no runtime klass instruction. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface cast check against the vtable in VTABLE_REG: range-check
 * the iid, test the interface bitmap bit, then either branch to TRUE_TARGET
 * on success or throw InvalidCastException on failure.
 * NOTE(review): the `if (true_target)` selecting between the last two lines
 * is elided in this extract.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_iface_class_cast:
 *
 * Same as mini_emit_iface_cast () but KLASS_REG holds a MonoClass* rather
 * than a vtable.
 */
1617 * Variant of the above that takes a register to the class, not the vtable.
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 * Emit an exact-class equality check: compare the MonoClass in KLASS_REG
 * against KLASS (as the runtime value KLASS_INST, an AOT class constant,
 * or an immediate pointer) and throw InvalidCastException when they differ.
 * NOTE(review): the leading `if (klass_inst)` condition is elided here.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check without a runtime klass value. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 * Compare the MonoClass in KLASS_REG against KLASS and branch to TARGET
 * using BRANCH_OP (e.g. OP_PBEQ/OP_PBNE_UN). Under AOT the class pointer
 * comes from a class-const patch instead of an immediate.
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the object in OBJ_REG (whose MonoClass is in
 * KLASS_REG) against KLASS, throwing InvalidCastException on failure.
 * Arrays are checked by rank + recursive element-class check (with special
 * handling for enum element types); non-arrays walk the supertypes table.
 * OBJ_REG may be -1 to skip the vector (non-bounded array) check.
 * NOTE(review): the `if (klass->rank)` guard around the array path is
 * elided in this extract, as are several closing braces.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* array path: first require an exact rank match */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* element-class check, with enum/underlying-type special cases */
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: supertypes-table walk (same scheme as isinst above,
 * but failures throw instead of branching) */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a runtime klass value. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit unrolled IR that stores VAL (must be 0) into SIZE bytes at
 * DESTREG + OFFSET. Small aligned sizes use immediate stores; larger sizes
 * materialize VAL in a register and store in the widest aligned chunks,
 * falling back to byte stores when unaligned access is not allowed.
 * NOTE(review): the loop/size-decrement statements between the store
 * macros are elided in this extract.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
/* small, sufficiently aligned block: one immediate store per size */
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: put the value in a register once, then store repeatedly */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* byte stores until the destination is register-aligned */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* drain the remainder with progressively narrower stores */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit unrolled IR copying SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, using load/store pairs in the widest width ALIGN and
 * the platform permit. Asserts SIZE < 10000 to bound code expansion.
 * NOTE(review): the loop/size-decrement statements between the load/store
 * pairs are elided in this extract.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* byte copies until the pointers are register-aligned */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
/* 64-bit chunks where unaligned access is permitted */
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* then 4-, 2- and 1-byte chunks for the remainder */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 * Map a return TYPE to the IR call opcode family (void/int/long/float/
 * vtype), selecting the _REG variant for CALLI and the VIRT variant for
 * virtual calls. Enums and generic instances are reduced to their
 * underlying type first; unknown types abort via g_error.
 * NOTE(review): case labels and `goto handle_enum`-style lines are elided
 * in this extract.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: plain CALL family */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
/* enums re-dispatch on their underlying integral type */
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 case MONO_TYPE_MVAR:
/* gsharedvt type variables are treated as vtypes here */
1922 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1924 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * NOTE(review): this extract elides lines — `return 0/1` statements, some
 * case labels and closing braces are not visible below.
 */
1930 * target_type_is_incompatible:
1931 * @cfg: MonoCompile context
1933 * Check that the item @arg on the evaluation stack can be stored
1934 * in the target type (can be a local, or field, etc).
1935 * The cfg arg can be used to check if we need verification or just
1938 * Returns: non-0 value if arg can't be stored on a target.
1941 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1943 MonoType *simple_type;
1946 if (target->byref) {
1947 /* FIXME: check that the pointed to types match */
1948 if (arg->type == STACK_MP)
1949 return arg->klass != mono_class_from_mono_type (target);
1950 if (arg->type == STACK_PTR)
/* non-byref: compare the stack type against the underlying CLI type */
1955 simple_type = mono_type_get_underlying_type (target);
1956 switch (simple_type->type) {
1957 case MONO_TYPE_VOID:
1961 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_CHAR:
1967 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1971 /* STACK_MP is needed when setting pinned locals */
1972 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1977 case MONO_TYPE_FNPTR:
1979 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1980 * in native int. (#688008).
1982 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 if (arg->type != STACK_I8)
2001 if (arg->type != STACK_R8)
2004 case MONO_TYPE_VALUETYPE:
/* value types must match both stack kind and exact class */
2005 if (arg->type != STACK_VTYPE)
2007 klass = mono_class_from_mono_type (simple_type);
2008 if (klass != arg->klass)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (arg->type != STACK_VTYPE)
2014 klass = mono_class_from_mono_type (simple_type);
2015 if (klass != arg->klass)
2018 case MONO_TYPE_GENERICINST:
2019 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2020 if (arg->type != STACK_VTYPE)
2022 klass = mono_class_from_mono_type (simple_type);
2023 if (klass != arg->klass)
2027 if (arg->type != STACK_OBJ)
2029 /* FIXME: check type compatibility */
2033 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2034 g_assert (cfg->generic_sharing_context);
2035 if (mini_type_var_is_vt (cfg, simple_type)) {
2036 if (arg->type != STACK_VTYPE)
2039 if (arg->type != STACK_OBJ)
2044 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * NOTE(review): lines are elided — `return 1`/`break` statements and some
 * case labels are missing from this extract.
 */
2050 * Prepare arguments for passing to a function call.
2051 * Return a non-zero value if the arguments can't be passed to the given
2053 * The type checks are not yet complete and some conversions may need
2054 * casts on 32 or 64 bit architectures.
2056 * FIXME: implement this using target_type_is_incompatible ()
2059 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2061 MonoType *simple_type;
/* the `this` argument (args [0]) must be an object/managed/native pointer */
2065 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2069 for (i = 0; i < sig->param_count; ++i) {
2070 if (sig->params [i]->byref) {
2071 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2075 simple_type = sig->params [i];
2076 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2078 switch (simple_type->type) {
2079 case MONO_TYPE_VOID:
2084 case MONO_TYPE_BOOLEAN:
2087 case MONO_TYPE_CHAR:
2090 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2096 case MONO_TYPE_FNPTR:
2097 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2100 case MONO_TYPE_CLASS:
2101 case MONO_TYPE_STRING:
2102 case MONO_TYPE_OBJECT:
2103 case MONO_TYPE_SZARRAY:
2104 case MONO_TYPE_ARRAY:
2105 if (args [i]->type != STACK_OBJ)
2110 if (args [i]->type != STACK_I8)
2115 if (args [i]->type != STACK_R8)
2118 case MONO_TYPE_VALUETYPE:
/* enums re-dispatch on their underlying integral type */
2119 if (simple_type->data.klass->enumtype) {
2120 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_TYPEDBYREF:
2127 if (args [i]->type != STACK_VTYPE)
2130 case MONO_TYPE_GENERICINST:
2131 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
2136 if (args [i]->type != STACK_VTYPE)
2140 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map an OP_*CALLVIRT opcode to its direct-call counterpart; asserts on
 * anything else. NOTE(review): most case labels are elided in this extract.
 */
2148 callvirt_to_call (int opcode)
2153 case OP_VOIDCALLVIRT:
2162 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 * Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (call through a memory slot, e.g. a vtable entry); asserts on anything
 * else. NOTE(review): some case labels are elided in this extract.
 */
2169 callvirt_to_call_membase (int opcode)
2173 return OP_CALL_MEMBASE;
2174 case OP_VOIDCALLVIRT:
2175 return OP_VOIDCALL_MEMBASE;
2177 return OP_FCALL_MEMBASE;
2179 return OP_LCALL_MEMBASE;
2181 return OP_VCALL_MEMBASE;
2183 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 * Materialize the IMT argument (either the supplied IMT_ARG value or a
 * constant for METHOD) into a register and attach it to CALL. The LLVM
 * path records it in call->imt_arg_reg; the JIT path passes it in
 * MONO_ARCH_IMT_REG when the architecture defines one, otherwise defers
 * to mono_arch_emit_imt_argument ().
 */
2189 #ifdef MONO_ARCH_HAVE_IMT
2190 /* Either METHOD or IMT_ARG needs to be set */
2192 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2196 if (COMPILE_LLVM (cfg)) {
2197 method_reg = alloc_preg (cfg);
2200 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2201 } else if (cfg->compile_aot) {
2202 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2205 MONO_INST_NEW (cfg, ins, OP_PCONST);
2206 ins->inst_p0 = method;
2207 ins->dreg = method_reg;
2208 MONO_ADD_INS (cfg->cbb, ins);
2212 call->imt_arg_reg = method_reg;
2214 #ifdef MONO_ARCH_IMT_REG
2215 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2217 /* Need this to keep the IMT arg alive */
2218 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same three ways of materializing the value */
2223 #ifdef MONO_ARCH_IMT_REG
2224 method_reg = alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2228 } else if (cfg->compile_aot) {
2229 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2232 MONO_INST_NEW (cfg, ins, OP_PCONST);
2233 ins->inst_p0 = method;
2234 ins->dreg = method_reg;
2235 MONO_ADD_INS (cfg->cbb, ins);
2238 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2240 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo from MP and initialize it with IP, TYPE and
 * TARGET. NOTE(review): the ip/type field assignments and the return are
 * elided in this extract.
 */
2245 static MonoJumpInfo *
2246 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2248 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2252 ji->data.target = target;
/* Context-used flags for KLASS, or 0 when not compiling shared code. */
2258 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2260 if (cfg->generic_sharing_context)
2261 return mono_class_check_context_used (klass);
/* Context-used flags for METHOD, or 0 when not compiling shared code. */
2267 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2269 if (cfg->generic_sharing_context)
2270 return mono_method_check_context_used (method);
/*
 * NOTE(review): lines are elided; the assignments that actually set
 * pass_vtable/pass_mrgctx TRUE are not all visible in this extract.
 */
2276 * check_method_sharing:
2278 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2281 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2283 gboolean pass_vtable = FALSE;
2284 gboolean pass_mrgctx = FALSE;
/* vtable is needed for static/valuetype methods of generic classes */
2286 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2287 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2288 gboolean sharable = FALSE;
2290 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2293 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2294 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2295 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2297 sharable = sharing_enabled && context_sharable;
2301 * Pass vtable iff target method might
2302 * be shared, which means that sharing
2303 * is enabled for its class and its
2304 * context is sharable (and it's not a
2307 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx is needed for generic methods (method_inst present) */
2311 if (mini_method_get_context (cmethod) &&
2312 mini_method_get_context (cmethod)->method_inst) {
2313 g_assert (!pass_vtable);
2315 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2319 MonoGenericContext *context = mini_method_get_context (cmethod);
2320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2322 if (sharing_enabled && context_sharable)
2324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* report results through the optional out parameters */
2329 if (out_pass_vtable)
2330 *out_pass_vtable = pass_vtable;
2331 if (out_pass_mrgctx)
2332 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Build a MonoCallInst for SIG/ARGS: choose the opcode (OP_TAILCALL for
 * tail calls, otherwise derived from the return type), arrange the return
 * value (vret_var for vtype returns, a fresh dreg otherwise), convert R4
 * arguments on soft-float targets, and let the backend (LLVM or the
 * architecture code) emit the out-args. Returns the call instruction.
 * NOTE(review): lines are elided — some conditions and closing braces are
 * not visible in this extract.
 */
2335 inline static MonoCallInst *
2336 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2337 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2340 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2347 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2350 call->signature = sig;
2351 call->rgctx_reg = rgctx;
2353 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* tail-call branch: vtype returns reuse the caller's vret_addr */
2356 if (mini_type_is_vtype (cfg, sig->ret)) {
2357 call->vret_var = cfg->vret_addr;
2358 //g_assert_not_reached ();
/* normal vtype return: allocate a temp and pass its address */
2360 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2361 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2364 temp->backend.is_pinvoke = sig->pinvoke;
2367 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2368 * address of return value to increase optimization opportunities.
2369 * Before vtype decomposition, the dreg of the call ins itself represents the
2370 * fact the call modifies the return value. After decomposition, the call will
2371 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2372 * will be transformed into an LDADDR.
2374 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2375 loada->dreg = alloc_preg (cfg);
2376 loada->inst_p0 = temp;
2377 /* We reference the call too since call->dreg could change during optimization */
2378 loada->inst_p1 = call;
2379 MONO_ADD_INS (cfg->cbb, loada);
2381 call->inst.dreg = temp->dreg;
2383 call->vret_var = loada;
2384 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2385 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2387 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2388 if (COMPILE_SOFT_FLOAT (cfg)) {
2390 * If the call has a float argument, we would need to do an r8->r4 conversion using
2391 * an icall, but that cannot be done during the call sequence since it would clobber
2392 * the call registers + the stack. So we do it before emitting the call.
2394 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2396 MonoInst *in = call->args [i];
2398 if (i >= sig->hasthis)
2399 t = sig->params [i - sig->hasthis];
2401 t = &mono_defaults.int_class->byval_arg;
2402 t = mono_type_get_underlying_type (t);
2404 if (!t->byref && t->type == MONO_TYPE_R4) {
2405 MonoInst *iargs [1];
2409 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2411 /* The result will be in an int vreg */
2412 call->args [i] = conv;
2418 call->need_unbox_trampoline = unbox_trampoline;
/* hand off out-arg emission to the active backend */
2421 if (COMPILE_LLVM (cfg))
2422 mono_llvm_emit_call (cfg, call);
2424 mono_arch_emit_call (cfg, call);
2426 mono_arch_emit_call (cfg, call);
2429 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2430 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx value in RGCTX_REG to CALL: via the dedicated
 * MONO_ARCH_RGCTX_REG on architectures that define one, otherwise by
 * recording the vreg in call->rgctx_arg_reg.
 */
2436 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2438 #ifdef MONO_ARCH_RGCTX_REG
2439 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2440 cfg->uses_rgctx_reg = TRUE;
2441 call->rgctx_reg = TRUE;
2443 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG and arguments
 * ARGS, optionally attaching an IMT argument and an rgctx argument.
 * The rgctx value is copied into a fresh vreg before the out-args are
 * emitted so it survives until the call. Returns the call instruction.
 */
2450 inline static MonoInst*
2451 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2457 rgctx_reg = mono_alloc_preg (cfg);
2458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2461 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* indirect call: the target address lives in sreg1 */
2463 call->inst.sreg1 = addr->dreg;
2466 emit_imt_argument (cfg, call, NULL, imt_arg);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2471 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2473 return (MonoInst*)call;
2477 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2480 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2482 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit IR for a managed method call. Handles string ctors, transparent
 *   proxies (remoting), delegate Invoke fast paths, devirtualization of
 *   sealed/final methods, and virtual dispatch through the vtable or the
 *   IMT. THIS non-NULL selects virtual dispatch; TAIL marks a tail call;
 *   IMT_ARG / RGCTX_ARG are optional extra hidden arguments.
 *   Returns the emitted call instruction.
 *   NOTE(review): several lines (braces, #else/#endif, declarations) are
 *   elided in this extraction; comments below only describe what is visible.
 */
2485 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2486 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2488 #ifndef DISABLE_REMOTING
2489 gboolean might_be_remote = FALSE;
2491 gboolean virtual = this != NULL;
2492 gboolean enable_for_aot = TRUE;
2496 gboolean need_unbox_trampoline;
2499 sig = mono_method_signature (method);
/* Copy the rgctx value to a fresh preg so set_rgctx_arg can consume it */
2502 rgctx_reg = mono_alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared void but actually return the string: patch
 * the signature's return type accordingly. */
2506 if (method->string_ctor) {
2507 /* Create the real signature */
2508 /* FIXME: Cache these */
2509 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2510 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2515 context_used = mini_method_check_context_used (cfg, method);
2517 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) receiver might hit a
 * transparent proxy and needs the remoting-invoke-with-check wrapper. */
2518 might_be_remote = this && sig->hasthis &&
2519 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2520 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2522 if (might_be_remote && context_used) {
2525 g_assert (cfg->generic_sharing_context);
2527 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2529 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype methods reached through object/interface need the unbox
 * trampoline to adjust 'this'. */
2533 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2537 #ifndef DISABLE_REMOTING
2538 if (might_be_remote)
2539 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2542 call->method = method;
2543 call->inst.flags |= MONO_INST_HAS_METHOD;
2544 call->inst.inst_left = this;
2545 call->tail_call = tail;
2548 int vtable_reg, slot_reg, this_reg;
2551 this_reg = this->dreg;
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2553 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2554 MonoInst *dummy_use;
2556 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2558 /* Make a call to delegate->invoke_impl */
2559 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2560 call->inst.inst_basereg = this_reg;
2561 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2562 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2564 /* We must emit a dummy use here because the delegate trampoline will
2565 replace the 'this' argument with the delegate target making this activation
2566 no longer a root for the delegate.
2567 This is an issue for delegates that target collectible code such as dynamic
2568 methods of GC'able assemblies.
2570 For a test case look into #667921.
2572 FIXME: a dummy use is not the best way to do it as the local register allocator
2573 will put it on a caller save register and spil it around the call.
2574 Ideally, we would either put it on a callee save register or only do the store part.
2576 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2578 return (MonoInst*)call;
/* Devirtualize: non-virtual method, or a final method (except the
 * remoting wrapper), on a non-shared non-MBR class. */
2581 if ((!cfg->compile_aot || enable_for_aot) &&
2582 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2583 (MONO_METHOD_IS_FINAL (method) &&
2584 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2585 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2587 * the method is not virtual, we just need to ensure this is not null
2588 * and then we can call the method directly.
2590 #ifndef DISABLE_REMOTING
2591 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2593 * The check above ensures method is not gshared, this is needed since
2594 * gshared methods can't have wrappers.
2596 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2600 if (!method->string_ctor)
2601 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2603 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2604 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2606 * the method is virtual, but we can statically dispatch since either
2607 * it's class or the method itself are sealed.
2608 * But first we need to ensure it's not a null reference.
2610 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2612 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on NULL 'this'),
 * then pick the slot either from the IMT (interfaces) or the vtable. */
2614 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2616 vtable_reg = alloc_preg (cfg);
2617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2618 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2620 #ifdef MONO_ARCH_HAVE_IMT
2622 guint32 imt_slot = mono_method_get_imt_slot (method);
2623 emit_imt_argument (cfg, call, call->method, imt_arg);
2624 slot_reg = vtable_reg;
/* IMT entries live at negative offsets from the vtable pointer */
2625 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2628 if (slot_reg == -1) {
2629 slot_reg = alloc_preg (cfg);
2630 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2631 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2634 slot_reg = vtable_reg;
2635 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2636 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2637 #ifdef MONO_ARCH_HAVE_IMT
/* presumably the generic-virtual-method case — confirm in full source */
2639 g_assert (mono_method_signature (method)->generic_param_count);
2640 emit_imt_argument (cfg, call, call->method, imt_arg);
2645 call->inst.sreg1 = slot_reg;
2646 call->inst.inst_offset = offset;
2647 call->virtual = TRUE;
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2654 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2656 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: call METHOD with its own signature, no tail call,
 *   no IMT or rgctx argument.
 */
2660 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2662 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   No virtual dispatch, no tail call, no hidden arguments.
 */
2666 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2673 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2676 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2678 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to
 *   its wrapper using the icall's registered signature.
 */
2682 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2684 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2688 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2692 * mono_emit_abs_call:
2694 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2696 inline static MonoInst*
2697 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2698 MonoMethodSignature *sig, MonoInst **args)
2700 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2704 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in abs_patches so the ABS patch resolver can map the fake
 * "address" back to the patch info at emit time. */
2707 if (cfg->abs_patches == NULL)
2708 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2709 g_hash_table_insert (cfg->abs_patches, ji, ji);
2710 ins = mono_emit_native_call (cfg, ji, sig, args);
2711 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen the result of a call when the callee (pinvoke, or any call under
 *   LLVM) may return a small integer without defined upper bits. Picks the
 *   sign/zero-extension matching the return type's load opcode and emits it
 *   on top of the call's dreg.
 */
2716 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2719 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2723 * Native code might return non register sized integers
2724 * without initializing the upper bits.
2726 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2727 case OP_LOADI1_MEMBASE:
2728 widen_op = OP_ICONV_TO_I1;
2730 case OP_LOADU1_MEMBASE:
2731 widen_op = OP_ICONV_TO_U1;
2733 case OP_LOADI2_MEMBASE:
2734 widen_op = OP_ICONV_TO_I2;
2736 case OP_LOADU2_MEMBASE:
2737 widen_op = OP_ICONV_TO_U2;
2743 if (widen_op != -1) {
2744 int dreg = alloc_preg (cfg);
2747 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2748 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the corlib-internal String.memcpy(3 args)
 *   helper. Aborts if the installed corlib doesn't provide it.
 */
2758 get_memcpy_method (void)
2760 static MonoMethod *memcpy_method = NULL;
2761 if (!memcpy_method) {
2762 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2764 g_error ("Old corlib found. Install a new one");
2766 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Build a bitmap (one bit per pointer-sized word, starting at OFFSET)
 *   of which words of KLASS hold object references, recursing into
 *   embedded valuetypes that themselves contain references. Used to tell
 *   the GC which stores need a write barrier. Static fields are skipped.
 */
2770 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2772 MonoClassField *field;
2773 gpointer iter = NULL;
2775 while ((field = mono_class_get_fields (klass, &iter))) {
2778 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header; strip it */
2780 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2781 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2782 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2783 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2785 MonoClass *field_class = mono_class_from_mono_type (field->type);
2786 if (field_class->has_references)
2787 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR.
 *   Three strategies, best first:
 *     1. arch-specific OP_CARD_TABLE_WBARRIER (JIT only, needs card table);
 *     2. inline card-table marking: shift the address, optionally mask,
 *        add the card-table base and store a 1 byte;
 *     3. call the generic managed write-barrier method.
 *   No-op unless cfg->gen_write_barriers is set.
 */
2793 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2795 int card_table_shift_bits;
2796 gpointer card_table_mask;
2798 MonoInst *dummy_use;
2799 int nursery_shift_bits;
2800 size_t nursery_size;
2801 gboolean has_card_table_wb = FALSE;
2803 if (!cfg->gen_write_barriers)
2806 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2808 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2810 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2811 has_card_table_wb = TRUE;
2814 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2817 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2818 wbarrier->sreg1 = ptr->dreg;
2819 wbarrier->sreg2 = value->dreg;
2820 MONO_ADD_INS (cfg->cbb, wbarrier);
2821 } else if (card_table) {
2822 int offset_reg = alloc_preg (cfg);
2823 int card_reg = alloc_preg (cfg);
/* card index = address >> shift (optionally masked) */
2826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2827 if (card_table_mask)
2828 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2830 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2831 * IMM's larger than 32bits.
2833 if (cfg->compile_aot) {
2834 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2836 MONO_INST_NEW (cfg, ins, OP_PCONST);
2837 ins->inst_p0 = card_table;
2838 ins->dreg = card_reg;
2839 MONO_ADD_INS (cfg->cbb, ins);
/* mark the card: card_table [addr >> shift] = 1 */
2842 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2845 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2846 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
2849 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inlined, write-barrier-aware copy of a valuetype of
 *   KLASS from iargs[1] to iargs[0]. Small copies (<= 5 words) are
 *   unrolled word-by-word with a barrier after each reference store;
 *   larger ones (up to 32 words) call mono_gc_wbarrier_value_copy_bitmap
 *   with the reference bitmap. Bails out (returns) when alignment is
 *   below pointer size or size exceeds the bitmap's 32-word capacity.
 *   NOTE(review): the return statements / value are elided from this
 *   view — callers appear to treat it as a gboolean "handled" flag.
 */
2853 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2855 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2856 unsigned need_wb = 0;
2861 /*types with references can't have alignment smaller than sizeof(void*) */
2862 if (align < SIZEOF_VOID_P)
2865 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2866 if (size > 32 * SIZEOF_VOID_P)
2869 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2871 /* We don't unroll more than 5 stores to avoid code bloat. */
2872 if (size > 5 * SIZEOF_VOID_P) {
2873 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2874 size += (SIZEOF_VOID_P - 1);
2875 size &= ~(SIZEOF_VOID_P - 1);
2877 EMIT_NEW_ICONST (cfg, iargs [2], size);
2878 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2879 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2883 destreg = iargs [0]->dreg;
2884 srcreg = iargs [1]->dreg;
2887 dest_ptr_reg = alloc_preg (cfg);
2888 tmp_reg = alloc_preg (cfg);
2891 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled copy: one pointer-sized load+store per word, barrier where
 * the bitmap (need_wb) marks a reference slot. */
2893 while (size >= SIZEOF_VOID_P) {
2894 MonoInst *load_inst;
2895 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2896 load_inst->dreg = tmp_reg;
2897 load_inst->inst_basereg = srcreg;
2898 load_inst->inst_offset = offset;
2899 MONO_ADD_INS (cfg->cbb, load_inst);
2901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2904 emit_write_barrier (cfg, iargs [0], load_inst);
2906 offset += SIZEOF_VOID_P;
2907 size -= SIZEOF_VOID_P;
2910 /*tmp += sizeof (void*)*/
2911 if (size >= SIZEOF_VOID_P) {
2912 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2913 MONO_ADD_INS (cfg->cbb, iargs [0]);
2917 /* Those cannot be references since size < sizeof (void*) */
2919 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2920 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2933 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2934 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2943 * Emit code to copy a valuetype of type @klass whose address is stored in
2944 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Handles gsharedvt (size/memcpy looked up via rgctx), write-barriered
 * copies when the type has references, an inline memcpy fast path for
 * small types, and a call to the corlib memcpy helper otherwise.
 * NATIVE selects native (marshalled) layout and implies no references. */
2947 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2949 MonoInst *iargs [4];
2950 int context_used, n;
2952 MonoMethod *memcpy_method;
2953 MonoInst *size_ins = NULL;
2954 MonoInst *memcpy_ins = NULL;
2958 * This check breaks with spilled vars... need to handle it during verification anyway.
2959 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the concrete size/memcpy are only known at runtime */
2962 if (mini_is_gsharedvt_klass (cfg, klass)) {
2964 context_used = mini_class_check_context_used (cfg, klass);
2965 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2966 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
2970 n = mono_class_native_size (klass, &align);
2972 n = mono_class_value_size (klass, &align);
2974 /* if native is true there should be no references in the struct */
2975 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2976 /* Avoid barriers when storing to the stack */
2977 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2978 (dest->opcode == OP_LDADDR))) {
2984 context_used = mini_class_check_context_used (cfg, klass);
2986 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2987 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2989 } else if (context_used) {
2990 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2992 if (cfg->compile_aot) {
2993 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2995 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* the icall reads the GC descriptor, make sure it is computed */
2996 mono_class_compute_gc_descriptor (klass);
3001 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3003 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references: plain memcpy, inlined when small and intrinsics are on */
3008 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3009 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3010 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3015 iargs [2] = size_ins;
3017 EMIT_NEW_ICONST (cfg, iargs [2], n);
3019 memcpy_method = get_memcpy_method ();
3021 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3023 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the corlib-internal String.memset(3 args)
 *   helper. Aborts if the installed corlib doesn't provide it.
 */
3028 get_memset_method (void)
3030 static MonoMethod *memset_method = NULL;
3031 if (!memset_method) {
3032 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3034 g_error ("Old corlib found. Install a new one");
3036 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of KLASS at DEST->dreg
 *   (the initobj opcode). gsharedvt types go through a runtime-resolved
 *   bzero helper; otherwise small types are zeroed inline and larger
 *   ones via the corlib memset helper.
 */
3040 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3042 MonoInst *iargs [3];
3043 int n, context_used;
3045 MonoMethod *memset_method;
3046 MonoInst *size_ins = NULL;
3047 MonoInst *bzero_ins = NULL;
3048 static MonoMethod *bzero_method;
3050 /* FIXME: Optimize this for the case when dest is an LDADDR */
3052 mono_class_init (klass);
/* gsharedvt: size and bzero routine come from the runtime generic context */
3053 if (mini_is_gsharedvt_klass (cfg, klass)) {
3054 context_used = mini_class_check_context_used (cfg, klass);
3055 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3056 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3058 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3059 g_assert (bzero_method);
3061 iargs [1] = size_ins;
3062 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3066 n = mono_class_value_size (klass, &align);
/* small type: zero inline rather than calling out */
3068 if (n <= sizeof (gpointer) * 5) {
3069 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3072 memset_method = get_memset_method ();
3074 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3075 EMIT_NEW_ICONST (cfg, iargs [2], n);
3076 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for METHOD.
 *   Source depends on how the method is shared:
 *     - method-inflated (MONO_GENERIC_CONTEXT_USED_METHOD): the MRGCTX
 *       hidden argument, spilled to the vtable var;
 *     - static or valuetype methods: the VTable hidden argument (with an
 *       extra indirection through MonoMethodRuntimeGenericContext when
 *       the method is inflated);
 *     - otherwise: loaded from this->vtable.
 *   NOTE(review): the tail returning the result is elided from this view.
 */
3081 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3083 MonoInst *this = NULL;
3085 g_assert (cfg->generic_sharing_context);
3087 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3088 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3089 !method->klass->valuetype)
3090 EMIT_NEW_ARGLOAD (cfg, this, 0);
3092 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3093 MonoInst *mrgctx_loc, *mrgctx_var;
3096 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3098 mrgctx_loc = mono_get_vtable_var (cfg);
3099 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3102 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3103 MonoInst *vtable_loc, *vtable_var;
3107 vtable_loc = mono_get_vtable_var (cfg);
3108 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3110 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3111 MonoInst *mrgctx_var = vtable_var;
3114 vtable_reg = alloc_preg (cfg);
3115 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3116 vtable_var->type = STACK_PTR;
3124 vtable_reg = alloc_preg (cfg);
3125 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) an rgctx-fetch patch entry: which method's
 *   context to use, whether it is an MRGCTX, the embedded jump-info
 *   describing the target datum, and the kind of info to fetch.
 */
3130 static MonoJumpInfoRgctxEntry *
3131 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3133 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3134 res->method = method;
3135 res->in_mrgctx = in_mrgctx;
3136 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3137 res->data->type = patch_type;
3138 res->data->data.target = patch_data;
3139 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 *   against the runtime generic context RGCTX at run time.
 */
3144 static inline MonoInst*
3145 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3147 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that fetches RGCTX_TYPE info about KLASS from the current
 *   method's runtime generic context.
 */
3151 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3152 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3154 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3155 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3157 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR that fetches RGCTX_TYPE info about signature SIG from the
 *   current method's runtime generic context.
 */
3161 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3162 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3164 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3165 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3167 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR that fetches RGCTX_TYPE info for a gsharedvt call described
 *   by (SIG, CMETHOD) from the current method's runtime generic context.
 */
3171 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3172 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3174 MonoJumpInfoGSharedVtCall *call_info;
3175 MonoJumpInfoRgctxEntry *entry;
3178 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3179 call_info->sig = sig;
3180 call_info->method = cmethod;
3182 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3183 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3185 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR that fetches the gsharedvt-method INFO for CMETHOD from the
 *   current method's runtime generic context.
 */
3190 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3191 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3193 MonoJumpInfoRgctxEntry *entry;
3196 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3197 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3199 return emit_rgctx_fetch (cfg, rgctx, entry);
3203 * emit_get_rgctx_method:
3205 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3206 * normal constants, else emit a load from the rgctx.
3209 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3210 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in play: the value is a compile-time constant */
3212 if (!context_used) {
3215 switch (rgctx_type) {
3216 case MONO_RGCTX_INFO_METHOD:
3217 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3219 case MONO_RGCTX_INFO_METHOD_RGCTX:
3220 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3223 g_assert_not_reached ();
/* Shared code: fetch from the runtime generic context */
3226 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3227 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3229 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that fetches RGCTX_TYPE info about FIELD from the current
 *   method's runtime generic context.
 */
3234 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3235 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3237 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3238 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3240 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the slot index in the method's gsharedvt info table for
 *   (DATA, RGCTX_TYPE), reusing an existing matching entry when possible
 *   (except for LOCAL_OFFSET entries, which are never shared) and
 *   appending a new template otherwise.
 */
3244 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3246 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3247 MonoRuntimeGenericContextInfoTemplate *template;
3252 for (i = 0; i < info->entries->len; ++i) {
3253 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3255 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3259 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3260 template->info_type = rgctx_type;
3261 template->data = data;
3263 idx = info->entries->len;
3265 g_ptr_array_add (info->entries, template);
3271 * emit_get_gsharedvt_info:
3273 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3276 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3281 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3282 /* Load info->entries [idx] */
3283 dreg = alloc_preg (cfg);
3284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's type.
 */
3290 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3292 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3296 * On return the caller must check @klass for load errors.
/* emit_generic_class_init:
 *   Emit a call to the generic-class-init trampoline for KLASS, passing
 *   the vtable either via rgctx (shared code) or as a constant. On archs
 *   with MONO_ARCH_VTABLE_REG the vtable is passed in that register. */
3299 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3301 MonoInst *vtable_arg;
3305 context_used = mini_class_check_context_used (cfg, klass);
3308 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3309 klass, MONO_RGCTX_INFO_VTABLE);
3311 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3315 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature */
3318 if (COMPILE_LLVM (cfg))
3319 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3321 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3322 #ifdef MONO_ARCH_VTABLE_REG
3323 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3324 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IL offset (ip - header->code), but
 *   only when sequence points are enabled and we are compiling METHOD
 *   itself (not an inlined callee). INTR_LOC marks an interruptible spot.
 */
3331 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3335 if (cfg->gen_seq_points && cfg->method == method) {
3336 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3337 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   Under --debug=casts, record the source class (read from the object at
 *   OBJ_REG) and the target KLASS into the thread's MonoJitTlsData so a
 *   failed cast can produce a detailed exception message. No-op otherwise.
 */
3342 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3344 if (mini_get_debug_options ()->better_cast_details) {
3345 int to_klass_reg = alloc_preg (cfg);
3346 int vtable_reg = alloc_preg (cfg);
3347 int klass_reg = alloc_preg (cfg);
3348 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3351 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3355 MONO_ADD_INS (cfg->cbb, tls_get);
/* klass = obj->vtable->klass */
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3357 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3359 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3360 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the TLS cast-details state written by save_cast_details, so a
 *   successful cast doesn't leave stale diagnostics behind.
 */
3366 reset_cast_details (MonoCompile *cfg)
3368 /* Reset the variables holding the cast details */
3369 if (mini_get_debug_options ()->better_cast_details) {
3370 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3372 MONO_ADD_INS (cfg->cbb, tls_get);
3373 /* It is enough to reset the from field */
3374 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3379 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type:
 *   Emit a runtime check that OBJ's actual type is exactly ARRAY_CLASS,
 *   throwing ArrayTypeMismatchException otherwise (stelem verification).
 *   Compares the class pointer under MONO_OPT_SHARED, an rgctx-loaded
 *   vtable under generic sharing, or a constant vtable otherwise. */
3382 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3384 int vtable_reg = alloc_preg (cfg);
3387 context_used = mini_class_check_context_used (cfg, array_class);
3389 save_cast_details (cfg, array_class, obj->dreg);
/* faulting load: also performs the null check on obj */
3391 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3393 if (cfg->opt & MONO_OPT_SHARED) {
3394 int class_reg = alloc_preg (cfg);
3395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3396 if (cfg->compile_aot) {
3397 int klass_reg = alloc_preg (cfg);
3398 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3399 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3403 } else if (context_used) {
3404 MonoInst *vtable_ins;
3406 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3407 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3409 if (cfg->compile_aot) {
3413 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3415 vt_reg = alloc_preg (cfg);
3416 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3417 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3420 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3426 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3428 reset_cast_details (cfg);
3432 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3433 * generic code is generated.
/* Calls Nullable<T>.Unbox on VAL: either indirectly through an
 * rgctx-resolved code address (shared code), or as a direct call with an
 * optional vtable hidden argument (pass_vtable). Returns the call inst. */
3436 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3438 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3441 MonoInst *rgctx, *addr;
3443 /* FIXME: What if the class is shared? We might not
3444 have to get the address of the method from the
3446 addr = emit_get_rgctx_method (cfg, context_used, method,
3447 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3449 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3451 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3453 gboolean pass_vtable, pass_mrgctx;
3454 MonoInst *rgctx_arg = NULL;
3456 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3457 g_assert (!pass_mrgctx);
3460 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3463 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3466 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit the unbox opcode for a boxed valuetype on the stack top (sp[0]):
 *   verify the object is not an array (rank == 0) and that its element
 *   class matches KLASS (via rgctx compare under sharing, or an inline
 *   class check otherwise), then produce the address of the unboxed data
 *   (obj + sizeof(MonoObject)).
 */
3471 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3475 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3476 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3477 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3478 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3480 obj_reg = sp [0]->dreg;
/* faulting load: doubles as the null check */
3481 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3484 /* FIXME: generics */
3485 g_assert (klass->rank == 0);
/* arrays cannot be unboxed */
3488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3489 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3495 MonoInst *element_class;
3497 /* This assertion is from the unboxcast insn */
3498 g_assert (klass->rank == 0);
3500 element_class = emit_get_rgctx_klass (cfg, context_used,
3501 klass->element_class, MONO_RGCTX_INFO_KLASS);
3503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3504 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3506 save_cast_details (cfg, klass->element_class, obj_reg);
3507 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3508 reset_cast_details (cfg);
/* address of the valuetype payload, just past the object header */
3511 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3512 MONO_ADD_INS (cfg->cbb, add);
3513 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete kind (vtype,
 *   reference, or Nullable) is only known at run time. Emits a three-way
 *   branch on MONO_RGCTX_INFO_CLASS_BOX_TYPE (1 = ref, 2 = nullable,
 *   otherwise vtype) and merges the result address in ADDR_REG:
 *     - vtype: address of the payload (obj + sizeof(MonoObject));
 *     - ref:   the object reference spilled to a temporary;
 *     - nullable: result of the runtime-resolved Nullable unbox helper.
 *   Loads the value through ADDR_REG and reports the final bblock in
 *   OUT_CBB.
 */
3520 handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3522 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3523 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3527 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3533 args [1] = klass_inst;
/* dynamic cast check, returns the (possibly unchanged) object */
3536 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3538 NEW_BBLOCK (cfg, is_ref_bb);
3539 NEW_BBLOCK (cfg, is_nullable_bb);
3540 NEW_BBLOCK (cfg, end_bb);
3541 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3548 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3549 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype fallthrough path */
3553 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3554 MONO_ADD_INS (cfg->cbb, addr);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3559 MONO_START_BB (cfg, is_ref_bb);
3561 /* Save the ref to a temporary */
3562 dreg = alloc_ireg (cfg);
3563 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3564 addr->dreg = addr_reg;
3565 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3569 MONO_START_BB (cfg, is_nullable_bb);
3572 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3573 MonoInst *unbox_call;
3574 MonoMethodSignature *unbox_sig;
3577 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* build a one-argument signature: T Unbox (object) */
3579 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3580 unbox_sig->ret = &klass->byval_arg;
3581 unbox_sig->param_count = 1;
3582 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3583 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3585 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3586 addr->dreg = addr_reg;
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3592 MONO_START_BB (cfg, end_bb);
3595 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3597 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS (FOR_BOX set when the
 * allocation is for boxing a valuetype).
 * Returns NULL and set the cfg exception on error.
 */
3606 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3608 MonoInst *iargs [2];
3614 MonoInst *iargs [2];
/* Generic-sharing path: the klass/vtable argument comes from the RGCTX. */
3616 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* MONO_OPT_SHARED allocators take a MonoClass*, the others a MonoVTable*. */
3618 if (cfg->opt & MONO_OPT_SHARED)
3619 rgctx_info = MONO_RGCTX_INFO_KLASS;
3621 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3622 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3624 if (cfg->opt & MONO_OPT_SHARED) {
3625 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3627 alloc_ftn = mono_object_new;
3630 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's inlined managed allocator when available. */
3633 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3634 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3636 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path: klass is known at JIT time. */
3639 if (cfg->opt & MONO_OPT_SHARED) {
3640 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3641 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3643 alloc_ftn = mono_object_new;
3644 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3645 /* This happens often in argument checking code, eg. throw new FooException... */
3646 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3647 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3648 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3650 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3651 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TypeLoadException on the cfg. */
3655 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3656 cfg->exception_ptr = klass;
3660 #ifndef MONO_CROSS_COMPILE
3661 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3664 if (managed_alloc) {
3665 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3666 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to the runtime allocation function; pass_lw selects whether it
 * takes the instance size in gpointer-sized words as its first argument. */
3668 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3670 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to a multiple of sizeof (gpointer), in words. */
3671 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3672 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3673 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3676 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3680 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR to box VAL, a value of type KLASS.  Handles nullables (by calling
 * Nullable<T>.Box) and gsharedvt classes (runtime dispatch on the box type).
 * *OUT_CBB receives the basic block the caller should continue emitting into.
 * Returns NULL and set the cfg exception on error.
 */
3687 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3689 MonoInst *alloc, *ins;
3691 *out_cbb = cfg->cbb;
/* Nullable<T>: boxing is implemented by Nullable<T>.Box (). */
3693 if (mono_class_is_nullable (klass)) {
3694 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3697 /* FIXME: What if the class is shared? We might not
3698 have to get the method address from the RGCTX. */
3699 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3700 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3701 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3703 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3705 gboolean pass_vtable, pass_mrgctx;
3706 MonoInst *rgctx_arg = NULL;
3708 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3709 g_assert (!pass_mrgctx);
3712 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3715 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3718 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box kind is only known at runtime, branch on the
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE value (1 = ref, 2 = nullable,
 * fall through = vtype). */
3722 if (mini_is_gsharedvt_klass (cfg, klass)) {
3723 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3724 MonoInst *res, *is_ref, *src_var, *addr;
3727 dreg = alloc_ireg (cfg);
3729 NEW_BBLOCK (cfg, is_ref_bb);
3730 NEW_BBLOCK (cfg, is_nullable_bb);
3731 NEW_BBLOCK (cfg, end_bb);
3732 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3737 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate a box and store the value after the object header. */
3740 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3743 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3744 ins->opcode = OP_STOREV_MEMBASE;
3746 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3747 res->type = STACK_OBJ;
3749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: boxing a ref is the identity, load the ref back out of
 * the vtype storage. */
3752 MONO_START_BB (cfg, is_ref_bb);
3753 addr_reg = alloc_ireg (cfg);
3755 /* val is a vtype, so has to load the value manually */
3756 src_var = get_vreg_to_inst (cfg, val->dreg);
3758 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3759 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3761 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Box through an RGCTX-provided address. */
3764 MONO_START_BB (cfg, is_nullable_bb);
3767 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3768 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3770 MonoMethodSignature *box_sig;
3773 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3774 * construct that method at JIT time, so have to do things by hand.
3776 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3777 box_sig->ret = &mono_defaults.object_class->byval_arg;
3778 box_sig->param_count = 1;
3779 box_sig->params [0] = &klass->byval_arg;
3780 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3781 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3782 res->type = STACK_OBJ;
3786 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3788 MONO_START_BB (cfg, end_bb);
3790 *out_cbb = cfg->cbb;
/* Plain (non-nullable, non-gsharedvt) case: allocate and store the value. */
3794 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3798 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, under generic sharing,
 * an open generic type) that has at least one variant (co- or contra-variant)
 * type argument which is a reference type.  Such casts need the slower
 * variance-aware cast machinery.
 */
3805 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3808 MonoGenericContainer *container;
3809 MonoGenericInst *ginst;
3811 if (klass->generic_class) {
3812 container = klass->generic_class->container_class->generic_container;
3813 ginst = klass->generic_class->context.class_inst;
3814 } else if (klass->generic_container && context_used) {
3815 container = klass->generic_container;
3816 ginst = container->context.class_inst;
/* Only variant parameters instantiated with reference types matter. */
3821 for (i = 0; i < container->type_argc; ++i) {
3823 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3825 type = ginst->type_argv [i];
3826 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *
 *   Decide whether a cast against KLASS needs the out-of-line helper path
 * (isinst/castclass with cache) instead of the inlined fast checks.
 * NOTE: the leading "TRUE ||" makes every class "complex" for now, i.e. the
 * inlined fast path is disabled -- see the FIXME below.
 */
3832 // FIXME: This doesn't work yet (class libs tests fail?)
3833 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL castclass semantics for SRC against KLASS:
 * NULL passes through, an incompatible object raises InvalidCastException.
 * Returns NULL and set the cfg exception on error.
 */
3839 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3841 MonoBasicBlock *is_null_bb;
3842 int obj_reg = src->dreg;
3843 int vtable_reg = alloc_preg (cfg);
3844 MonoInst *klass_inst = NULL;
/* Complex casts (see is_complex_isinst) go through the cached castclass
 * wrapper: args are the object, the cached klass (second word of the cache
 * entry) and the cache entry itself. */
3849 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3850 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3851 MonoInst *cache_ins;
3853 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3858 /* klass - it's the second element of the cache entry*/
3859 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3862 args [2] = cache_ins;
3864 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3867 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null check first, then type checks on the vtable/klass. */
3870 NEW_BBLOCK (cfg, is_null_bb);
3872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3875 save_cast_details (cfg, klass, obj_reg);
3877 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3878 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3879 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3881 int klass_reg = alloc_preg (cfg);
3883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes could be checked with a single vtable/klass compare, but
 * that fast path is disabled (if (0)) -- see the FIXME. */
3885 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3886 /* the remoting code is broken, access the class for now */
3887 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3888 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3890 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3891 cfg->exception_ptr = klass;
3894 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3899 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3901 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3902 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3906 MONO_START_BB (cfg, is_null_bb);
3908 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL isinst semantics for SRC against KLASS:
 * the result register holds the object if it is an instance of KLASS,
 * NULL otherwise (a NULL input also yields NULL via is_null_bb, which
 * simply keeps the copied input).
 * Returns NULL and set the cfg exception on error.
 */
3917 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3920 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3921 int obj_reg = src->dreg;
3922 int vtable_reg = alloc_preg (cfg);
3923 int res_reg = alloc_ireg_ref (cfg);
3924 MonoInst *klass_inst = NULL;
/* Complex casts go through the cached isinst wrapper (object, cached klass,
 * cache entry) -- mirrors handle_castclass. */
3929 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3930 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3931 MonoInst *cache_ins;
3933 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3938 /* klass - it's the second element of the cache entry*/
3939 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3942 args [2] = cache_ins;
3944 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3947 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3950 NEW_BBLOCK (cfg, is_null_bb);
3951 NEW_BBLOCK (cfg, false_bb);
3952 NEW_BBLOCK (cfg, end_bb);
3954 /* Do the assignment at the beginning, so the other assignment can be if converted */
3955 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3956 ins->type = STACK_OBJ;
3959 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3964 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3965 g_assert (!context_used);
3966 /* the is_null_bb target simply copies the input register to the output */
3967 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3969 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks, then check the element class; object[] and
 * enum element types get special-cased branches. */
3972 int rank_reg = alloc_preg (cfg);
3973 int eclass_reg = alloc_preg (cfg);
3975 g_assert (!context_used);
3976 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3977 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3978 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3980 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3981 if (klass->cast_class == mono_defaults.object_class) {
3982 int parent_reg = alloc_preg (cfg);
3983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3984 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3985 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3987 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3988 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3989 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3990 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3991 } else if (klass->cast_class == mono_defaults.enum_class) {
3992 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3994 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3995 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3997 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3998 /* Check that the object is a vector too */
3999 int bounds_reg = alloc_preg (cfg);
4000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4005 /* the is_null_bb target simply copies the input register to the output */
4006 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4008 } else if (mono_class_is_nullable (klass)) {
4009 g_assert (!context_used);
4010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4011 /* the is_null_bb target simply copies the input register to the output */
4012 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4014 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4015 g_assert (!context_used);
4016 /* the remoting code is broken, access the class for now */
4017 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4018 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4020 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4021 cfg->exception_ptr = klass;
4024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4026 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4029 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4032 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4033 /* the is_null_bb target simply copies the input register to the output */
4034 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false: the result becomes NULL. */
4039 MONO_START_BB (cfg, false_bb);
4041 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4044 MONO_START_BB (cfg, is_null_bb);
4046 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the "checked isinst" opcode used by the remoting-aware cast
 * path: the result is an integer code (see the comment below), with the
 * transparent-proxy cases only compiled in when remoting is enabled.
 */
4052 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4054 /* This opcode takes as input an object reference and a class, and returns:
4055 0) if the object is an instance of the class,
4056 1) if the object is not instance of the class,
4057 2) if the object is a proxy whose type cannot be determined */
4060 #ifndef DISABLE_REMOTING
4061 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4063 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4065 int obj_reg = src->dreg;
4066 int dreg = alloc_ireg (cfg);
4068 #ifndef DISABLE_REMOTING
4069 int klass_reg = alloc_preg (cfg);
4072 NEW_BBLOCK (cfg, true_bb);
4073 NEW_BBLOCK (cfg, false_bb);
4074 NEW_BBLOCK (cfg, end_bb);
4075 #ifndef DISABLE_REMOTING
4076 NEW_BBLOCK (cfg, false2_bb);
4077 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
4080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4081 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4083 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4084 #ifndef DISABLE_REMOTING
4085 NEW_BBLOCK (cfg, interface_fail_bb);
4088 tmp_reg = alloc_preg (cfg);
4089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4090 #ifndef DISABLE_REMOTING
/* Interface + remoting: on iface-cast failure, a transparent proxy with
 * custom type info yields result 2 (undeterminable). */
4091 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4092 MONO_START_BB (cfg, interface_fail_bb);
4093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4095 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4097 tmp_reg = alloc_preg (cfg);
4098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4102 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4105 #ifndef DISABLE_REMOTING
/* Non-interface + remoting: proxies are checked against their remote
 * proxy_class; non-proxies take the plain isinst path (no_proxy_bb). */
4106 tmp_reg = alloc_preg (cfg);
4107 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4110 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4111 tmp_reg = alloc_preg (cfg);
4112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4115 tmp_reg = alloc_preg (cfg);
4116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4120 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4123 MONO_START_BB (cfg, no_proxy_bb);
4125 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4127 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undeterminable proxy,
 * 0 = instance. */
4131 MONO_START_BB (cfg, false_bb);
4133 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4136 #ifndef DISABLE_REMOTING
4137 MONO_START_BB (cfg, false2_bb);
4139 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4143 MONO_START_BB (cfg, true_bb);
4145 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4147 MONO_START_BB (cfg, end_bb);
/* Return the integer result as an OP_ICONST-shaped instruction. */
4150 MONO_INST_NEW (cfg, ins, OP_ICONST);
4152 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the "checked castclass" opcode used by the remoting-aware
 * cast path: result codes are described in the comment below, and failures
 * raise InvalidCastException.
 */
4158 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4160 /* This opcode takes as input an object reference and a class, and returns:
4161 0) if the object is an instance of the class,
4162 1) if the object is a proxy whose type cannot be determined
4163 an InvalidCastException exception is thrown otherwhise*/
4166 #ifndef DISABLE_REMOTING
4167 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4169 MonoBasicBlock *ok_result_bb;
4171 int obj_reg = src->dreg;
4172 int dreg = alloc_ireg (cfg);
4173 int tmp_reg = alloc_preg (cfg);
4175 #ifndef DISABLE_REMOTING
4176 int klass_reg = alloc_preg (cfg);
4177 NEW_BBLOCK (cfg, end_bb);
4180 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL always casts successfully (result 0). */
4182 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4183 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4185 save_cast_details (cfg, klass, obj_reg);
4187 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4188 #ifndef DISABLE_REMOTING
4189 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface + remoting: only a transparent proxy with custom type info may
 * fail the iface cast without throwing -- it yields result 1. */
4191 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4192 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4193 MONO_START_BB (cfg, interface_fail_bb);
4194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4196 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4198 tmp_reg = alloc_preg (cfg);
4199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4201 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4203 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4204 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4206 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4207 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4208 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4211 #ifndef DISABLE_REMOTING
/* Non-interface + remoting: proxies are checked against their remote
 * proxy_class (failure = result 1); plain objects use the ordinary
 * castclass check (no_proxy_bb) which throws on mismatch. */
4212 NEW_BBLOCK (cfg, no_proxy_bb);
4214 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4215 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4216 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4218 tmp_reg = alloc_preg (cfg);
4219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4220 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4222 tmp_reg = alloc_preg (cfg);
4223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4227 NEW_BBLOCK (cfg, fail_1_bb);
4229 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4231 MONO_START_BB (cfg, fail_1_bb);
4233 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4234 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4236 MONO_START_BB (cfg, no_proxy_bb);
4238 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4240 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4244 MONO_START_BB (cfg, ok_result_bb);
4246 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4248 #ifndef DISABLE_REMOTING
4249 MONO_START_BB (cfg, end_bb);
/* Return the integer result as an OP_ICONST-shaped instruction. */
4253 MONO_INST_NEW (cfg, ins, OP_ICONST);
4255 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS over METHOD with TARGET
 * as the bound object, inlining the work of mono_delegate_ctor (): the
 * target, method, method_code and invoke_impl fields are filled directly,
 * with write barriers where the GC requires them.
 * Returns NULL and set the cfg exception on error.
 */
4263 static G_GNUC_UNUSED MonoInst*
4264 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4268 gpointer *trampoline;
4269 MonoInst *obj, *method_ins, *tramp_ins;
4273 obj = handle_alloc (cfg, klass, FALSE, 0);
4277 /* Inline the contents of mono_delegate_ctor */
4279 /* Set target field */
4280 /* Optimize away setting of NULL target */
4281 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4282 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4283 if (cfg->gen_write_barriers) {
4284 dreg = alloc_preg (cfg);
4285 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4286 emit_write_barrier (cfg, ptr, target);
4290 /* Set method field */
4291 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4293 if (cfg->gen_write_barriers) {
4294 dreg = alloc_preg (cfg);
4295 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4296 emit_write_barrier (cfg, ptr, method_ins);
4299 * To avoid looking up the compiled code belonging to the target method
4300 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4301 * store it, and we fill it after the method has been compiled.
4303 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4304 MonoInst *code_slot_ins;
4307 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The per-domain code slot is cached in method_code_hash, keyed by METHOD;
 * the hash is created lazily under the domain lock. */
4309 domain = mono_domain_get ();
4310 mono_domain_lock (domain);
4311 if (!domain_jit_info (domain)->method_code_hash)
4312 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4313 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4315 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4316 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4318 mono_domain_unlock (domain);
4320 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4325 /* Set invoke_impl field */
4326 if (cfg->compile_aot) {
4327 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4329 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4330 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4332 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4334 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall for a RANK-dimensional
 * newarr/newobj, with the dimension arguments in SP.  Marks the cfg as
 * having varargs and disables LLVM, which cannot compile the vararg
 * calling convention used by the icall.
 */
4340 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4342 MonoJitICallInfo *info;
4344 /* Need to register the icall so it gets an icall wrapper */
4345 info = mono_get_array_new_va_icall (rank);
4347 cfg->flags |= MONO_CFG_HAS_VARARGS;
4349 /* mono_array_new_va () needs a vararg calling convention */
4350 cfg->disable_llvm = TRUE;
4352 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4353 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR into the GOT variable at the very start of the
 * entry basic block, plus a dummy use in the exit block so the variable
 * stays live for the whole method.  No-op when there is no GOT variable or
 * it was already allocated.
 */
4357 mono_emit_load_got_addr (MonoCompile *cfg)
4359 MonoInst *getaddr, *dummy_use;
4361 if (!cfg->got_var || cfg->got_var_allocated)
4364 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4365 getaddr->cil_code = cfg->header->code;
4366 getaddr->dreg = cfg->got_var->dreg;
4368 /* Add it to the start of the first bblock */
4369 if (cfg->bb_entry->code) {
4370 getaddr->next = cfg->bb_entry->code;
4371 cfg->bb_entry->code = getaddr;
4374 MONO_ADD_INS (cfg->bb_entry, getaddr);
4376 cfg->got_var_allocated = TRUE;
4379 * Add a dummy use to keep the got_var alive, since real uses might
4380 * only be generated by the back ends.
4381 * Add it to end_bblock, so the variable's lifetime covers the whole
4383 * It would be better to make the usage of the got var explicit in all
4384 * cases when the backend needs it (i.e. calls, throw etc.), so this
4385 * wouldn't be needed.
4387 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4388 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size limit for inlining: initialized once inside
 * mono_method_check_inlining (), from the MONO_INLINELIMIT environment
 * variable when set, otherwise INLINE_LENGTH_LIMIT. */
4391 static int inline_limit;
4392 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Visible rejection criteria: generic sharing contexts, inline depth > 10,
 * noinlining/synchronized/marshalbyref methods, IL size over the (env-var
 * tunable) limit unless AggressiveInlining is set, classes whose cctor would
 * have to run, declarative security, and (on soft-float targets) R4
 * parameters or return values.
 */
4395 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4397 MonoMethodHeaderSummary header;
4399 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4400 MonoMethodSignature *sig = mono_method_signature (method);
4404 if (cfg->generic_sharing_context)
4407 if (cfg->inline_depth > 10)
4410 #ifdef MONO_ARCH_HAVE_LMF_OPS
4411 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4412 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4413 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4418 if (!mono_method_get_header_summary (method, &header))
4421 /*runtime, icall and pinvoke are checked by summary call*/
4422 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4423 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4424 (mono_class_is_marshalbyref (method->klass)) ||
4428 /* also consider num_locals? */
4429 /* Do the size check early to avoid creating vtables */
4430 if (!inline_limit_inited) {
4431 if (getenv ("MONO_INLINELIMIT"))
4432 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))&#59;
4434 inline_limit = INLINE_LENGTH_LIMIT;
4435 inline_limit_inited = TRUE;
4437 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4441 * if we can initialize the class of the method right away, we do,
4442 * otherwise we don't allow inlining if the class needs initialization,
4443 * since it would mean inserting a call to mono_runtime_class_init()
4444 * inside the inlined code
4446 if (!(cfg->opt & MONO_OPT_SHARED)) {
4447 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4448 if (cfg->run_cctors && method->klass->has_cctor) {
4449 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4450 if (!method->klass->runtime_info)
4451 /* No vtable created yet */
4453 vtable = mono_class_vtable (cfg->domain, method->klass);
4456 /* This makes so that inline cannot trigger */
4457 /* .cctors: too many apps depend on them */
4458 /* running with a specific order... */
4459 if (! vtable->initialized)
4461 mono_runtime_class_init (vtable);
4463 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4464 if (!method->klass->runtime_info)
4465 /* No vtable created yet */
4467 vtable = mono_class_vtable (cfg->domain, method->klass);
4470 if (!vtable->initialized)
4475 * If we're compiling for shared code
4476 * the cctor will need to be run at aot method load time, for example,
4477 * or at the end of the compilation of the inlining method.
4479 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4484 * CAS - do not inline methods with declarative security
4485 * Note: this has to be before any possible return TRUE;
4487 if (mono_security_method_has_declsec (method))
4490 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4491 if (mono_arch_is_soft_float ()) {
4493 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4495 for (i = 0; i < sig->param_count; ++i)
4496 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a call
 * to run the class constructor of VTABLE's class.  Already-initialized
 * vtables (JIT only -- AOT must keep the check), beforefieldinit classes,
 * classes with no pending cctor, and instance methods of the same class
 * (cctor already ran before the call) do not need it.
 */
4505 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4507 if (vtable->initialized && !cfg->compile_aot)
4510 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4513 if (!mono_class_needs_cctor_run (vtable->klass, method))
4516 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4517 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS (the ldelema fast path).  Emits a bounds
 * check when BCHECK is set.  On x86/amd64, power-of-two element sizes use a
 * single LEA; otherwise the offset is computed with a multiply (or, for
 * gsharedvt element types, a runtime element size read from the RGCTX)
 * plus the MonoArray vector offset.
 */
4524 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4528 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4531 if (mini_is_gsharedvt_klass (cfg, klass)) {
4534 mono_class_init (klass);
4535 size = mono_class_array_element_size (klass);
4538 mult_reg = alloc_preg (cfg);
4539 array_reg = arr->dreg;
4540 index_reg = index->dreg;
4542 #if SIZEOF_REGISTER == 8
4543 /* The array reg is 64 bits but the index reg is only 32 */
4544 if (COMPILE_LLVM (cfg)) {
4546 index2_reg = index_reg;
4548 index2_reg = alloc_preg (cfg);
4549 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index to I4. */
4552 if (index->type == STACK_I8) {
4553 index2_reg = alloc_preg (cfg);
4554 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4556 index2_reg = index_reg;
4561 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4563 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold shift + add + vector offset into one LEA. */
4564 if (size == 1 || size == 2 || size == 4 || size == 8) {
4565 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4567 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4568 ins->klass = mono_class_get_element_class (klass);
4569 ins->type = STACK_MP;
4575 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt element type: the element size comes from the RGCTX at
 * runtime, so the offset is computed with a runtime multiply. */
4578 MonoInst *rgctx_ins;
4581 g_assert (cfg->generic_sharing_context);
4582 context_used = mini_class_check_context_used (cfg, klass);
4583 g_assert (context_used);
4584 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4585 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4590 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4591 ins->klass = mono_class_get_element_class (klass);
4592 ins->type = STACK_MP;
4593 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled on targets with native multiply: the address computation
 * below relies on pointer-sized OP_PMUL. */
4598 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 (possibly
 * non-zero-lower-bound) array: for each dimension the lower bound is
 * subtracted and the adjusted index range-checked against the dimension
 * length from the MonoArrayBounds table, then
 * addr = arr + ((idx1 * len2) + idx2) * elem_size + offsetof (vector).
 */
4600 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4602 int bounds_reg = alloc_preg (cfg);
4603 int add_reg = alloc_ireg_mp (cfg);
4604 int mult_reg = alloc_preg (cfg);
4605 int mult2_reg = alloc_preg (cfg);
4606 int low1_reg = alloc_preg (cfg);
4607 int low2_reg = alloc_preg (cfg);
4608 int high1_reg = alloc_preg (cfg);
4609 int high2_reg = alloc_preg (cfg);
4610 int realidx1_reg = alloc_preg (cfg);
4611 int realidx2_reg = alloc_preg (cfg);
4612 int sum_reg = alloc_preg (cfg);
4613 int index1, index2, tmpreg;
4617 mono_class_init (klass);
4618 size = mono_class_array_element_size (klass);
4620 index1 = index_ins1->dreg;
4621 index2 = index_ins2->dreg;
4623 #if SIZEOF_REGISTER == 8
4624 /* The array reg is 64 bits but the index reg is only 32 */
4625 if (COMPILE_LLVM (cfg)) {
4628 tmpreg = alloc_preg (cfg);
4629 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4631 tmpreg = alloc_preg (cfg);
4632 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4636 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4640 /* range checking */
4641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4642 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned compare vs length. */
4644 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4645 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4646 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4648 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4650 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds entry. */
4652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4653 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4654 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4656 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4657 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4658 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major flatten: (realidx1 * len2 + realidx2) * size + vector offset. */
4660 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4661 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4663 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4664 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4666 ins->type = STACK_MP;
4668 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the Get/Set/Address array
 * helpers: rank 1 goes to the inline ldelema_1 fast path, rank 2 (with
 * MONO_OPT_INTRINS and native multiply) to ldelema_2, and everything else
 * to a marshalled Address() wrapper called out-of-line.  IS_SET accounts
 * for the trailing value argument in a setter's signature.
 */
4675 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4679 MonoMethod *addr_method;
4682 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4685 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4687 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4688 /* emit_ldelema_2 depends on OP_LMUL */
4689 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4690 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
4694 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4695 addr_method = mono_marshal_get_array_address (rank, element_size);
4696 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
4701 static MonoBreakPolicy
4702 always_insert_breakpoint (MonoMethod *method)
4704 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4707 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4710 * mono_set_break_policy:
4711 * policy_callback: the new callback function
4713 * Allow embedders to decide whether to actually obey breakpoint instructions
4714 * (both break IL instructions and Debugger.Break () method calls), for example
4715 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4716 * untrusted or semi-trusted code.
4718 * @policy_callback will be called every time a break point instruction needs to
4719 * be inserted with the method argument being the method that calls Debugger.Break()
4720 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4721 * if it wants the breakpoint to not be effective in the given method.
4722 * #MONO_BREAK_POLICY_ALWAYS is the default.
4725 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4727 if (policy_callback)
4728 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
4730 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:   (sic — callers use this spelling)
 *
 *   Ask the installed break policy whether a breakpoint should actually be
 * emitted for METHOD.  ON_DBG defers to whether the Mono debugger is active.
 * NOTE(review): the return statements for the ALWAYS/NEVER cases are not
 * visible in this extract.
 */
4734 should_insert_brekpoint (MonoMethod *method) {
4735 switch (break_policy_func (method)) {
4736 case MONO_BREAK_POLICY_ALWAYS:
4738 case MONO_BREAK_POLICY_NEVER:
4740 case MONO_BREAK_POLICY_ON_DBG:
4741 return mono_debug_using_mono_debugger ();
4743 g_warning ("Incorrect value returned from break policy callback");
4748 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = value location.
 * IS_SET stores *args[2] into the element (with a write barrier for
 * reference types); otherwise the element is loaded into *args[2].
 * Bounds checking is the caller's responsibility, so ldelema is emitted
 * without a check.
 */
4750 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4752 MonoInst *addr, *store, *load;
4753 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4755 /* the bounds check is already done by the callers */
4756 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4758 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4759 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4760 if (mini_type_is_reference (cfg, fsig->params [2]))
4761 emit_write_barrier (cfg, addr, load);
4763 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4764 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type under the current (possibly shared) context. */
4771 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4773 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-type stores with SAFETY_CHECKS (and a non-null constant value)
 * go through the virtual stelemref helper, which performs the array
 * covariance check.  Otherwise: gsharedvt uses a variable-sized store,
 * constant indexes fold the offset, and the general case computes the
 * address via ldelema.  Reference stores get a GC write barrier.
 */
4777 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4779 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4780 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4781 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4782 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4783 MonoInst *iargs [3];
4786 mono_class_setup_vtable (obj_array);
4787 g_assert (helper->slot);
4789 if (sp [0]->type != STACK_OBJ)
4791 if (sp [2]->type != STACK_OBJ)
4798 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4802 if (mini_is_gsharedvt_klass (cfg, klass)) {
4805 // FIXME-VT: OP_ICONST optimization
4806 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4807 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4808 ins->opcode = OP_STOREV_MEMBASE;
4809 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold index*size into a compile-time membase offset. */
4810 int array_reg = sp [0]->dreg;
4811 int index_reg = sp [1]->dreg;
4812 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4815 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4816 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4818 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4819 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4820 if (generic_class_is_reference_type (cfg, klass))
4821 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline version of Array.UnsafeStore/UnsafeLoad: element type comes from
 * params [2] for stores or the return type for loads; no safety checks are
 * emitted (FALSE passed to emit_array_store / no bounds check on ldelema).
 */
4828 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4833 eklass = mono_class_from_mono_type (fsig->params [2]);
4835 eklass = mono_class_from_mono_type (fsig->ret);
4839 return emit_array_store (cfg, eklass, args, FALSE);
4841 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4842 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Intrinsic expansion hook for constructors; currently only tries the SIMD
 * intrinsics path when MONO_OPT_SIMD is enabled.
 */
4848 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4850 MonoInst *ins = NULL;
4851 #ifdef MONO_ARCH_SIMD_INTRINSICS
4852 if (cfg->opt & MONO_OPT_SIMD) {
4853 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given kind (e.g. FullBarrier) into the
 * current basic block. */
4863 emit_memory_barrier (MonoCompile *cfg, int kind)
4865 MonoInst *ins = NULL;
4866 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4867 MONO_ADD_INS (cfg->cbb, ins);
4868 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansion used when compiling with the LLVM backend:
 * Math.Sin/Cos/Sqrt/Abs(double) become single FP opcodes, and with
 * MONO_OPT_CMOV Math.Min/Max on I4/U4/I8/U8 become (I/L)MIN/MAX ops.
 * NOTE(review): the opcode assignments for several branches (Sin/Cos/Sqrt,
 * signed Min/Max) are on lines not visible in this extract.
 */
4874 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4876 MonoInst *ins = NULL;
4879 /* The LLVM backend supports these intrinsics */
4880 if (cmethod->klass == mono_defaults.math_class) {
4881 if (strcmp (cmethod->name, "Sin") == 0) {
4883 } else if (strcmp (cmethod->name, "Cos") == 0) {
4885 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4887 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4892 MONO_INST_NEW (cfg, ins, opcode);
4893 ins->type = STACK_R8;
4894 ins->dreg = mono_alloc_freg (cfg);
4895 ins->sreg1 = args [0]->dreg;
4896 MONO_ADD_INS (cfg->cbb, ins);
4900 if (cfg->opt & MONO_OPT_CMOV) {
4901 if (strcmp (cmethod->name, "Min") == 0) {
4902 if (fsig->params [0]->type == MONO_TYPE_I4)
4904 if (fsig->params [0]->type == MONO_TYPE_U4)
4905 opcode = OP_IMIN_UN;
4906 else if (fsig->params [0]->type == MONO_TYPE_I8)
4908 else if (fsig->params [0]->type == MONO_TYPE_U8)
4909 opcode = OP_LMIN_UN;
4910 } else if (strcmp (cmethod->name, "Max") == 0) {
4911 if (fsig->params [0]->type == MONO_TYPE_I4)
4913 if (fsig->params [0]->type == MONO_TYPE_U4)
4914 opcode = OP_IMAX_UN;
4915 else if (fsig->params [0]->type == MONO_TYPE_I8)
4917 else if (fsig->params [0]->type == MONO_TYPE_U8)
4918 opcode = OP_LMAX_UN;
4923 MONO_INST_NEW (cfg, ins, opcode);
4924 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4925 ins->dreg = mono_alloc_ireg (cfg);
4926 ins->sreg1 = args [0]->dreg;
4927 ins->sreg2 = args [1]->dreg;
4928 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use from gshared code; currently only
 * Array.UnsafeStore/UnsafeLoad.
 */
4936 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4938 if (cmethod->klass == mono_defaults.array_class) {
4939 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4940 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4941 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4942 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main per-method intrinsics dispatcher: if CMETHOD is one of the
 * recognized corlib methods (String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger, Environment, Math), emit inline
 * IR for it and return the result instruction; otherwise fall through to
 * the LLVM / arch-specific hooks.  Returning NULL means "not intrinsified,
 * emit a normal call".
 * NOTE(review): this extract is lossy — many branches are missing their
 * return statements, else arms and #endif lines.
 */
4949 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4951 MonoInst *ins = NULL;
/* Lazily-resolved cache; RuntimeHelpers cannot be referenced statically here. */
4953 static MonoClass *runtime_helpers_class = NULL;
4954 if (! runtime_helpers_class)
4955 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4956 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4958 if (cmethod->klass == mono_defaults.string_class) {
4959 if (strcmp (cmethod->name, "get_Chars") == 0) {
4960 int dreg = alloc_ireg (cfg);
4961 int index_reg = alloc_preg (cfg);
4962 int mult_reg = alloc_preg (cfg);
4963 int add_reg = alloc_preg (cfg);
4965 #if SIZEOF_REGISTER == 8
4966 /* The array reg is 64 bits but the index reg is only 32 */
4967 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4969 index_reg = args [1]->dreg;
4971 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4973 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4974 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4975 add_reg = ins->dreg;
4976 /* Avoid a warning */
4978 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4982 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4983 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4984 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4986 type_from_op (ins, NULL, NULL);
4988 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4989 int dreg = alloc_ireg (cfg);
4990 /* Decompose later to allow more optimizations */
4991 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4992 ins->type = STACK_I4;
4993 ins->flags |= MONO_INST_FAULT;
4994 cfg->cbb->has_array_access = TRUE;
4995 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4998 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4999 int mult_reg = alloc_preg (cfg);
5000 int add_reg = alloc_preg (cfg);
5002 /* The corlib functions check for oob already. */
5003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5004 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5005 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5006 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
5009 } else if (cmethod->klass == mono_defaults.object_class) {
5011 if (strcmp (cmethod->name, "GetType") == 0) {
5012 int dreg = alloc_ireg_ref (cfg);
5013 int vt_reg = alloc_preg (cfg);
5014 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5015 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5016 type_from_op (ins, NULL, NULL);
5019 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash of the object address; only valid with a non-moving GC. */
5020 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5021 int dreg = alloc_ireg (cfg);
5022 int t1 = alloc_ireg (cfg);
5024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5025 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5026 ins->type = STACK_I4;
5030 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5031 MONO_INST_NEW (cfg, ins, OP_NOP);
5032 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5036 } else if (cmethod->klass == mono_defaults.array_class) {
5037 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5038 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5040 #ifndef MONO_BIG_ARRAYS
5042 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5045 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5046 int dreg = alloc_ireg (cfg);
5047 int bounds_reg = alloc_ireg_mp (cfg);
5048 MonoBasicBlock *end_bb, *szarray_bb;
5049 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5051 NEW_BBLOCK (cfg, end_bb);
5052 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL distinguishes szarrays from multi-dim arrays. */
5054 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5055 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5058 /* Non-szarray case */
5060 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5061 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5063 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5064 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5066 MONO_START_BB (cfg, szarray_bb);
5069 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5070 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5072 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5073 MONO_START_BB (cfg, end_bb);
5075 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5076 ins->type = STACK_I4;
/* Only getters (get_Rank / get_Length) handled past this point. */
5082 if (cmethod->name [0] != 'g')
5085 if (strcmp (cmethod->name, "get_Rank") == 0) {
5086 int dreg = alloc_ireg (cfg);
5087 int vtable_reg = alloc_preg (cfg);
5088 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5089 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5090 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5091 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5092 type_from_op (ins, NULL, NULL);
5095 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5096 int dreg = alloc_ireg (cfg);
5098 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5099 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5100 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5105 } else if (cmethod->klass == runtime_helpers_class) {
5107 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5108 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5112 } else if (cmethod->klass == mono_defaults.thread_class) {
5113 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5114 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5115 MONO_ADD_INS (cfg->cbb, ins);
5117 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5118 return emit_memory_barrier (cfg, FullBarrier);
/* --- System.Threading.Monitor fast paths --- */
5120 } else if (cmethod->klass == mono_defaults.monitor_class) {
5122 /* FIXME this should be integrated to the check below once we support the trampoline version */
5123 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5124 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5125 MonoMethod *fast_method = NULL;
5127 /* Avoid infinite recursion */
5128 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5131 fast_method = mono_monitor_get_fast_path (cmethod);
5135 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5139 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5140 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5143 if (COMPILE_LLVM (cfg)) {
5145 * Pass the argument normally, the LLVM backend will handle the
5146 * calling convention problems.
5148 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5150 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5151 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5152 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5153 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5156 return (MonoInst*)call;
5157 } else if (strcmp (cmethod->name, "Exit") == 0) {
5160 if (COMPILE_LLVM (cfg)) {
5161 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5163 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5164 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5165 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5166 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5169 return (MonoInst*)call;
5171 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5173 MonoMethod *fast_method = NULL;
5175 /* Avoid infinite recursion */
5176 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5177 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5178 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5181 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5182 strcmp (cmethod->name, "Exit") == 0)
5183 fast_method = mono_monitor_get_fast_path (cmethod);
5187 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
5190 } else if (cmethod->klass->image == mono_defaults.corlib &&
5191 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5192 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5195 #if SIZEOF_REGISTER == 8
5196 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5197 /* 64 bit reads are already atomic */
5198 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5199 ins->dreg = mono_alloc_preg (cfg);
5200 ins->inst_basereg = args [0]->dreg;
5201 ins->inst_offset = 0;
5202 MONO_ADD_INS (cfg->cbb, ins);
5206 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to atomic-add of +/-1. */
5207 if (strcmp (cmethod->name, "Increment") == 0) {
5208 MonoInst *ins_iconst;
5211 if (fsig->params [0]->type == MONO_TYPE_I4)
5212 opcode = OP_ATOMIC_ADD_NEW_I4;
5213 #if SIZEOF_REGISTER == 8
5214 else if (fsig->params [0]->type == MONO_TYPE_I8)
5215 opcode = OP_ATOMIC_ADD_NEW_I8;
5218 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5219 ins_iconst->inst_c0 = 1;
5220 ins_iconst->dreg = mono_alloc_ireg (cfg);
5221 MONO_ADD_INS (cfg->cbb, ins_iconst);
5223 MONO_INST_NEW (cfg, ins, opcode);
5224 ins->dreg = mono_alloc_ireg (cfg);
5225 ins->inst_basereg = args [0]->dreg;
5226 ins->inst_offset = 0;
5227 ins->sreg2 = ins_iconst->dreg;
5228 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5229 MONO_ADD_INS (cfg->cbb, ins);
5231 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5232 MonoInst *ins_iconst;
5235 if (fsig->params [0]->type == MONO_TYPE_I4)
5236 opcode = OP_ATOMIC_ADD_NEW_I4;
5237 #if SIZEOF_REGISTER == 8
5238 else if (fsig->params [0]->type == MONO_TYPE_I8)
5239 opcode = OP_ATOMIC_ADD_NEW_I8;
5242 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5243 ins_iconst->inst_c0 = -1;
5244 ins_iconst->dreg = mono_alloc_ireg (cfg);
5245 MONO_ADD_INS (cfg->cbb, ins_iconst);
5247 MONO_INST_NEW (cfg, ins, opcode);
5248 ins->dreg = mono_alloc_ireg (cfg);
5249 ins->inst_basereg = args [0]->dreg;
5250 ins->inst_offset = 0;
5251 ins->sreg2 = ins_iconst->dreg;
5252 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5253 MONO_ADD_INS (cfg->cbb, ins);
5255 } else if (strcmp (cmethod->name, "Add") == 0) {
5258 if (fsig->params [0]->type == MONO_TYPE_I4)
5259 opcode = OP_ATOMIC_ADD_NEW_I4;
5260 #if SIZEOF_REGISTER == 8
5261 else if (fsig->params [0]->type == MONO_TYPE_I8)
5262 opcode = OP_ATOMIC_ADD_NEW_I8;
5266 MONO_INST_NEW (cfg, ins, opcode);
5267 ins->dreg = mono_alloc_ireg (cfg);
5268 ins->inst_basereg = args [0]->dreg;
5269 ins->inst_offset = 0;
5270 ins->sreg2 = args [1]->dreg;
5271 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5272 MONO_ADD_INS (cfg->cbb, ins);
5275 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5277 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5278 if (strcmp (cmethod->name, "Exchange") == 0) {
5280 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5282 if (fsig->params [0]->type == MONO_TYPE_I4)
5283 opcode = OP_ATOMIC_EXCHANGE_I4;
5284 #if SIZEOF_REGISTER == 8
5285 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5286 (fsig->params [0]->type == MONO_TYPE_I))
5287 opcode = OP_ATOMIC_EXCHANGE_I8;
5289 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5290 opcode = OP_ATOMIC_EXCHANGE_I4;
5295 MONO_INST_NEW (cfg, ins, opcode);
5296 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5297 ins->inst_basereg = args [0]->dreg;
5298 ins->inst_offset = 0;
5299 ins->sreg2 = args [1]->dreg;
5300 MONO_ADD_INS (cfg->cbb, ins);
5302 switch (fsig->params [0]->type) {
5304 ins->type = STACK_I4;
5308 ins->type = STACK_I8;
5310 case MONO_TYPE_OBJECT:
5311 ins->type = STACK_OBJ;
5314 g_assert_not_reached ();
/* Storing a reference into the heap needs a GC write barrier. */
5317 if (cfg->gen_write_barriers && is_ref)
5318 emit_write_barrier (cfg, args [0], args [1]);
5320 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5322 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5323 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5325 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5326 if (fsig->params [1]->type == MONO_TYPE_I4)
5328 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5329 size = sizeof (gpointer);
5330 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5333 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5334 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5335 ins->sreg1 = args [0]->dreg;
5336 ins->sreg2 = args [1]->dreg;
5337 ins->sreg3 = args [2]->dreg;
5338 ins->type = STACK_I4;
5339 MONO_ADD_INS (cfg->cbb, ins);
5340 } else if (size == 8) {
5341 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5342 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5343 ins->sreg1 = args [0]->dreg;
5344 ins->sreg2 = args [1]->dreg;
5345 ins->sreg3 = args [2]->dreg;
5346 ins->type = STACK_I8;
5347 MONO_ADD_INS (cfg->cbb, ins);
5349 /* g_assert_not_reached (); */
5351 if (cfg->gen_write_barriers && is_ref)
5352 emit_write_barrier (cfg, args [0], args [1]);
5354 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5356 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5357 ins = emit_memory_barrier (cfg, FullBarrier);
/* --- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
5361 } else if (cmethod->klass->image == mono_defaults.corlib) {
5362 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5363 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5364 if (should_insert_brekpoint (cfg->method)) {
5365 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5367 MONO_INST_NEW (cfg, ins, OP_NOP);
5368 MONO_ADD_INS (cfg->cbb, ins);
5372 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5373 && strcmp (cmethod->klass->name, "Environment") == 0) {
5375 EMIT_NEW_ICONST (cfg, ins, 1);
5377 EMIT_NEW_ICONST (cfg, ins, 0);
5381 } else if (cmethod->klass == mono_defaults.math_class) {
5383 * There is general branches code for Min/Max, but it does not work for
5385 * http://everything2.com/?node_id=1051618
/* Fallbacks: SIMD intrinsics, LLVM intrinsics, then the arch hook. */
5389 #ifdef MONO_ARCH_SIMD_INTRINSICS
5390 if (cfg->opt & MONO_OPT_SIMD) {
5391 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5397 if (COMPILE_LLVM (cfg)) {
5398 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5403 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5407 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations; currently
 * only String.InternalAllocateStr, which is routed to the GC's managed
 * string allocator (vtable + length args) when allocation profiling and
 * shared-code mode are off.  Returns NULL when no redirection applies.
 */
5410 inline static MonoInst*
5411 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5412 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5414 if (method->klass == mono_defaults.string_class) {
5415 /* managed string allocation support */
5416 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5417 MonoInst *iargs [2];
5418 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5419 MonoMethod *managed_alloc = NULL;
5421 g_assert (vtable); /* Should not fail since it is System.String */
5422 #ifndef MONO_CROSS_COMPILE
5423 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5427 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5428 iargs [1] = args [0];
5429 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for each argument of the
 * inlined callee (including the implicit `this`) and store the caller's
 * stack values into them, so the inlined body sees them as cfg->args.
 */
5436 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5438 MonoInst *store, *temp;
5441 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5442 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5445 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5446 * would be different than the MonoInst's used to represent arguments, and
5447 * the ldelema implementation can't deal with that.
5448 * Solution: When ldelema is used on an inline argument, create a var for
5449 * it, emit ldelema on that var, and emit the saving code below in
5450 * inline_method () if needed.
5452 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5453 cfg->args [i] = temp;
5454 /* This uses cfg->args [i] which is set by the preceding line */
5455 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5456 store->cil_code = sp [0]->cil_code;
5461 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5462 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5464 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of callees whose full name starts with
 * the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The env var is read
 * once and cached; an empty/unset limit allows everything.
 */
5466 check_inline_called_method_name_limit (MonoMethod *called_method)
5469 static char *limit = NULL;
5471 if (limit == NULL) {
5472 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5474 if (limit_string != NULL)
5475 limit = limit_string;
5477 limit = (char *) "";
5480 if (limit [0] != '\0') {
5481 char *called_method_name = mono_method_full_name (called_method, TRUE);
5483 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5484 g_free (called_method_name);
5486 //return (strncmp_result <= 0);
5487 return (strncmp_result == 0);
5494 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid, mirror of the "called" variant above: only allow inlining
 * inside callers whose full name matches the prefix in
 * $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
5496 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5499 static char *limit = NULL;
5501 if (limit == NULL) {
5502 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5503 if (limit_string != NULL) {
5504 limit = limit_string;
5506 limit = (char *) "";
5510 if (limit [0] != '\0') {
5511 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5513 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5514 g_free (caller_method_name);
5516 //return (strncmp_result <= 0);
5517 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline return variable RVAR to the zero value of its
 * stack type: 0 for ints/longs/pointers, 0.0 (via a shared static R8
 * constant) for doubles, and VZERO for value types (RTYPE supplies the
 * class).  Used when an inlined callee has paths that set no return value.
 */
5525 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
5527 static double r8_0 = 0.0;
5530 switch (rvar->type) {
5532 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5535 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5540 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5543 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5544 ins->type = STACK_R8;
5545 ins->inst_p0 = (void*)&r8_0;
5546 ins->dreg = rvar->dreg;
5547 MONO_ADD_INS (cfg->cbb, ins);
5550 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5553 g_assert_not_reached ();
/*
 * inline_method:
 *   Inline CMETHOD (with signature FSIG and arguments SP) into the
 *   current compile at IP.  The caller's CFG state that the recursive
 *   mono_method_to_ir () call overwrites is saved into prev_* locals,
 *   the callee is converted to IR between fresh start/end bblocks
 *   (sbblock/ebblock), and then the new blocks are either merged into
 *   the caller's graph (inline accepted) or discarded (inline aborted).
 *   NOTE(review): gapped extraction — the return statements, some
 *   braces and several statements are not visible in this chunk.
 */
5558 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5559 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5561 MonoInst *ins, *rvar = NULL;
5562 MonoMethodHeader *cheader;
5563 MonoBasicBlock *ebblock, *sbblock;
5565 MonoMethod *prev_inlined_method;
5566 MonoInst **prev_locals, **prev_args;
5567 MonoType **prev_arg_types;
5568 guint prev_real_offset;
5569 GHashTable *prev_cbb_hash;
5570 MonoBasicBlock **prev_cil_offset_to_bb;
5571 MonoBasicBlock *prev_cbb;
5572 unsigned char* prev_cil_start;
5573 guint32 prev_cil_offset_to_bb_len;
5574 MonoMethod *prev_current_method;
5575 MonoGenericContext *prev_generic_context;
5576 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5578 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional name-based inline filters, enabled only at compile time */
5580 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5581 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5584 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5585 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5589 if (cfg->verbose_level > 2)
5590 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* count each distinct method as inlineable only once */
5592 if (!cmethod->inline_info) {
5593 cfg->stat_inlineable_methods++;
5594 cmethod->inline_info = 1;
5597 /* allocate local variables */
5598 cheader = mono_method_get_header (cmethod);
5600 if (cheader == NULL || mono_loader_get_last_error ()) {
5601 MonoLoaderError *error = mono_loader_get_last_error ();
5604 mono_metadata_free_mh (cheader);
/* only propagate the loader error when inlining is mandatory */
5605 if (inline_always && error)
5606 mono_cfg_set_exception (cfg, error->exception_type);
5608 mono_loader_clear_error ();
5612 /*Must verify before creating locals as it can cause the JIT to assert.*/
5613 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5614 mono_metadata_free_mh (cheader);
5618 /* allocate space to store the return value */
5619 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5620 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in locals for the callee; caller's set is restored below */
5623 prev_locals = cfg->locals;
5624 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5625 for (i = 0; i < cheader->num_locals; ++i)
5626 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5628 /* allocate start and end blocks */
5629 /* This is needed so if the inline is aborted, we can clean up */
5630 NEW_BBLOCK (cfg, sbblock);
5631 sbblock->real_offset = real_offset;
5633 NEW_BBLOCK (cfg, ebblock);
5634 ebblock->block_num = cfg->num_bblocks++;
5635 ebblock->real_offset = real_offset;
/* save every piece of CFG state that mono_method_to_ir () clobbers */
5637 prev_args = cfg->args;
5638 prev_arg_types = cfg->arg_types;
5639 prev_inlined_method = cfg->inlined_method;
5640 cfg->inlined_method = cmethod;
5641 cfg->ret_var_set = FALSE;
5642 cfg->inline_depth ++;
5643 prev_real_offset = cfg->real_offset;
5644 prev_cbb_hash = cfg->cbb_hash;
5645 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5646 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5647 prev_cil_start = cfg->cil_start;
5648 prev_cbb = cfg->cbb;
5649 prev_current_method = cfg->current_method;
5650 prev_generic_context = cfg->generic_context;
5651 prev_ret_var_set = cfg->ret_var_set;
/* a callvirt on an instance method needs a virtual-call null check;
 * the assignment to 'virtual' is not visible in this extraction */
5653 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* recursively convert the callee's IL; costs < 0 means abort */
5656 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5658 ret_var_set = cfg->ret_var_set;
/* restore the caller's CFG state */
5660 cfg->inlined_method = prev_inlined_method;
5661 cfg->real_offset = prev_real_offset;
5662 cfg->cbb_hash = prev_cbb_hash;
5663 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5664 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5665 cfg->cil_start = prev_cil_start;
5666 cfg->locals = prev_locals;
5667 cfg->args = prev_args;
5668 cfg->arg_types = prev_arg_types;
5669 cfg->current_method = prev_current_method;
5670 cfg->generic_context = prev_generic_context;
5671 cfg->ret_var_set = prev_ret_var_set;
5672 cfg->inline_depth --;
/* accept the inline when cheap enough (cost < 60) or when forced */
5674 if ((costs >= 0 && costs < 60) || inline_always) {
5675 if (cfg->verbose_level > 2)
5676 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5678 cfg->stat_inlined_methods++;
5680 /* always add some code to avoid block split failures */
5681 MONO_INST_NEW (cfg, ins, OP_NOP);
5682 MONO_ADD_INS (prev_cbb, ins);
5684 prev_cbb->next_bb = sbblock;
5685 link_bblock (cfg, prev_cbb, sbblock);
5688 * Get rid of the begin and end bblocks if possible to aid local
5691 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5693 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5694 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5696 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5697 MonoBasicBlock *prev = ebblock->in_bb [0];
5698 mono_merge_basic_blocks (cfg, prev, ebblock);
5700 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5701 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5702 cfg->cbb = prev_cbb;
5706 * It's possible that the rvar is set in some prev bblock, but not in others.
/* scan ebblock's predecessors: unreachable paths need rvar initialized */
5712 for (i = 0; i < ebblock->in_count; ++i) {
5713 bb = ebblock->in_bb [i];
5715 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5718 emit_init_rvar (cfg, rvar, fsig->ret);
5728 * If the inlined method contains only a throw, then the ret var is not
5729 * set, so set it to a dummy value.
5732 emit_init_rvar (cfg, rvar, fsig->ret);
5734 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* header ownership passes to cfg; freed when the compile is destroyed */
5737 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline aborted: discard side effects of the failed attempt */
5740 if (cfg->verbose_level > 2)
5741 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5742 cfg->exception_type = MONO_EXCEPTION_NONE;
5743 mono_loader_clear_error ();
5745 /* This gets rid of the newly added bblocks */
5746 cfg->cbb = prev_cbb;
5748 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5753 * Some of these comments may well be out-of-date.
5754 * Design decisions: we do a single pass over the IL code (and we do bblock
5755 * splitting/merging in the few cases when it's required: a back jump to an IL
5756 * address that was not already seen as bblock starting point).
5757 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5758 * Complex operations are decomposed in simpler ones right away. We need to let the
5759 * arch-specific code peek and poke inside this process somehow (except when the
5760 * optimizations can take advantage of the full semantic info of coarse opcodes).
5761 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5762 * MonoInst->opcode initially is the IL opcode or some simplification of that
5763 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5764 * opcode with value bigger than OP_LAST.
5765 * At this point the IR can be handed over to an interpreter, a dumb code generator
5766 * or to the optimizing code generator that will translate it to SSA form.
5768 * Profiling directed optimizations.
5769 * We may compile by default with few or no optimizations and instrument the code
5770 * or the user may indicate what methods to optimize the most either in a config file
5771 * or through repeated runs where the compiler applies offline the optimizations to
5772 * each method and then decides if it was worth it.
/*
 * Verification helper macros used throughout mono_method_to_ir ().
 * Each one bails out of IL conversion (via UNVERIFIED / LOAD_ERROR,
 * defined elsewhere in this file) when a stack, argument, local,
 * opcode-size or type-load check fails.
 */
5775 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5776 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5777 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5778 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5779 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5780 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5781 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5782 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5784 /* offset from br.s -> br like opcodes */
5785 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE if IL address IP still belongs to basic block BB,
 *   i.e. no *other* bblock starts at that offset.  Used to decide
 *   whether it is safe to peek ahead at the next IL instruction.
 */
5788 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5790 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5792 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [START, END): decode each opcode just
 *   far enough to find branch targets and create a basic block (via
 *   GET_BBLOCK) at every target and fall-through point.  Also marks the
 *   bblock containing a CEE_THROW as out-of-line so it can be moved to
 *   cold code.  On a decode error *POS reports where we stopped.
 *   NOTE(review): gapped extraction — several case labels, 'break's and
 *   the error-handling lines are not visible in this chunk.
 */
5796 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5798 unsigned char *ip = start;
5799 unsigned char *target;
5802 MonoBasicBlock *bblock;
5803 const MonoOpcode *opcode;
5806 cli_addr = ip - start;
5807 i = mono_opcode_value ((const guint8 **)&ip, end);
5810 opcode = &mono_opcodes [i];
/* advance IP by the operand size; only branches create bblocks */
5811 switch (opcode->argument) {
5812 case MonoInlineNone:
5815 case MonoInlineString:
5816 case MonoInlineType:
5817 case MonoInlineField:
5818 case MonoInlineMethod:
5821 case MonoShortInlineR:
5828 case MonoShortInlineVar:
5829 case MonoShortInlineI:
/* short branch: 1-byte signed displacement after the opcode */
5832 case MonoShortInlineBrTarget:
5833 target = start + cli_addr + 2 + (signed char)ip [1];
5834 GET_BBLOCK (cfg, bblock, target);
5837 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte displacement */
5839 case MonoInlineBrTarget:
5840 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5841 GET_BBLOCK (cfg, bblock, target);
5844 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 4-byte targets relative to the end of the instruction */
5846 case MonoInlineSwitch: {
5847 guint32 n = read32 (ip + 1);
5850 cli_addr += 5 + 4 * n;
5851 target = start + cli_addr;
5852 GET_BBLOCK (cfg, bblock, target);
5854 for (j = 0; j < n; ++j) {
5855 target = start + cli_addr + (gint32)read32 (ip);
5856 GET_BBLOCK (cfg, bblock, target);
5866 g_assert_not_reached ();
5869 if (i == CEE_THROW) {
5870 unsigned char *bb_start = ip - 1;
5872 /* Find the start of the bblock containing the throw */
5874 while ((bb_start >= start) && !bblock) {
5875 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throwing blocks are cold: move them out of the hot path */
5879 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For
 *   wrapper methods the token indexes the wrapper's own data table;
 *   otherwise it is a normal metadata token resolved against M's image.
 *   "allow_open" = the result may still contain open type variables.
 */
5889 static inline MonoMethod *
5890 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5894 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5895 method = mono_method_get_wrapper_data (m, token);
5897 method = mono_class_inflate_generic_method (method, context);
5899 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared
 *   generic code an open constructed owner type is rejected (the
 *   rejection action itself is not visible in this extraction).
 */
5905 static inline MonoMethod *
5906 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5908 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5910 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD, going
 *   through the wrapper data table for wrapper methods and through
 *   normal metadata otherwise; the class is initialized before return.
 */
5916 static inline MonoClass*
5917 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5921 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5922 klass = mono_method_get_wrapper_data (method, token);
5924 klass = mono_class_inflate_generic_class (klass, context);
5926 klass = mono_class_get_full (method->klass->image, token, context);
5929 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD:
 *   wrapper data for wrappers (inflated through CONTEXT when needed),
 *   otherwise parsed directly from metadata.
 */
5933 static inline MonoMethodSignature*
5934 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5936 MonoMethodSignature *fsig;
5938 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5941 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5943 fsig = mono_inflate_generic_signature (fsig, context, &error);
/* inflation failure here would be a metadata/JIT bug */
5945 g_assert (mono_error_ok (&error));
5948 fsig = mono_metadata_parse_signature (method->klass->image, token);
5954 * Returns TRUE if the JIT should abort inlining because "callee"
5955 * is influenced by security attributes.
/* Evaluates CAS link demands for CALLER -> CALLEE.  On an ECMA link
 * demand, code throwing a SecurityException is emitted in place of the
 * call; other failures record a SECURITY_LINKDEMAND exception on CFG. */
5958 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* declarative security only matters when inlining (cfg->method != caller) */
5962 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
5966 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5967 if (result == MONO_JIT_SECURITY_OK)
5970 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5971 /* Generate code to throw a SecurityException before the actual call/link */
5972 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5975 NEW_ICONST (cfg, args [0], 4);
5976 NEW_METHODCONST (cfg, args [1], caller);
5977 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5978 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5979 /* don't hide previous results */
5980 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5981 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns (lazily caching in a static) the managed
 *   SecurityManager.ThrowException(exception) method used to raise
 *   security exceptions from JITted code.
 */
5989 throw_exception (void)
5991 static MonoMethod *method = NULL;
5994 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5995 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   IR position, so the exception object EX is raised at runtime.
 */
6002 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6004 MonoMethod *thrower = throw_exception ();
6007 EMIT_NEW_PCONST (cfg, args [0], ex);
6008 mono_emit_method_call (cfg, thrower, args, NULL);
6012 * Return the original method if a wrapper is specified. We can only access
6013 * the custom attributes from the original method.
6016 get_original_method (MonoMethod *method)
/* non-wrappers are already the original method */
6018 if (method->wrapper_type == MONO_WRAPPER_NONE)
6021 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6022 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6025 /* in other cases we need to find the original method */
6026 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for a field access from CALLER; when the
 *   access is disallowed, code throwing the returned exception is
 *   emitted at the current position instead of failing the compile.
 */
6030 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6031 MonoBasicBlock *bblock, unsigned char *ip)
6033 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6034 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6036 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for a CALLER -> CALLEE call; when the call
 *   is disallowed, code throwing the returned exception is emitted.
 */
6040 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6041 MonoBasicBlock *bblock, unsigned char *ip)
6043 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6044 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6046 emit_throw_exception (cfg, ex);
6050 * Check that the IL instructions at ip are the array initialization
6051 * sequence and return the pointer to the data and the size.
/* Pattern-matches the dup/ldtoken/call RuntimeHelpers.InitializeArray
 * idiom emitted by compilers after newarr, so the JIT can replace it
 * with a direct memory copy from the field's RVA data.  Returns NULL
 * when the pattern or element type does not qualify.  For AOT it
 * returns the RVA (looked up at load time) rather than a raw pointer. */
6054 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6057 * newarr[System.Int32]
6059 * ldtoken field valuetype ...
6060 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* byte layout: dup(1) ldtoken(1+4) call(1+4); ip[5]==0x4 checks the
 * high byte of the ldtoken operand (a field-def token, table 0x04) */
6062 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6063 guint32 token = read32 (ip + 7);
6064 guint32 field_token = read32 (ip + 2);
6065 guint32 field_index = field_token & 0xffffff;
6067 const char *data_ptr;
6069 MonoMethod *cmethod;
6070 MonoClass *dummy_class;
6071 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6077 *out_field_token = field_token;
6079 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must really be corlib's RuntimeHelpers.InitializeArray */
6082 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw bytes are endian-safe qualify */
6084 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6085 case MONO_TYPE_BOOLEAN:
6089 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6090 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6091 case MONO_TYPE_CHAR:
6101 return NULL; /* stupid ARM FP swapped format */
/* field's declared size must cover the computed data size */
6111 if (size > mono_type_size (field->type, &dummy_align))
6114 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6115 if (!method->klass->image->dynamic) {
6116 field_index = read32 (ip + 2) & 0xffffff;
6117 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6118 data_ptr = mono_image_rva_map (method->klass->image, rva);
6119 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6120 /* for aot code we do the lookup on load */
6121 if (aot && data_ptr)
6122 return GUINT_TO_POINTER (rva);
6124 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: data lives in the managed field object */
6126 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message that names
 *   METHOD and disassembles the offending instruction at IP (or notes
 *   an empty method body).
 */
6134 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6136 char *method_fname = mono_method_full_name (method, TRUE);
6138 MonoMethodHeader *header = mono_method_get_header (method);
6140 if (header->code_size == 0)
6141 method_code = g_strdup ("method body is empty.");
6143 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6144 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6145 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6146 g_free (method_fname);
6147 g_free (method_code);
/* header is freed together with the rest of the compile */
6148 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on CFG; the pointer is
 *   GC-registered so the object stays alive until the compile ends.
 */
6152 set_exception_object (MonoCompile *cfg, MonoException *exception)
6154 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6155 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6156 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of *SP into local N.  When the store would be a plain
 *   reg-reg move of a freshly emitted constant, retarget the constant's
 *   destination register instead of emitting a separate move.
 */
6160 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6163 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6164 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6165 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6166 /* Optimize reg-reg moves away */
6168 * Can't optimize other opcodes, since sp[0] might point to
6169 * the last ins of a decomposed opcode.
6171 sp [0]->dreg = (cfg)->locals [n]->dreg;
6173 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6178 * ldloca inhibits many optimizations so try to get rid of it in common
/* Peephole: when ldloca(.s) is immediately followed by initobj in the
 * same bblock, skip the address-taking entirely and zero the local
 * directly (PCONST NULL for reference types, VZERO for structs).
 * Returns the advanced IP on success; the non-matching path is not
 * visible in this extraction. */
6181 static inline unsigned char *
6182 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6191 local = read16 (ip + 2);
/* CEE_PREFIX1 + CEE_INITOBJ is the two-byte encoding of initobj */
6195 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6196 gboolean skip = FALSE;
6198 /* From the INITOBJ case */
6199 token = read32 (ip + 2);
6200 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6201 CHECK_TYPELOAD (klass);
6202 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6203 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6204 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6205 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Returns TRUE if CLASS is System.Exception or derives from it,
 *   walking the parent chain.
 */
6218 is_exception_class (MonoClass *class)
6221 if (class == mono_defaults.exception_class)
6223 class = class->parent;
6229 * is_jit_optimizer_disabled:
6231 * Determine whenever M's assembly has a DebuggableAttribute with the
6232 * IsJITOptimizerDisabled flag set.
6235 is_jit_optimizer_disabled (MonoMethod *m)
6237 MonoAssembly *ass = m->klass->image->assembly;
6238 MonoCustomAttrInfo* attrs;
6239 static MonoClass *klass;
6241 gboolean val = FALSE;
/* fast path: result is cached per-assembly */
6244 if (ass->jit_optimizer_disabled_inited)
6245 return ass->jit_optimizer_disabled;
6248 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* DebuggableAttribute missing from corlib: treat as "not disabled";
 * the barrier publishes the value before the inited flag */
6251 ass->jit_optimizer_disabled = FALSE;
6252 mono_memory_barrier ();
6253 ass->jit_optimizer_disabled_inited = TRUE;
6257 attrs = mono_custom_attrs_from_assembly (ass);
6259 for (i = 0; i < attrs->num_attrs; ++i) {
6260 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6263 MonoMethodSignature *sig;
6265 if (!attr->ctor || attr->ctor->klass != klass)
6267 /* Decode the attribute. See reflection.c */
6268 len = attr->data_size;
6269 p = (const char*)attr->data;
/* custom attribute blobs start with prolog 0x0001 */
6270 g_assert (read16 (p) == 0x0001);
6273 // FIXME: Support named parameters
6274 sig = mono_method_signature (attr->ctor);
6275 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6277 /* Two boolean arguments */
6281 mono_custom_attrs_free (attrs);
/* publish the computed value, then the inited flag (barrier between) */
6284 ass->jit_optimizer_disabled = val;
6285 mono_memory_barrier ();
6286 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Returns TRUE when a tail. call from METHOD to CMETHOD (signature
 *   FSIG) can actually be compiled as a tail call on this architecture
 *   and configuration; otherwise the call falls back to a normal call.
 */
6292 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6294 gboolean supported_tail_call;
/* arch-specific predicate when available, else require identical
 * signatures and a non-struct return */
6297 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6298 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6300 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6303 for (i = 0; i < fsig->param_count; ++i) {
6304 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6305 /* These can point to the current method's stack */
6306 supported_tail_call = FALSE;
6308 if (fsig->hasthis && cmethod->klass->valuetype)
6309 /* this might point to the current method's stack */
6310 supported_tail_call = FALSE;
6311 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6312 supported_tail_call = FALSE;
/* an LMF frame must stay live for the whole caller */
6313 if (cfg->method->save_lmf)
6314 supported_tail_call = FALSE;
6315 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6316 supported_tail_call = FALSE;
6318 /* Debugging support */
6320 if (supported_tail_call) {
6321 if (!mono_debug_count ())
6322 supported_tail_call = FALSE;
6326 return supported_tail_call;
6329 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6330 * it to the thread local value based on the tls_offset field. Every other kind of access to
6331 * the field causes an assert.
/* Returns TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata field. */
6334 is_magic_tls_access (MonoClassField *field)
6336 if (strcmp (field->name, "tlsdata"))
6338 if (strcmp (field->parent->name, "ThreadLocal`1"))
6340 return field->parent->image == mono_defaults.corlib;
6343 /* emits the code needed to access a managed tls var (like ThreadStatic)
6344 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6345 * pointer for the current thread.
6346 * Returns the MonoInst* representing the address of the tls var.
6349 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6352 int static_data_reg, array_reg, dreg;
6353 int offset2_reg, idx_reg;
6354 // inlined access to the tls data
6355 // idx = (offset >> 24) - 1;
6356 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6357 static_data_reg = alloc_ireg (cfg);
6358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1; then scale by pointer size for indexing */
6359 idx_reg = alloc_ireg (cfg);
6360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6363 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = thread->static_data [idx] */
6364 array_reg = alloc_ireg (cfg);
6365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff (byte offset inside that chunk) */
6366 offset2_reg = alloc_ireg (cfg);
6367 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6368 dreg = alloc_ireg (cfg);
6369 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6374 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6375 * this address is cached per-method in cached_tls_addr.
6378 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6380 MonoInst *load, *addr, *temp, *store, *thread_ins;
6381 MonoClassField *offset_field;
/* fast path: the address was already computed earlier in this method */
6383 if (*cached_tls_addr) {
6384 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6387 thread_ins = mono_get_thread_intrinsic (cfg);
6388 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load ThreadLocal<T>.tls_offset from the instance */
6390 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6392 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no thread intrinsic on this arch: call the icall instead */
6394 MonoMethod *thread_method;
6395 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6396 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6398 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6399 addr->klass = mono_class_from_mono_type (tls_field->type);
6400 addr->type = STACK_MP;
/* cache the computed address in a temp for later reuse */
6401 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6402 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6404 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6409 * mono_method_to_ir:
6411 * Translate the .net IL into linear IR.
6414 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6415 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6416 guint inline_offset, gboolean is_virtual_call)
6419 MonoInst *ins, **sp, **stack_start;
6420 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6421 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6422 MonoMethod *cmethod, *method_definition;
6423 MonoInst **arg_array;
6424 MonoMethodHeader *header;
6426 guint32 token, ins_flag;
6428 MonoClass *constrained_call = NULL;
6429 unsigned char *ip, *end, *target, *err_pos;
6430 static double r8_0 = 0.0;
6431 MonoMethodSignature *sig;
6432 MonoGenericContext *generic_context = NULL;
6433 MonoGenericContainer *generic_container = NULL;
6434 MonoType **param_types;
6435 int i, n, start_new_bblock, dreg;
6436 int num_calls = 0, inline_costs = 0;
6437 int breakpoint_id = 0;
6439 MonoBoolean security, pinvoke;
6440 MonoSecurityManager* secman = NULL;
6441 MonoDeclSecurityActions actions;
6442 GSList *class_inits = NULL;
6443 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6445 gboolean init_locals, seq_points, skip_dead_blocks;
6446 gboolean disable_inline, sym_seq_points = FALSE;
6447 MonoInst *cached_tls_addr = NULL;
6448 MonoDebugMethodInfo *minfo;
6449 MonoBitSet *seq_point_locs = NULL;
6450 MonoBitSet *seq_point_set_locs = NULL;
6452 disable_inline = is_jit_optimizer_disabled (method);
6454 /* serialization and xdomain stuff may need access to private fields and methods */
6455 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6456 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6457 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6458 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6459 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6460 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6462 dont_verify |= mono_security_smcs_hack_enabled ();
6464 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6465 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6466 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6467 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6468 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6470 image = method->klass->image;
6471 header = mono_method_get_header (method);
6473 MonoLoaderError *error;
6475 if ((error = mono_loader_get_last_error ())) {
6476 mono_cfg_set_exception (cfg, error->exception_type);
6478 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6479 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6481 goto exception_exit;
6483 generic_container = mono_method_get_generic_container (method);
6484 sig = mono_method_signature (method);
6485 num_args = sig->hasthis + sig->param_count;
6486 ip = (unsigned char*)header->code;
6487 cfg->cil_start = ip;
6488 end = ip + header->code_size;
6489 cfg->stat_cil_code_size += header->code_size;
6490 init_locals = header->init_locals;
6492 seq_points = cfg->gen_seq_points && cfg->method == method;
6493 #ifdef PLATFORM_ANDROID
6494 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6497 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6498 /* We could hit a seq point before attaching to the JIT (#8338) */
6502 if (cfg->gen_seq_points && cfg->method == method) {
6503 minfo = mono_debug_lookup_method (method);
6505 int i, n_il_offsets;
6509 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6510 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6511 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6512 sym_seq_points = TRUE;
6513 for (i = 0; i < n_il_offsets; ++i) {
6514 if (il_offsets [i] < header->code_size)
6515 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6521 * Methods without init_locals set could cause asserts in various passes
6526 method_definition = method;
6527 while (method_definition->is_inflated) {
6528 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6529 method_definition = imethod->declaring;
6532 /* SkipVerification is not allowed if core-clr is enabled */
6533 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6535 dont_verify_stloc = TRUE;
6538 if (mono_debug_using_mono_debugger ())
6539 cfg->keep_cil_nops = TRUE;
6541 if (sig->is_inflated)
6542 generic_context = mono_method_get_context (method);
6543 else if (generic_container)
6544 generic_context = &generic_container->context;
6545 cfg->generic_context = generic_context;
6547 if (!cfg->generic_sharing_context)
6548 g_assert (!sig->has_type_parameters);
6550 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6551 g_assert (method->is_inflated);
6552 g_assert (mono_method_get_context (method)->method_inst);
6554 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6555 g_assert (sig->generic_param_count);
6557 if (cfg->method == method) {
6558 cfg->real_offset = 0;
6560 cfg->real_offset = inline_offset;
6563 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6564 cfg->cil_offset_to_bb_len = header->code_size;
6566 cfg->current_method = method;
6568 if (cfg->verbose_level > 2)
6569 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6571 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6573 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6574 for (n = 0; n < sig->param_count; ++n)
6575 param_types [n + sig->hasthis] = sig->params [n];
6576 cfg->arg_types = param_types;
6578 dont_inline = g_list_prepend (dont_inline, method);
6579 if (cfg->method == method) {
6581 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6582 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6585 NEW_BBLOCK (cfg, start_bblock);
6586 cfg->bb_entry = start_bblock;
6587 start_bblock->cil_code = NULL;
6588 start_bblock->cil_length = 0;
6589 #if defined(__native_client_codegen__)
6590 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6591 ins->dreg = alloc_dreg (cfg, STACK_I4);
6592 MONO_ADD_INS (start_bblock, ins);
6596 NEW_BBLOCK (cfg, end_bblock);
6597 cfg->bb_exit = end_bblock;
6598 end_bblock->cil_code = NULL;
6599 end_bblock->cil_length = 0;
6600 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6601 g_assert (cfg->num_bblocks == 2);
6603 arg_array = cfg->args;
6605 if (header->num_clauses) {
6606 cfg->spvars = g_hash_table_new (NULL, NULL);
6607 cfg->exvars = g_hash_table_new (NULL, NULL);
6609 /* handle exception clauses */
6610 for (i = 0; i < header->num_clauses; ++i) {
6611 MonoBasicBlock *try_bb;
6612 MonoExceptionClause *clause = &header->clauses [i];
6613 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6614 try_bb->real_offset = clause->try_offset;
6615 try_bb->try_start = TRUE;
6616 try_bb->region = ((i + 1) << 8) | clause->flags;
6617 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6618 tblock->real_offset = clause->handler_offset;
6619 tblock->flags |= BB_EXCEPTION_HANDLER;
6621 link_bblock (cfg, try_bb, tblock);
6623 if (*(ip + clause->handler_offset) == CEE_POP)
6624 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6626 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6627 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6628 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6629 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6630 MONO_ADD_INS (tblock, ins);
6632 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6633 /* finally clauses already have a seq point */
6634 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6635 MONO_ADD_INS (tblock, ins);
6638 /* todo: is a fault block unsafe to optimize? */
6639 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6640 tblock->flags |= BB_EXCEPTION_UNSAFE;
6644 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6646 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6648 /* catch and filter blocks get the exception object on the stack */
6649 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6650 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6651 MonoInst *dummy_use;
6653 /* mostly like handle_stack_args (), but just sets the input args */
6654 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6655 tblock->in_scount = 1;
6656 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6657 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6660 * Add a dummy use for the exvar so its liveness info will be
6664 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6666 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6667 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6668 tblock->flags |= BB_EXCEPTION_HANDLER;
6669 tblock->real_offset = clause->data.filter_offset;
6670 tblock->in_scount = 1;
6671 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6672 /* The filter block shares the exvar with the handler block */
6673 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6674 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6675 MONO_ADD_INS (tblock, ins);
6679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6680 clause->data.catch_class &&
6681 cfg->generic_sharing_context &&
6682 mono_class_check_context_used (clause->data.catch_class)) {
6684 * In shared generic code with catch
6685 * clauses containing type variables
6686 * the exception handling code has to
6687 * be able to get to the rgctx.
6688 * Therefore we have to make sure that
6689 * the vtable/mrgctx argument (for
6690 * static or generic methods) or the
6691 * "this" argument (for non-static
6692 * methods) are live.
6694 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6695 mini_method_get_context (method)->method_inst ||
6696 method->klass->valuetype) {
6697 mono_get_vtable_var (cfg);
6699 MonoInst *dummy_use;
6701 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6706 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6707 cfg->cbb = start_bblock;
6708 cfg->args = arg_array;
6709 mono_save_args (cfg, sig, inline_args);
6712 /* FIRST CODE BLOCK */
6713 NEW_BBLOCK (cfg, bblock);
6714 bblock->cil_code = ip;
6718 ADD_BBLOCK (cfg, bblock);
6720 if (cfg->method == method) {
6721 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6722 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6723 MONO_INST_NEW (cfg, ins, OP_BREAK);
6724 MONO_ADD_INS (bblock, ins);
6728 if (mono_security_cas_enabled ())
6729 secman = mono_security_manager_get_methods ();
6731 security = (secman && mono_security_method_has_declsec (method));
6732 /* at this point having security doesn't mean we have any code to generate */
6733 if (security && (cfg->method == method)) {
6734 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6735 * And we do not want to enter the next section (with allocation) if we
6736 * have nothing to generate */
6737 security = mono_declsec_get_demands (method, &actions);
6740 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6741 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6743 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6744 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6745 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6747 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6748 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6752 mono_custom_attrs_free (custom);
6755 custom = mono_custom_attrs_from_class (wrapped->klass);
6756 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6760 mono_custom_attrs_free (custom);
6763 /* not a P/Invoke after all */
6768 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6769 /* we use a separate basic block for the initialization code */
6770 NEW_BBLOCK (cfg, init_localsbb);
6771 cfg->bb_init = init_localsbb;
6772 init_localsbb->real_offset = cfg->real_offset;
6773 start_bblock->next_bb = init_localsbb;
6774 init_localsbb->next_bb = bblock;
6775 link_bblock (cfg, start_bblock, init_localsbb);
6776 link_bblock (cfg, init_localsbb, bblock);
6778 cfg->cbb = init_localsbb;
6780 start_bblock->next_bb = bblock;
6781 link_bblock (cfg, start_bblock, bblock);
6784 if (cfg->gsharedvt && cfg->method == method) {
6785 MonoGSharedVtMethodInfo *info;
6786 MonoInst *var, *locals_var;
6789 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6790 info->method = cfg->method;
6792 info->entries = g_ptr_array_new ();
6793 cfg->gsharedvt_info = info;
6795 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6796 /* prevent it from being register allocated */
6797 //var->flags |= MONO_INST_INDIRECT;
6798 cfg->gsharedvt_info_var = var;
6800 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6801 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6803 /* Allocate locals */
6804 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6805 /* prevent it from being register allocated */
6806 //locals_var->flags |= MONO_INST_INDIRECT;
6807 cfg->gsharedvt_locals_var = locals_var;
6809 dreg = alloc_ireg (cfg);
6810 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6812 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6813 ins->dreg = locals_var->dreg;
6815 MONO_ADD_INS (cfg->cbb, ins);
6816 cfg->gsharedvt_locals_var_ins = ins;
6818 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6821 ins->flags |= MONO_INST_INIT;
6825 /* at this point we know, if security is TRUE, that some code needs to be generated */
6826 if (security && (cfg->method == method)) {
6829 cfg->stat_cas_demand_generation++;
6831 if (actions.demand.blob) {
6832 /* Add code for SecurityAction.Demand */
6833 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6834 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6835 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6836 mono_emit_method_call (cfg, secman->demand, args, NULL);
6838 if (actions.noncasdemand.blob) {
6839 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6840 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6841 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6842 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6843 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6844 mono_emit_method_call (cfg, secman->demand, args, NULL);
6846 if (actions.demandchoice.blob) {
6847 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6848 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6849 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6850 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6851 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6855 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6857 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6860 if (mono_security_core_clr_enabled ()) {
6861 /* check if this is native code, e.g. an icall or a p/invoke */
6862 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6863 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6865 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6866 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6868 /* if this is a native call then it can only be JITted from platform code */
6869 if ((icall || pinvk) && method->klass && method->klass->image) {
6870 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6871 MonoException *ex = icall ? mono_get_exception_security () :
6872 mono_get_exception_method_access ();
6873 emit_throw_exception (cfg, ex);
6880 if (header->code_size == 0)
6883 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6888 if (cfg->method == method)
6889 mono_debug_init_method (cfg, bblock, breakpoint_id);
6891 for (n = 0; n < header->num_locals; ++n) {
6892 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6897 /* We force the vtable variable here for all shared methods
6898 for the possibility that they might show up in a stack
6899 trace where their exact instantiation is needed. */
6900 if (cfg->generic_sharing_context && method == cfg->method) {
6901 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6902 mini_method_get_context (method)->method_inst ||
6903 method->klass->valuetype) {
6904 mono_get_vtable_var (cfg);
6906 /* FIXME: Is there a better way to do this?
6907 We need the variable live for the duration
6908 of the whole method. */
6909 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6913 /* add a check for this != NULL to inlined methods */
6914 if (is_virtual_call) {
6917 NEW_ARGLOAD (cfg, arg_ins, 0);
6918 MONO_ADD_INS (cfg->cbb, arg_ins);
6919 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6922 skip_dead_blocks = !dont_verify;
6923 if (skip_dead_blocks) {
6924 original_bb = bb = mono_basic_block_split (method, &error);
6925 if (!mono_error_ok (&error)) {
6926 mono_error_cleanup (&error);
6932 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6933 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6936 start_new_bblock = 0;
6939 if (cfg->method == method)
6940 cfg->real_offset = ip - header->code;
6942 cfg->real_offset = inline_offset;
6947 if (start_new_bblock) {
6948 bblock->cil_length = ip - bblock->cil_code;
6949 if (start_new_bblock == 2) {
6950 g_assert (ip == tblock->cil_code);
6952 GET_BBLOCK (cfg, tblock, ip);
6954 bblock->next_bb = tblock;
6957 start_new_bblock = 0;
6958 for (i = 0; i < bblock->in_scount; ++i) {
6959 if (cfg->verbose_level > 3)
6960 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6961 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6965 g_slist_free (class_inits);
6968 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6969 link_bblock (cfg, bblock, tblock);
6970 if (sp != stack_start) {
6971 handle_stack_args (cfg, stack_start, sp - stack_start);
6973 CHECK_UNVERIFIABLE (cfg);
6975 bblock->next_bb = tblock;
6978 for (i = 0; i < bblock->in_scount; ++i) {
6979 if (cfg->verbose_level > 3)
6980 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6981 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6984 g_slist_free (class_inits);
6989 if (skip_dead_blocks) {
6990 int ip_offset = ip - header->code;
6992 if (ip_offset == bb->end)
6996 int op_size = mono_opcode_size (ip, end);
6997 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6999 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7001 if (ip_offset + op_size == bb->end) {
7002 MONO_INST_NEW (cfg, ins, OP_NOP);
7003 MONO_ADD_INS (bblock, ins);
7004 start_new_bblock = 1;
7012 * Sequence points are points where the debugger can place a breakpoint.
7013 * Currently, we generate these automatically at points where the IL
7016 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7018 * Make methods interruptable at the beginning, and at the targets of
7019 * backward branches.
7020 * Also, do this at the start of every bblock in methods with clauses too,
7021 * to be able to handle instructions with imprecise control flow like
7023 * Backward branches are handled at the end of method-to-ir ().
7025 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7027 /* Avoid sequence points on empty IL like .volatile */
7028 // FIXME: Enable this
7029 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7030 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7031 MONO_ADD_INS (cfg->cbb, ins);
7034 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7037 bblock->real_offset = cfg->real_offset;
7039 if ((cfg->method == method) && cfg->coverage_info) {
7040 guint32 cil_offset = ip - header->code;
7041 cfg->coverage_info->data [cil_offset].cil_code = ip;
7043 /* TODO: Use an increment here */
7044 #if defined(TARGET_X86)
7045 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7046 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7048 MONO_ADD_INS (cfg->cbb, ins);
7050 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7051 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7055 if (cfg->verbose_level > 3)
7056 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7060 if (seq_points && !sym_seq_points && sp != stack_start) {
7062 * The C# compiler uses these nops to notify the JIT that it should
7063 * insert seq points.
7065 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7066 MONO_ADD_INS (cfg->cbb, ins);
7068 if (cfg->keep_cil_nops)
7069 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7071 MONO_INST_NEW (cfg, ins, OP_NOP);
7073 MONO_ADD_INS (bblock, ins);
7076 if (should_insert_brekpoint (cfg->method)) {
7077 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7079 MONO_INST_NEW (cfg, ins, OP_NOP);
7082 MONO_ADD_INS (bblock, ins);
7088 CHECK_STACK_OVF (1);
7089 n = (*ip)-CEE_LDARG_0;
7091 EMIT_NEW_ARGLOAD (cfg, ins, n);
7099 CHECK_STACK_OVF (1);
7100 n = (*ip)-CEE_LDLOC_0;
7102 EMIT_NEW_LOCLOAD (cfg, ins, n);
7111 n = (*ip)-CEE_STLOC_0;
7114 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7116 emit_stloc_ir (cfg, sp, header, n);
7123 CHECK_STACK_OVF (1);
7126 EMIT_NEW_ARGLOAD (cfg, ins, n);
7132 CHECK_STACK_OVF (1);
7135 NEW_ARGLOADA (cfg, ins, n);
7136 MONO_ADD_INS (cfg->cbb, ins);
7146 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7148 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7153 CHECK_STACK_OVF (1);
7156 EMIT_NEW_LOCLOAD (cfg, ins, n);
7160 case CEE_LDLOCA_S: {
7161 unsigned char *tmp_ip;
7163 CHECK_STACK_OVF (1);
7164 CHECK_LOCAL (ip [1]);
7166 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7172 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7181 CHECK_LOCAL (ip [1]);
7182 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7184 emit_stloc_ir (cfg, sp, header, ip [1]);
7189 CHECK_STACK_OVF (1);
7190 EMIT_NEW_PCONST (cfg, ins, NULL);
7191 ins->type = STACK_OBJ;
7196 CHECK_STACK_OVF (1);
7197 EMIT_NEW_ICONST (cfg, ins, -1);
7210 CHECK_STACK_OVF (1);
7211 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7217 CHECK_STACK_OVF (1);
7219 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7225 CHECK_STACK_OVF (1);
7226 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7232 CHECK_STACK_OVF (1);
7233 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7234 ins->type = STACK_I8;
7235 ins->dreg = alloc_dreg (cfg, STACK_I8);
7237 ins->inst_l = (gint64)read64 (ip);
7238 MONO_ADD_INS (bblock, ins);
7244 gboolean use_aotconst = FALSE;
7246 #ifdef TARGET_POWERPC
7247 /* FIXME: Clean this up */
7248 if (cfg->compile_aot)
7249 use_aotconst = TRUE;
7252 /* FIXME: we should really allocate this only late in the compilation process */
7253 f = mono_domain_alloc (cfg->domain, sizeof (float));
7255 CHECK_STACK_OVF (1);
7261 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7263 dreg = alloc_freg (cfg);
7264 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7265 ins->type = STACK_R8;
7267 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7268 ins->type = STACK_R8;
7269 ins->dreg = alloc_dreg (cfg, STACK_R8);
7271 MONO_ADD_INS (bblock, ins);
7281 gboolean use_aotconst = FALSE;
7283 #ifdef TARGET_POWERPC
7284 /* FIXME: Clean this up */
7285 if (cfg->compile_aot)
7286 use_aotconst = TRUE;
7289 /* FIXME: we should really allocate this only late in the compilation process */
7290 d = mono_domain_alloc (cfg->domain, sizeof (double));
7292 CHECK_STACK_OVF (1);
7298 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7300 dreg = alloc_freg (cfg);
7301 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7302 ins->type = STACK_R8;
7304 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7305 ins->type = STACK_R8;
7306 ins->dreg = alloc_dreg (cfg, STACK_R8);
7308 MONO_ADD_INS (bblock, ins);
7317 MonoInst *temp, *store;
7319 CHECK_STACK_OVF (1);
7323 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7324 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7326 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7329 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7342 if (sp [0]->type == STACK_R8)
7343 /* we need to pop the value from the x86 FP stack */
7344 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7350 INLINE_FAILURE ("jmp");
7351 GSHAREDVT_FAILURE (*ip);
7354 if (stack_start != sp)
7356 token = read32 (ip + 1);
7357 /* FIXME: check the signature matches */
7358 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7360 if (!cmethod || mono_loader_get_last_error ())
7363 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7364 GENERIC_SHARING_FAILURE (CEE_JMP);
7366 if (mono_security_cas_enabled ())
7367 CHECK_CFG_EXCEPTION;
7369 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7371 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7374 /* Handle tail calls similarly to calls */
7375 n = fsig->param_count + fsig->hasthis;
7377 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7378 call->method = cmethod;
7379 call->tail_call = TRUE;
7380 call->signature = mono_method_signature (cmethod);
7381 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7382 call->inst.inst_p0 = cmethod;
7383 for (i = 0; i < n; ++i)
7384 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7386 mono_arch_emit_call (cfg, call);
7387 MONO_ADD_INS (bblock, (MonoInst*)call);
7390 for (i = 0; i < num_args; ++i)
7391 /* Prevent arguments from being optimized away */
7392 arg_array [i]->flags |= MONO_INST_VOLATILE;
7394 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7395 ins = (MonoInst*)call;
7396 ins->inst_p0 = cmethod;
7397 MONO_ADD_INS (bblock, ins);
7401 start_new_bblock = 1;
7406 case CEE_CALLVIRT: {
7407 MonoInst *addr = NULL;
7408 MonoMethodSignature *fsig = NULL;
7410 int virtual = *ip == CEE_CALLVIRT;
7411 int calli = *ip == CEE_CALLI;
7412 gboolean pass_imt_from_rgctx = FALSE;
7413 MonoInst *imt_arg = NULL;
7414 MonoInst *keep_this_alive = NULL;
7415 gboolean pass_vtable = FALSE;
7416 gboolean pass_mrgctx = FALSE;
7417 MonoInst *vtable_arg = NULL;
7418 gboolean check_this = FALSE;
7419 gboolean supported_tail_call = FALSE;
7420 gboolean tail_call = FALSE;
7421 gboolean need_seq_point = FALSE;
7422 guint32 call_opcode = *ip;
7423 gboolean emit_widen = TRUE;
7424 gboolean push_res = TRUE;
7425 gboolean skip_ret = FALSE;
7426 gboolean delegate_invoke = FALSE;
7429 token = read32 (ip + 1);
7434 //GSHAREDVT_FAILURE (*ip);
7439 fsig = mini_get_signature (method, token, generic_context);
7440 n = fsig->param_count + fsig->hasthis;
7442 if (method->dynamic && fsig->pinvoke) {
7446 * This is a call through a function pointer using a pinvoke
7447 * signature. Have to create a wrapper and call that instead.
7448 * FIXME: This is very slow, need to create a wrapper at JIT time
7449 * instead based on the signature.
7451 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7452 EMIT_NEW_PCONST (cfg, args [1], fsig);
7454 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7457 MonoMethod *cil_method;
7459 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7460 cil_method = cmethod;
7462 if (constrained_call) {
7463 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7464 if (cfg->verbose_level > 2)
7465 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7466 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7467 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7468 cfg->generic_sharing_context)) {
7469 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7472 if (cfg->verbose_level > 2)
7473 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7475 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7477 * This is needed since get_method_constrained can't find
7478 * the method in klass representing a type var.
7479 * The type var is guaranteed to be a reference type in this
7482 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7483 g_assert (!cmethod->klass->valuetype);
7485 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7490 if (!cmethod || mono_loader_get_last_error ())
7492 if (!dont_verify && !cfg->skip_visibility) {
7493 MonoMethod *target_method = cil_method;
7494 if (method->is_inflated) {
7495 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7497 if (!mono_method_can_access_method (method_definition, target_method) &&
7498 !mono_method_can_access_method (method, cil_method))
7499 METHOD_ACCESS_FAILURE;
7502 if (mono_security_core_clr_enabled ())
7503 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7505 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7506 /* MS.NET seems to silently convert this to a callvirt */
7511 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7512 * converts to a callvirt.
7514 * tests/bug-515884.il is an example of this behavior
7516 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7517 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7518 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7522 if (!cmethod->klass->inited)
7523 if (!mono_class_init (cmethod->klass))
7524 TYPE_LOAD_ERROR (cmethod->klass);
7526 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7527 mini_class_is_system_array (cmethod->klass)) {
7528 array_rank = cmethod->klass->rank;
7529 fsig = mono_method_signature (cmethod);
7531 fsig = mono_method_signature (cmethod);
7536 if (fsig->pinvoke) {
7537 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7538 check_for_pending_exc, FALSE);
7539 fsig = mono_method_signature (wrapper);
7540 } else if (constrained_call) {
7541 fsig = mono_method_signature (cmethod);
7543 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7547 mono_save_token_info (cfg, image, token, cil_method);
7549 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7551 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7552 * foo (bar (), baz ())
7553 * works correctly. MS does this also:
7554 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7555 * The problem with this approach is that the debugger will stop after all calls returning a value,
7556 * even for simple cases, like:
7559 /* Special case a few common successor opcodes */
7560 if (!(ip + 5 < end && ip [5] == CEE_POP))
7561 need_seq_point = TRUE;
7564 n = fsig->param_count + fsig->hasthis;
7566 /* Don't support calls made using type arguments for now */
7568 if (cfg->gsharedvt) {
7569 if (mini_is_gsharedvt_signature (cfg, fsig))
7570 GSHAREDVT_FAILURE (*ip);
7574 if (mono_security_cas_enabled ()) {
7575 if (check_linkdemand (cfg, method, cmethod))
7576 INLINE_FAILURE ("linkdemand");
7577 CHECK_CFG_EXCEPTION;
7580 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7581 g_assert_not_reached ();
7584 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7587 if (!cfg->generic_sharing_context && cmethod)
7588 g_assert (!mono_method_check_context_used (cmethod));
7592 //g_assert (!virtual || fsig->hasthis);
7596 if (constrained_call) {
7597 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7599 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7601 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7602 /* The 'Own method' case below */
7603 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) &&
7604 (MONO_TYPE_IS_VOID (fsig->ret) || fsig->ret->type == MONO_TYPE_I4 || fsig->ret->type == MONO_TYPE_BOOLEAN || fsig->ret->type == MONO_TYPE_STRING) &&
7605 (fsig->param_count == 0 || (fsig->param_count == 1 && MONO_TYPE_IS_REFERENCE (fsig->params [0])))) {
7606 MonoInst *args [16];
7609 * This case handles calls to object:ToString()/Equals()/GetHashCode(), plus some simple interface calls enough to support
7610 * AsyncTaskMethodBuilder.
7614 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7615 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7617 if (fsig->param_count) {
7618 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7619 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7620 ins->dreg = alloc_preg (cfg);
7621 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7622 MONO_ADD_INS (cfg->cbb, ins);
7625 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [3]->dreg, 0, sp [1]->dreg);
7627 EMIT_NEW_ICONST (cfg, args [3], 0);
7629 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7632 if (fsig->ret->type == MONO_TYPE_I4 || fsig->ret->type == MONO_TYPE_BOOLEAN) {
7637 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7638 MONO_ADD_INS (cfg->cbb, add);
7639 dreg = alloc_ireg (cfg);
7641 if (fsig->ret->type == MONO_TYPE_BOOLEAN)
7642 NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg, add->dreg, 0);
7644 NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, add->dreg, 0);
7645 MONO_ADD_INS (cfg->cbb, ins);
7646 /* ins represents the call result */
7651 GSHAREDVT_FAILURE (*ip);
7655 * We have the `constrained.' prefix opcode.
7657 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7659 * The type parameter is instantiated as a valuetype,
7660 * but that type doesn't override the method we're
7661 * calling, so we need to box `this'.
7663 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7664 ins->klass = constrained_call;
7665 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7666 CHECK_CFG_EXCEPTION;
7667 } else if (!constrained_call->valuetype) {
7668 int dreg = alloc_ireg_ref (cfg);
7671 * The type parameter is instantiated as a reference
7672 * type. We have a managed pointer on the stack, so
7673 * we need to dereference it here.
7675 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7676 ins->type = STACK_OBJ;
7679 if (cmethod->klass->valuetype) {
7682 /* Interface method */
7685 mono_class_setup_vtable (constrained_call);
7686 CHECK_TYPELOAD (constrained_call);
7687 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7689 TYPE_LOAD_ERROR (constrained_call);
7690 slot = mono_method_get_vtable_slot (cmethod);
7692 TYPE_LOAD_ERROR (cmethod->klass);
7693 cmethod = constrained_call->vtable [ioffset + slot];
7695 if (cmethod->klass == mono_defaults.enum_class) {
7696 /* Enum implements some interfaces, so treat this as the first case */
7697 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7698 ins->klass = constrained_call;
7699 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7700 CHECK_CFG_EXCEPTION;
7705 constrained_call = NULL;
7708 if (!calli && check_call_signature (cfg, fsig, sp))
7711 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7712 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7713 delegate_invoke = TRUE;
7716 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7719 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7727 * If the callee is a shared method, then its static cctor
7728 * might not get called after the call was patched.
7730 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7731 emit_generic_class_init (cfg, cmethod->klass);
7732 CHECK_TYPELOAD (cmethod->klass);
7736 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7738 if (cfg->generic_sharing_context && cmethod) {
7739 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7741 context_used = mini_method_check_context_used (cfg, cmethod);
7743 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7744 /* Generic method interface
7745 calls are resolved via a
7746 helper function and don't
7748 if (!cmethod_context || !cmethod_context->method_inst)
7749 pass_imt_from_rgctx = TRUE;
7753 * If a shared method calls another
7754 * shared method then the caller must
7755 * have a generic sharing context
7756 * because the magic trampoline
7757 * requires it. FIXME: We shouldn't
7758 * have to force the vtable/mrgctx
7759 * variable here. Instead there
7760 * should be a flag in the cfg to
7761 * request a generic sharing context.
7764 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7765 mono_get_vtable_var (cfg);
7770 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7772 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7774 CHECK_TYPELOAD (cmethod->klass);
7775 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7780 g_assert (!vtable_arg);
7782 if (!cfg->compile_aot) {
7784 * emit_get_rgctx_method () calls mono_class_vtable () so check
7785 * for type load errors before.
7787 mono_class_setup_vtable (cmethod->klass);
7788 CHECK_TYPELOAD (cmethod->klass);
7791 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7793 /* !marshalbyref is needed to properly handle generic methods + remoting */
7794 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7795 MONO_METHOD_IS_FINAL (cmethod)) &&
7796 !mono_class_is_marshalbyref (cmethod->klass)) {
7803 if (pass_imt_from_rgctx) {
7804 g_assert (!pass_vtable);
7807 imt_arg = emit_get_rgctx_method (cfg, context_used,
7808 cmethod, MONO_RGCTX_INFO_METHOD);
7812 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7814 /* Calling virtual generic methods */
7815 if (cmethod && virtual &&
7816 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7817 !(MONO_METHOD_IS_FINAL (cmethod) &&
7818 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7819 fsig->generic_param_count &&
7820 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7821 MonoInst *this_temp, *this_arg_temp, *store;
7822 MonoInst *iargs [4];
7823 gboolean use_imt = FALSE;
7825 g_assert (fsig->is_inflated);
7827 /* Prevent inlining of methods that contain indirect calls */
7828 INLINE_FAILURE ("virtual generic call");
7830 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7831 GSHAREDVT_FAILURE (*ip);
7833 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7834 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7839 g_assert (!imt_arg);
7841 g_assert (cmethod->is_inflated);
7842 imt_arg = emit_get_rgctx_method (cfg, context_used,
7843 cmethod, MONO_RGCTX_INFO_METHOD);
7844 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7846 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7847 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7848 MONO_ADD_INS (bblock, store);
7850 /* FIXME: This should be a managed pointer */
7851 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7853 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7854 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7855 cmethod, MONO_RGCTX_INFO_METHOD);
7856 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7857 addr = mono_emit_jit_icall (cfg,
7858 mono_helper_compile_generic_method, iargs);
7860 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7862 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7869 * Implement a workaround for the inherent races involved in locking:
7875 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7876 * try block, the Exit () won't be executed, see:
7877 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7878 * To work around this, we extend such try blocks to include the last x bytes
7879 * of the Monitor.Enter () call.
7881 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7882 MonoBasicBlock *tbb;
7884 GET_BBLOCK (cfg, tbb, ip + 5);
7886 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7887 * from Monitor.Enter like ArgumentNullException.
7889 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7890 /* Mark this bblock as needing to be extended */
7891 tbb->extend_try_block = TRUE;
7895 /* Conversion to a JIT intrinsic */
7896 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7898 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7899 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7906 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7907 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7908 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7909 !g_list_find (dont_inline, cmethod)) {
7911 gboolean always = FALSE;
7913 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7914 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7915 /* Prevent inlining of methods that call wrappers */
7916 INLINE_FAILURE ("wrapper call");
7917 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7921 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7923 cfg->real_offset += 5;
7926 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7927 /* *sp is already set by inline_method */
7932 inline_costs += costs;
7938 /* Tail recursion elimination */
7939 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7940 gboolean has_vtargs = FALSE;
7943 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7944 INLINE_FAILURE ("tail call");
7946 /* keep it simple */
7947 for (i = fsig->param_count - 1; i >= 0; i--) {
7948 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7953 for (i = 0; i < n; ++i)
7954 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7955 MONO_INST_NEW (cfg, ins, OP_BR);
7956 MONO_ADD_INS (bblock, ins);
7957 tblock = start_bblock->out_bb [0];
7958 link_bblock (cfg, bblock, tblock);
7959 ins->inst_target_bb = tblock;
7960 start_new_bblock = 1;
7962 /* skip the CEE_RET, too */
7963 if (ip_in_bb (cfg, bblock, ip + 5))
7970 inline_costs += 10 * num_calls++;
7973 * Making generic calls out of gsharedvt methods.
7975 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7976 MonoRgctxInfoType info_type;
7979 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7980 //GSHAREDVT_FAILURE (*ip);
7981 // disable for possible remoting calls
7982 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7983 GSHAREDVT_FAILURE (*ip);
7984 if (fsig->generic_param_count) {
7985 /* virtual generic call */
7986 g_assert (mono_use_imt);
7987 g_assert (!imt_arg);
7988 /* Same as the virtual generic case above */
7989 imt_arg = emit_get_rgctx_method (cfg, context_used,
7990 cmethod, MONO_RGCTX_INFO_METHOD);
7991 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7996 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7997 /* test_0_multi_dim_arrays () in gshared.cs */
7998 GSHAREDVT_FAILURE (*ip);
8000 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8001 keep_this_alive = sp [0];
8003 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8004 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8006 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8007 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8009 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8011 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8013 * We pass the address to the gsharedvt trampoline in the rgctx reg
8015 MonoInst *callee = addr;
8017 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8019 GSHAREDVT_FAILURE (*ip);
8021 addr = emit_get_rgctx_sig (cfg, context_used,
8022 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8023 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8027 /* Generic sharing */
8028 /* FIXME: only do this for generic methods if
8029 they are not shared! */
8030 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8031 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8032 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8033 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8034 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8035 INLINE_FAILURE ("gshared");
8037 g_assert (cfg->generic_sharing_context && cmethod);
8041 * We are compiling a call to a
8042 * generic method from shared code,
8043 * which means that we have to look up
8044 * the method in the rgctx and do an
8048 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8050 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8051 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8055 /* Indirect calls */
8057 if (call_opcode == CEE_CALL)
8058 g_assert (context_used);
8059 else if (call_opcode == CEE_CALLI)
8060 g_assert (!vtable_arg);
8062 /* FIXME: what the hell is this??? */
8063 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8064 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8066 /* Prevent inlining of methods with indirect calls */
8067 INLINE_FAILURE ("indirect call");
8069 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8074 * Instead of emitting an indirect call, emit a direct call
8075 * with the contents of the aotconst as the patch info.
8077 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8078 info_type = addr->inst_c1;
8079 info_data = addr->inst_p0;
8081 info_type = addr->inst_right->inst_c1;
8082 info_data = addr->inst_right->inst_left;
8085 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8086 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8091 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8099 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8100 MonoInst *val = sp [fsig->param_count];
8102 if (val->type == STACK_OBJ) {
8103 MonoInst *iargs [2];
8108 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8111 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8112 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8113 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8114 emit_write_barrier (cfg, addr, val);
8115 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8116 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8119 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8120 if (!cmethod->klass->element_class->valuetype && !readonly)
8121 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8122 CHECK_TYPELOAD (cmethod->klass);
8125 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8128 g_assert_not_reached ();
8135 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8139 /* Tail prefix / tail call optimization */
8141 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8142 /* FIXME: runtime generic context pointer for jumps? */
8143 /* FIXME: handle this for generic sharing eventually */
8144 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8145 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8146 supported_tail_call = TRUE;
8147 if (supported_tail_call) {
8148 if (call_opcode != CEE_CALL)
8149 supported_tail_call = FALSE;
8152 if (supported_tail_call) {
8155 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8156 INLINE_FAILURE ("tail call");
8158 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8160 if (ARCH_USE_OP_TAIL_CALL) {
8161 /* Handle tail calls similarly to normal calls */
8164 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8165 call->tail_call = TRUE;
8166 call->method = cmethod;
8167 call->signature = mono_method_signature (cmethod);
8170 * We implement tail calls by storing the actual arguments into the
8171 * argument variables, then emitting a CEE_JMP.
8173 for (i = 0; i < n; ++i) {
8174 /* Prevent argument from being register allocated */
8175 arg_array [i]->flags |= MONO_INST_VOLATILE;
8176 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8178 ins = (MonoInst*)call;
8179 ins->inst_p0 = cmethod;
8180 ins->inst_p1 = arg_array [0];
8181 MONO_ADD_INS (bblock, ins);
8182 link_bblock (cfg, bblock, end_bblock);
8183 start_new_bblock = 1;
8185 // FIXME: Eliminate unreachable epilogs
8188 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8189 * only reachable from this call.
8191 GET_BBLOCK (cfg, tblock, ip + 5);
8192 if (tblock == bblock || tblock->in_count == 0)
8201 * Synchronized wrappers.
8202 * It's hard to determine where to replace a method with its synchronized
8203 * wrapper without causing an infinite recursion. The current solution is
8204 * to add the synchronized wrapper in the trampolines, and to
8205 * change the called method to a dummy wrapper, and resolve that wrapper
8206 * to the real method in mono_jit_compile_method ().
8208 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8209 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8210 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8211 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8215 INLINE_FAILURE ("call");
8216 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8217 imt_arg, vtable_arg);
8220 link_bblock (cfg, bblock, end_bblock);
8221 start_new_bblock = 1;
8223 // FIXME: Eliminate unreachable epilogs
8226 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8227 * only reachable from this call.
8229 GET_BBLOCK (cfg, tblock, ip + 5);
8230 if (tblock == bblock || tblock->in_count == 0)
8237 /* End of call, INS should contain the result of the call, if any */
8239 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8242 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8247 if (keep_this_alive) {
8248 MonoInst *dummy_use;
8250 /* See mono_emit_method_call_full () */
8251 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8254 CHECK_CFG_EXCEPTION;
8258 g_assert (*ip == CEE_RET);
8262 constrained_call = NULL;
8264 emit_seq_point (cfg, method, ip, FALSE);
8268 if (cfg->method != method) {
8269 /* return from inlined method */
8271 * If in_count == 0, that means the ret is unreachable due to
8272 * being preceded by a throw. In that case, inline_method () will
8273 * handle setting the return value
8274 * (test case: test_0_inline_throw ()).
8276 if (return_var && cfg->cbb->in_count) {
8277 MonoType *ret_type = mono_method_signature (method)->ret;
8283 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8286 //g_assert (returnvar != -1);
8287 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8288 cfg->ret_var_set = TRUE;
8292 MonoType *ret_type = mono_method_signature (method)->ret;
8294 if (seq_points && !sym_seq_points) {
8296 * Place a seq point here too even though the IL stack is not
8297 * empty, so a step over on
8300 * will work correctly.
8302 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8303 MONO_ADD_INS (cfg->cbb, ins);
8306 g_assert (!return_var);
8310 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8313 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8316 if (!cfg->vret_addr) {
8319 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8321 EMIT_NEW_RETLOADA (cfg, ret_addr);
8323 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8324 ins->klass = mono_class_from_mono_type (ret_type);
8327 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8328 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8329 MonoInst *iargs [1];
8333 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8334 mono_arch_emit_setret (cfg, method, conv);
8336 mono_arch_emit_setret (cfg, method, *sp);
8339 mono_arch_emit_setret (cfg, method, *sp);
8344 if (sp != stack_start)
8346 MONO_INST_NEW (cfg, ins, OP_BR);
8348 ins->inst_target_bb = end_bblock;
8349 MONO_ADD_INS (bblock, ins);
8350 link_bblock (cfg, bblock, end_bblock);
8351 start_new_bblock = 1;
8355 MONO_INST_NEW (cfg, ins, OP_BR);
8357 target = ip + 1 + (signed char)(*ip);
8359 GET_BBLOCK (cfg, tblock, target);
8360 link_bblock (cfg, bblock, tblock);
8361 ins->inst_target_bb = tblock;
8362 if (sp != stack_start) {
8363 handle_stack_args (cfg, stack_start, sp - stack_start);
8365 CHECK_UNVERIFIABLE (cfg);
8367 MONO_ADD_INS (bblock, ins);
8368 start_new_bblock = 1;
8369 inline_costs += BRANCH_COST;
8383 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8385 target = ip + 1 + *(signed char*)ip;
8391 inline_costs += BRANCH_COST;
8395 MONO_INST_NEW (cfg, ins, OP_BR);
8398 target = ip + 4 + (gint32)read32(ip);
8400 GET_BBLOCK (cfg, tblock, target);
8401 link_bblock (cfg, bblock, tblock);
8402 ins->inst_target_bb = tblock;
8403 if (sp != stack_start) {
8404 handle_stack_args (cfg, stack_start, sp - stack_start);
8406 CHECK_UNVERIFIABLE (cfg);
8409 MONO_ADD_INS (bblock, ins);
8411 start_new_bblock = 1;
8412 inline_costs += BRANCH_COST;
8419 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8420 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8421 guint32 opsize = is_short ? 1 : 4;
8423 CHECK_OPSIZE (opsize);
8425 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8428 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8433 GET_BBLOCK (cfg, tblock, target);
8434 link_bblock (cfg, bblock, tblock);
8435 GET_BBLOCK (cfg, tblock, ip);
8436 link_bblock (cfg, bblock, tblock);
8438 if (sp != stack_start) {
8439 handle_stack_args (cfg, stack_start, sp - stack_start);
8440 CHECK_UNVERIFIABLE (cfg);
8443 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8444 cmp->sreg1 = sp [0]->dreg;
8445 type_from_op (cmp, sp [0], NULL);
8448 #if SIZEOF_REGISTER == 4
8449 if (cmp->opcode == OP_LCOMPARE_IMM) {
8450 /* Convert it to OP_LCOMPARE */
8451 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8452 ins->type = STACK_I8;
8453 ins->dreg = alloc_dreg (cfg, STACK_I8);
8455 MONO_ADD_INS (bblock, ins);
8456 cmp->opcode = OP_LCOMPARE;
8457 cmp->sreg2 = ins->dreg;
8460 MONO_ADD_INS (bblock, cmp);
8462 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8463 type_from_op (ins, sp [0], NULL);
8464 MONO_ADD_INS (bblock, ins);
8465 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8466 GET_BBLOCK (cfg, tblock, target);
8467 ins->inst_true_bb = tblock;
8468 GET_BBLOCK (cfg, tblock, ip);
8469 ins->inst_false_bb = tblock;
8470 start_new_bblock = 2;
8473 inline_costs += BRANCH_COST;
8488 MONO_INST_NEW (cfg, ins, *ip);
8490 target = ip + 4 + (gint32)read32(ip);
8496 inline_costs += BRANCH_COST;
8500 MonoBasicBlock **targets;
8501 MonoBasicBlock *default_bblock;
8502 MonoJumpInfoBBTable *table;
8503 int offset_reg = alloc_preg (cfg);
8504 int target_reg = alloc_preg (cfg);
8505 int table_reg = alloc_preg (cfg);
8506 int sum_reg = alloc_preg (cfg);
8507 gboolean use_op_switch;
8511 n = read32 (ip + 1);
8514 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8518 CHECK_OPSIZE (n * sizeof (guint32));
8519 target = ip + n * sizeof (guint32);
8521 GET_BBLOCK (cfg, default_bblock, target);
8522 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8524 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8525 for (i = 0; i < n; ++i) {
8526 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8527 targets [i] = tblock;
8528 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8532 if (sp != stack_start) {
8534 * Link the current bb with the targets as well, so handle_stack_args
8535 * will set their in_stack correctly.
8537 link_bblock (cfg, bblock, default_bblock);
8538 for (i = 0; i < n; ++i)
8539 link_bblock (cfg, bblock, targets [i]);
8541 handle_stack_args (cfg, stack_start, sp - stack_start);
8543 CHECK_UNVERIFIABLE (cfg);
8546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8547 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8550 for (i = 0; i < n; ++i)
8551 link_bblock (cfg, bblock, targets [i]);
8553 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8554 table->table = targets;
8555 table->table_size = n;
8557 use_op_switch = FALSE;
8559 /* ARM implements SWITCH statements differently */
8560 /* FIXME: Make it use the generic implementation */
8561 if (!cfg->compile_aot)
8562 use_op_switch = TRUE;
8565 if (COMPILE_LLVM (cfg))
8566 use_op_switch = TRUE;
8568 cfg->cbb->has_jump_table = 1;
8570 if (use_op_switch) {
8571 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8572 ins->sreg1 = src1->dreg;
8573 ins->inst_p0 = table;
8574 ins->inst_many_bb = targets;
8575 ins->klass = GUINT_TO_POINTER (n);
8576 MONO_ADD_INS (cfg->cbb, ins);
8578 if (sizeof (gpointer) == 8)
8579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8583 #if SIZEOF_REGISTER == 8
8584 /* The upper word might not be zero, and we add it to a 64 bit address later */
8585 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8588 if (cfg->compile_aot) {
8589 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8591 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8592 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8593 ins->inst_p0 = table;
8594 ins->dreg = table_reg;
8595 MONO_ADD_INS (cfg->cbb, ins);
8598 /* FIXME: Use load_memindex */
8599 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8601 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8603 start_new_bblock = 1;
8604 inline_costs += (BRANCH_COST * 2);
8624 dreg = alloc_freg (cfg);
8627 dreg = alloc_lreg (cfg);
8630 dreg = alloc_ireg_ref (cfg);
8633 dreg = alloc_preg (cfg);
8636 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8637 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8638 ins->flags |= ins_flag;
8640 MONO_ADD_INS (bblock, ins);
8642 if (ins->flags & MONO_INST_VOLATILE) {
8643 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8644 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8645 emit_memory_barrier (cfg, FullBarrier);
8660 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8661 ins->flags |= ins_flag;
8664 if (ins->flags & MONO_INST_VOLATILE) {
8665 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8666 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8667 emit_memory_barrier (cfg, FullBarrier);
8670 MONO_ADD_INS (bblock, ins);
8672 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8673 emit_write_barrier (cfg, sp [0], sp [1]);
8682 MONO_INST_NEW (cfg, ins, (*ip));
8684 ins->sreg1 = sp [0]->dreg;
8685 ins->sreg2 = sp [1]->dreg;
8686 type_from_op (ins, sp [0], sp [1]);
8688 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8690 /* Use the immediate opcodes if possible */
8691 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8692 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8693 if (imm_opcode != -1) {
8694 ins->opcode = imm_opcode;
8695 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8698 sp [1]->opcode = OP_NOP;
8702 MONO_ADD_INS ((cfg)->cbb, (ins));
8704 *sp++ = mono_decompose_opcode (cfg, ins);
8721 MONO_INST_NEW (cfg, ins, (*ip));
8723 ins->sreg1 = sp [0]->dreg;
8724 ins->sreg2 = sp [1]->dreg;
8725 type_from_op (ins, sp [0], sp [1]);
8727 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8728 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8730 /* FIXME: Pass opcode to is_inst_imm */
8732 /* Use the immediate opcodes if possible */
8733 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8736 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8737 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8738 /* Keep emulated opcodes which are optimized away later */
8739 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8740 imm_opcode = mono_op_to_op_imm (ins->opcode);
8743 if (imm_opcode != -1) {
8744 ins->opcode = imm_opcode;
8745 if (sp [1]->opcode == OP_I8CONST) {
8746 #if SIZEOF_REGISTER == 8
8747 ins->inst_imm = sp [1]->inst_l;
8749 ins->inst_ls_word = sp [1]->inst_ls_word;
8750 ins->inst_ms_word = sp [1]->inst_ms_word;
8754 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8757 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8758 if (sp [1]->next == NULL)
8759 sp [1]->opcode = OP_NOP;
8762 MONO_ADD_INS ((cfg)->cbb, (ins));
8764 *sp++ = mono_decompose_opcode (cfg, ins);
8777 case CEE_CONV_OVF_I8:
8778 case CEE_CONV_OVF_U8:
8782 /* Special case this earlier so we have long constants in the IR */
8783 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8784 int data = sp [-1]->inst_c0;
8785 sp [-1]->opcode = OP_I8CONST;
8786 sp [-1]->type = STACK_I8;
8787 #if SIZEOF_REGISTER == 8
8788 if ((*ip) == CEE_CONV_U8)
8789 sp [-1]->inst_c0 = (guint32)data;
8791 sp [-1]->inst_c0 = data;
8793 sp [-1]->inst_ls_word = data;
8794 if ((*ip) == CEE_CONV_U8)
8795 sp [-1]->inst_ms_word = 0;
8797 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8799 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8806 case CEE_CONV_OVF_I4:
8807 case CEE_CONV_OVF_I1:
8808 case CEE_CONV_OVF_I2:
8809 case CEE_CONV_OVF_I:
8810 case CEE_CONV_OVF_U:
8813 if (sp [-1]->type == STACK_R8) {
8814 ADD_UNOP (CEE_CONV_OVF_I8);
8821 case CEE_CONV_OVF_U1:
8822 case CEE_CONV_OVF_U2:
8823 case CEE_CONV_OVF_U4:
8826 if (sp [-1]->type == STACK_R8) {
8827 ADD_UNOP (CEE_CONV_OVF_U8);
8834 case CEE_CONV_OVF_I1_UN:
8835 case CEE_CONV_OVF_I2_UN:
8836 case CEE_CONV_OVF_I4_UN:
8837 case CEE_CONV_OVF_I8_UN:
8838 case CEE_CONV_OVF_U1_UN:
8839 case CEE_CONV_OVF_U2_UN:
8840 case CEE_CONV_OVF_U4_UN:
8841 case CEE_CONV_OVF_U8_UN:
8842 case CEE_CONV_OVF_I_UN:
8843 case CEE_CONV_OVF_U_UN:
8850 CHECK_CFG_EXCEPTION;
8854 case CEE_ADD_OVF_UN:
8856 case CEE_MUL_OVF_UN:
8858 case CEE_SUB_OVF_UN:
8864 GSHAREDVT_FAILURE (*ip);
8867 token = read32 (ip + 1);
8868 klass = mini_get_class (method, token, generic_context);
8869 CHECK_TYPELOAD (klass);
8871 if (generic_class_is_reference_type (cfg, klass)) {
8872 MonoInst *store, *load;
8873 int dreg = alloc_ireg_ref (cfg);
8875 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8876 load->flags |= ins_flag;
8877 MONO_ADD_INS (cfg->cbb, load);
8879 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8880 store->flags |= ins_flag;
8881 MONO_ADD_INS (cfg->cbb, store);
8883 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8884 emit_write_barrier (cfg, sp [0], sp [1]);
8886 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8898 token = read32 (ip + 1);
8899 klass = mini_get_class (method, token, generic_context);
8900 CHECK_TYPELOAD (klass);
8902 /* Optimize the common ldobj+stloc combination */
8912 loc_index = ip [5] - CEE_STLOC_0;
8919 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8920 CHECK_LOCAL (loc_index);
8922 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8923 ins->dreg = cfg->locals [loc_index]->dreg;
8929 /* Optimize the ldobj+stobj combination */
8930 /* The reference case ends up being a load+store anyway */
8931 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8936 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8943 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8952 CHECK_STACK_OVF (1);
8954 n = read32 (ip + 1);
8956 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8957 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8958 ins->type = STACK_OBJ;
8961 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8962 MonoInst *iargs [1];
8964 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8965 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8967 if (cfg->opt & MONO_OPT_SHARED) {
8968 MonoInst *iargs [3];
8970 if (cfg->compile_aot) {
8971 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8973 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8974 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8975 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8976 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8977 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8979 if (bblock->out_of_line) {
8980 MonoInst *iargs [2];
8982 if (image == mono_defaults.corlib) {
8984 * Avoid relocations in AOT and save some space by using a
8985 * version of helper_ldstr specialized to mscorlib.
8987 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8988 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8990 /* Avoid creating the string object */
8991 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8992 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8993 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8997 if (cfg->compile_aot) {
8998 NEW_LDSTRCONST (cfg, ins, image, n);
9000 MONO_ADD_INS (bblock, ins);
9003 NEW_PCONST (cfg, ins, NULL);
9004 ins->type = STACK_OBJ;
9005 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9007 OUT_OF_MEMORY_FAILURE;
9010 MONO_ADD_INS (bblock, ins);
9019 MonoInst *iargs [2];
9020 MonoMethodSignature *fsig;
9023 MonoInst *vtable_arg = NULL;
9026 token = read32 (ip + 1);
9027 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9028 if (!cmethod || mono_loader_get_last_error ())
9030 fsig = mono_method_get_signature (cmethod, image, token);
9034 mono_save_token_info (cfg, image, token, cmethod);
9036 if (!mono_class_init (cmethod->klass))
9037 TYPE_LOAD_ERROR (cmethod->klass);
9039 context_used = mini_method_check_context_used (cfg, cmethod);
9041 if (mono_security_cas_enabled ()) {
9042 if (check_linkdemand (cfg, method, cmethod))
9043 INLINE_FAILURE ("linkdemand");
9044 CHECK_CFG_EXCEPTION;
9045 } else if (mono_security_core_clr_enabled ()) {
9046 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9049 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9050 emit_generic_class_init (cfg, cmethod->klass);
9051 CHECK_TYPELOAD (cmethod->klass);
9055 if (cfg->gsharedvt) {
9056 if (mini_is_gsharedvt_variable_signature (sig))
9057 GSHAREDVT_FAILURE (*ip);
9061 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9062 mono_method_is_generic_sharable (cmethod, TRUE)) {
9063 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9064 mono_class_vtable (cfg->domain, cmethod->klass);
9065 CHECK_TYPELOAD (cmethod->klass);
9067 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9068 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9071 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9072 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9074 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9076 CHECK_TYPELOAD (cmethod->klass);
9077 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9082 n = fsig->param_count;
9086 * Generate smaller code for the common newobj <exception> instruction in
9087 * argument checking code.
9089 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9090 is_exception_class (cmethod->klass) && n <= 2 &&
9091 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9092 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9093 MonoInst *iargs [3];
9095 g_assert (!vtable_arg);
9099 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9102 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9106 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9111 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9114 g_assert_not_reached ();
9122 /* move the args to allow room for 'this' in the first position */
9128 /* check_call_signature () requires sp[0] to be set */
9129 this_ins.type = STACK_OBJ;
9131 if (check_call_signature (cfg, fsig, sp))
9136 if (mini_class_is_system_array (cmethod->klass)) {
9137 g_assert (!vtable_arg);
9139 *sp = emit_get_rgctx_method (cfg, context_used,
9140 cmethod, MONO_RGCTX_INFO_METHOD);
9142 /* Avoid varargs in the common case */
9143 if (fsig->param_count == 1)
9144 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9145 else if (fsig->param_count == 2)
9146 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9147 else if (fsig->param_count == 3)
9148 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9149 else if (fsig->param_count == 4)
9150 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9152 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9153 } else if (cmethod->string_ctor) {
9154 g_assert (!context_used);
9155 g_assert (!vtable_arg);
9156 /* we simply pass a null pointer */
9157 EMIT_NEW_PCONST (cfg, *sp, NULL);
9158 /* now call the string ctor */
9159 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9161 MonoInst* callvirt_this_arg = NULL;
9163 if (cmethod->klass->valuetype) {
9164 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9165 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9166 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9171 * The code generated by mini_emit_virtual_call () expects
9172 * iargs [0] to be a boxed instance, but luckily the vcall
9173 * will be transformed into a normal call there.
9175 } else if (context_used) {
9176 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9179 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9181 CHECK_TYPELOAD (cmethod->klass);
9184 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9185 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9186 * As a workaround, we call class cctors before allocating objects.
9188 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9189 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9190 if (cfg->verbose_level > 2)
9191 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9192 class_inits = g_slist_prepend (class_inits, vtable);
9195 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9198 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9201 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9203 /* Now call the actual ctor */
9204 /* Avoid virtual calls to ctors if possible */
9205 if (mono_class_is_marshalbyref (cmethod->klass))
9206 callvirt_this_arg = sp [0];
9209 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9210 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9211 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9216 CHECK_CFG_EXCEPTION;
9217 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9218 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9219 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9220 !g_list_find (dont_inline, cmethod)) {
9223 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9224 cfg->real_offset += 5;
9227 inline_costs += costs - 5;
9229 INLINE_FAILURE ("inline failure");
9230 // FIXME-VT: Clean this up
9231 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9232 GSHAREDVT_FAILURE(*ip);
9233 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9235 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9238 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9239 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9240 } else if (context_used &&
9241 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9242 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9243 MonoInst *cmethod_addr;
9245 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9246 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9248 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9250 INLINE_FAILURE ("ctor call");
9251 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9252 callvirt_this_arg, NULL, vtable_arg);
9256 if (alloc == NULL) {
9258 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9259 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9273 token = read32 (ip + 1);
9274 klass = mini_get_class (method, token, generic_context);
9275 CHECK_TYPELOAD (klass);
9276 if (sp [0]->type != STACK_OBJ)
9279 context_used = mini_class_check_context_used (cfg, klass);
9281 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9282 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9289 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9292 if (cfg->compile_aot)
9293 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9295 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9297 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9298 save_cast_details (cfg, klass, sp [0]->dreg);
9299 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9300 reset_cast_details (cfg);
9303 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9304 MonoMethod *mono_castclass;
9305 MonoInst *iargs [1];
9308 mono_castclass = mono_marshal_get_castclass (klass);
9311 save_cast_details (cfg, klass, sp [0]->dreg);
9312 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9313 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9314 reset_cast_details (cfg);
9315 CHECK_CFG_EXCEPTION;
9316 g_assert (costs > 0);
9319 cfg->real_offset += 5;
9324 inline_costs += costs;
9327 ins = handle_castclass (cfg, klass, *sp, context_used);
9328 CHECK_CFG_EXCEPTION;
9338 token = read32 (ip + 1);
9339 klass = mini_get_class (method, token, generic_context);
9340 CHECK_TYPELOAD (klass);
9341 if (sp [0]->type != STACK_OBJ)
9344 context_used = mini_class_check_context_used (cfg, klass);
9346 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9347 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9354 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9357 if (cfg->compile_aot)
9358 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9360 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9362 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9365 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9366 MonoMethod *mono_isinst;
9367 MonoInst *iargs [1];
9370 mono_isinst = mono_marshal_get_isinst (klass);
9373 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9374 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9375 CHECK_CFG_EXCEPTION;
9376 g_assert (costs > 0);
9379 cfg->real_offset += 5;
9384 inline_costs += costs;
9387 ins = handle_isinst (cfg, klass, *sp, context_used);
9388 CHECK_CFG_EXCEPTION;
9395 case CEE_UNBOX_ANY: {
9399 token = read32 (ip + 1);
9400 klass = mini_get_class (method, token, generic_context);
9401 CHECK_TYPELOAD (klass);
9403 mono_save_token_info (cfg, image, token, klass);
9405 context_used = mini_class_check_context_used (cfg, klass);
9407 if (mini_is_gsharedvt_klass (cfg, klass)) {
9408 *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
9416 if (generic_class_is_reference_type (cfg, klass)) {
9417 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9418 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9419 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9426 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9429 /*FIXME AOT support*/
9430 if (cfg->compile_aot)
9431 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9433 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9435 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9436 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9439 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9440 MonoMethod *mono_castclass;
9441 MonoInst *iargs [1];
9444 mono_castclass = mono_marshal_get_castclass (klass);
9447 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9448 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9449 CHECK_CFG_EXCEPTION;
9450 g_assert (costs > 0);
9453 cfg->real_offset += 5;
9457 inline_costs += costs;
9459 ins = handle_castclass (cfg, klass, *sp, context_used);
9460 CHECK_CFG_EXCEPTION;
9468 if (mono_class_is_nullable (klass)) {
9469 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9476 ins = handle_unbox (cfg, klass, sp, context_used);
9482 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9495 token = read32 (ip + 1);
9496 klass = mini_get_class (method, token, generic_context);
9497 CHECK_TYPELOAD (klass);
9499 mono_save_token_info (cfg, image, token, klass);
9501 context_used = mini_class_check_context_used (cfg, klass);
9503 if (generic_class_is_reference_type (cfg, klass)) {
9509 if (klass == mono_defaults.void_class)
9511 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9513 /* frequent check in generic code: box (struct), brtrue */
9515 // FIXME: LLVM can't handle the inconsistent bb linking
9516 if (!mono_class_is_nullable (klass) &&
9517 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9518 (ip [5] == CEE_BRTRUE ||
9519 ip [5] == CEE_BRTRUE_S ||
9520 ip [5] == CEE_BRFALSE ||
9521 ip [5] == CEE_BRFALSE_S)) {
9522 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9524 MonoBasicBlock *true_bb, *false_bb;
9528 if (cfg->verbose_level > 3) {
9529 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9530 printf ("<box+brtrue opt>\n");
9538 target = ip + 1 + (signed char)(*ip);
9545 target = ip + 4 + (gint)(read32 (ip));
9549 g_assert_not_reached ();
9553 * We need to link both bblocks, since it is needed for handling stack
9554 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9555 * Branching to only one of them would lead to inconsistencies, so
9556 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9558 GET_BBLOCK (cfg, true_bb, target);
9559 GET_BBLOCK (cfg, false_bb, ip);
9561 mono_link_bblock (cfg, cfg->cbb, true_bb);
9562 mono_link_bblock (cfg, cfg->cbb, false_bb);
9564 if (sp != stack_start) {
9565 handle_stack_args (cfg, stack_start, sp - stack_start);
9567 CHECK_UNVERIFIABLE (cfg);
9570 if (COMPILE_LLVM (cfg)) {
9571 dreg = alloc_ireg (cfg);
9572 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9575 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9577 /* The JIT can't eliminate the iconst+compare */
9578 MONO_INST_NEW (cfg, ins, OP_BR);
9579 ins->inst_target_bb = is_true ? true_bb : false_bb;
9580 MONO_ADD_INS (cfg->cbb, ins);
9583 start_new_bblock = 1;
9587 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9589 CHECK_CFG_EXCEPTION;
9598 token = read32 (ip + 1);
9599 klass = mini_get_class (method, token, generic_context);
9600 CHECK_TYPELOAD (klass);
9602 mono_save_token_info (cfg, image, token, klass);
9604 context_used = mini_class_check_context_used (cfg, klass);
9606 if (mono_class_is_nullable (klass)) {
9609 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9610 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9614 ins = handle_unbox (cfg, klass, sp, context_used);
9627 MonoClassField *field;
9628 #ifndef DISABLE_REMOTING
9632 gboolean is_instance;
9634 gpointer addr = NULL;
9635 gboolean is_special_static;
9637 MonoInst *store_val = NULL;
9640 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9642 if (op == CEE_STFLD) {
9650 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9652 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9655 if (op == CEE_STSFLD) {
9663 token = read32 (ip + 1);
9664 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9665 field = mono_method_get_wrapper_data (method, token);
9666 klass = field->parent;
9669 field = mono_field_from_token (image, token, &klass, generic_context);
9673 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9674 FIELD_ACCESS_FAILURE;
9675 mono_class_init (klass);
9677 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9680 /* if the class is Critical then transparent code cannot access its fields */
9681 if (!is_instance && mono_security_core_clr_enabled ())
9682 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9684 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9685 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9686 if (mono_security_core_clr_enabled ())
9687 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9691 * LDFLD etc. is usable on static fields as well, so convert those cases to
9694 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9706 g_assert_not_reached ();
9708 is_instance = FALSE;
9711 context_used = mini_class_check_context_used (cfg, klass);
9715 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9716 if (op == CEE_STFLD) {
9717 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9719 #ifndef DISABLE_REMOTING
9720 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9721 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9722 MonoInst *iargs [5];
9724 GSHAREDVT_FAILURE (op);
9727 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9728 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9729 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9733 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9734 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9735 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9736 CHECK_CFG_EXCEPTION;
9737 g_assert (costs > 0);
9739 cfg->real_offset += 5;
9742 inline_costs += costs;
9744 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9751 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9753 if (mini_is_gsharedvt_klass (cfg, klass)) {
9754 MonoInst *offset_ins;
9756 context_used = mini_class_check_context_used (cfg, klass);
9758 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9759 dreg = alloc_ireg_mp (cfg);
9760 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9761 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9762 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9764 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9766 if (sp [0]->opcode != OP_LDADDR)
9767 store->flags |= MONO_INST_FAULT;
9769 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9770 /* insert call to write barrier */
9774 dreg = alloc_ireg_mp (cfg);
9775 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9776 emit_write_barrier (cfg, ptr, sp [1]);
9779 store->flags |= ins_flag;
9786 #ifndef DISABLE_REMOTING
9787 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9788 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9789 MonoInst *iargs [4];
9791 GSHAREDVT_FAILURE (op);
9794 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9795 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9796 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9797 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9798 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9799 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9800 CHECK_CFG_EXCEPTION;
9802 g_assert (costs > 0);
9804 cfg->real_offset += 5;
9808 inline_costs += costs;
9810 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9816 if (sp [0]->type == STACK_VTYPE) {
9819 /* Have to compute the address of the variable */
9821 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9823 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9825 g_assert (var->klass == klass);
9827 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9831 if (op == CEE_LDFLDA) {
9832 if (is_magic_tls_access (field)) {
9833 GSHAREDVT_FAILURE (*ip);
9835 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9837 if (sp [0]->type == STACK_OBJ) {
9838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9839 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9842 dreg = alloc_ireg_mp (cfg);
9844 if (mini_is_gsharedvt_klass (cfg, klass)) {
9845 MonoInst *offset_ins;
9847 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9848 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9850 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9852 ins->klass = mono_class_from_mono_type (field->type);
9853 ins->type = STACK_MP;
9859 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9861 if (mini_is_gsharedvt_klass (cfg, klass)) {
9862 MonoInst *offset_ins;
9864 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9865 dreg = alloc_ireg_mp (cfg);
9866 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9867 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9869 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9871 load->flags |= ins_flag;
9872 if (sp [0]->opcode != OP_LDADDR)
9873 load->flags |= MONO_INST_FAULT;
9887 * We can only support shared generic static
9888 * field access on architectures where the
9889 * trampoline code has been extended to handle
9890 * the generic class init.
9892 #ifndef MONO_ARCH_VTABLE_REG
9893 GENERIC_SHARING_FAILURE (op);
9896 context_used = mini_class_check_context_used (cfg, klass);
9898 ftype = mono_field_get_type (field);
9900 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9903 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9904 * to be called here.
9906 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9907 mono_class_vtable (cfg->domain, klass);
9908 CHECK_TYPELOAD (klass);
9910 mono_domain_lock (cfg->domain);
9911 if (cfg->domain->special_static_fields)
9912 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9913 mono_domain_unlock (cfg->domain);
9915 is_special_static = mono_class_field_is_special_static (field);
9917 /* Generate IR to compute the field address */
9918 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9920 * Fast access to TLS data
9921 * Inline version of get_thread_static_data () in
9925 int idx, static_data_reg, array_reg, dreg;
9926 MonoInst *thread_ins;
9928 GSHAREDVT_FAILURE (op);
9930 // offset &= 0x7fffffff;
9931 // idx = (offset >> 24) - 1;
9932 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9934 thread_ins = mono_get_thread_intrinsic (cfg);
9935 MONO_ADD_INS (cfg->cbb, thread_ins);
9936 static_data_reg = alloc_ireg (cfg);
9937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9939 if (cfg->compile_aot) {
9940 int offset_reg, offset2_reg, idx_reg;
9942 /* For TLS variables, this will return the TLS offset */
9943 EMIT_NEW_SFLDACONST (cfg, ins, field);
9944 offset_reg = ins->dreg;
9945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9946 idx_reg = alloc_ireg (cfg);
9947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9950 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9951 array_reg = alloc_ireg (cfg);
9952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9953 offset2_reg = alloc_ireg (cfg);
9954 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9955 dreg = alloc_ireg (cfg);
9956 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9958 offset = (gsize)addr & 0x7fffffff;
9959 idx = (offset >> 24) - 1;
9961 array_reg = alloc_ireg (cfg);
9962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9963 dreg = alloc_ireg (cfg);
9964 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9966 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9967 (cfg->compile_aot && is_special_static) ||
9968 (context_used && is_special_static)) {
9969 MonoInst *iargs [2];
9971 g_assert (field->parent);
9972 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9974 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9975 field, MONO_RGCTX_INFO_CLASS_FIELD);
9977 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9979 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9980 } else if (context_used) {
9981 MonoInst *static_data;
9984 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9985 method->klass->name_space, method->klass->name, method->name,
9986 depth, field->offset);
9989 if (mono_class_needs_cctor_run (klass, method))
9990 emit_generic_class_init (cfg, klass);
9993 * The pointer we're computing here is
9995 * super_info.static_data + field->offset
9997 static_data = emit_get_rgctx_klass (cfg, context_used,
9998 klass, MONO_RGCTX_INFO_STATIC_DATA);
10000 if (mini_is_gsharedvt_klass (cfg, klass)) {
10001 MonoInst *offset_ins;
10003 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10004 dreg = alloc_ireg_mp (cfg);
10005 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10006 } else if (field->offset == 0) {
10009 int addr_reg = mono_alloc_preg (cfg);
10010 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10012 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10013 MonoInst *iargs [2];
10015 g_assert (field->parent);
10016 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10017 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10018 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10020 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
10022 CHECK_TYPELOAD (klass);
10024 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
10025 if (!(g_slist_find (class_inits, vtable))) {
10026 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
10027 if (cfg->verbose_level > 2)
10028 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10029 class_inits = g_slist_prepend (class_inits, vtable);
10032 if (cfg->run_cctors) {
10034 /* This makes so that inline cannot trigger */
10035 /* .cctors: too many apps depend on them */
10036 /* running with a specific order... */
10037 if (! vtable->initialized)
10038 INLINE_FAILURE ("class init");
10039 ex = mono_runtime_class_init_full (vtable, FALSE);
10041 set_exception_object (cfg, ex);
10042 goto exception_exit;
10046 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10048 if (cfg->compile_aot)
10049 EMIT_NEW_SFLDACONST (cfg, ins, field);
10051 EMIT_NEW_PCONST (cfg, ins, addr);
10053 MonoInst *iargs [1];
10054 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10055 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10059 /* Generate IR to do the actual load/store operation */
10061 if (op == CEE_LDSFLDA) {
10062 ins->klass = mono_class_from_mono_type (ftype);
10063 ins->type = STACK_PTR;
10065 } else if (op == CEE_STSFLD) {
10068 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10069 store->flags |= ins_flag;
10071 gboolean is_const = FALSE;
10072 MonoVTable *vtable = NULL;
10073 gpointer addr = NULL;
10075 if (!context_used) {
10076 vtable = mono_class_vtable (cfg->domain, klass);
10077 CHECK_TYPELOAD (klass);
10079 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10080 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10081 int ro_type = ftype->type;
10083 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10084 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10085 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10088 GSHAREDVT_FAILURE (op);
10090 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10093 case MONO_TYPE_BOOLEAN:
10095 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10099 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10102 case MONO_TYPE_CHAR:
10104 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10108 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10113 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10117 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10122 case MONO_TYPE_PTR:
10123 case MONO_TYPE_FNPTR:
10124 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10125 type_to_eval_stack_type ((cfg), field->type, *sp);
10128 case MONO_TYPE_STRING:
10129 case MONO_TYPE_OBJECT:
10130 case MONO_TYPE_CLASS:
10131 case MONO_TYPE_SZARRAY:
10132 case MONO_TYPE_ARRAY:
10133 if (!mono_gc_is_moving ()) {
10134 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10135 type_to_eval_stack_type ((cfg), field->type, *sp);
10143 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10148 case MONO_TYPE_VALUETYPE:
10158 CHECK_STACK_OVF (1);
10160 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10161 load->flags |= ins_flag;
10174 token = read32 (ip + 1);
10175 klass = mini_get_class (method, token, generic_context);
10176 CHECK_TYPELOAD (klass);
10177 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10178 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10179 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10180 generic_class_is_reference_type (cfg, klass)) {
10181 /* insert call to write barrier */
10182 emit_write_barrier (cfg, sp [0], sp [1]);
10194 const char *data_ptr;
10196 guint32 field_token;
10202 token = read32 (ip + 1);
10204 klass = mini_get_class (method, token, generic_context);
10205 CHECK_TYPELOAD (klass);
10207 context_used = mini_class_check_context_used (cfg, klass);
10209 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10210 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10211 ins->sreg1 = sp [0]->dreg;
10212 ins->type = STACK_I4;
10213 ins->dreg = alloc_ireg (cfg);
10214 MONO_ADD_INS (cfg->cbb, ins);
10215 *sp = mono_decompose_opcode (cfg, ins);
10218 if (context_used) {
10219 MonoInst *args [3];
10220 MonoClass *array_class = mono_array_class_get (klass, 1);
10221 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10223 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10226 args [0] = emit_get_rgctx_klass (cfg, context_used,
10227 array_class, MONO_RGCTX_INFO_VTABLE);
10232 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10234 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10236 if (cfg->opt & MONO_OPT_SHARED) {
10237 /* Decompose now to avoid problems with references to the domainvar */
10238 MonoInst *iargs [3];
10240 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10241 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10242 iargs [2] = sp [0];
10244 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10246 /* Decompose later since it is needed by abcrem */
10247 MonoClass *array_type = mono_array_class_get (klass, 1);
10248 mono_class_vtable (cfg->domain, array_type);
10249 CHECK_TYPELOAD (array_type);
10251 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10252 ins->dreg = alloc_ireg_ref (cfg);
10253 ins->sreg1 = sp [0]->dreg;
10254 ins->inst_newa_class = klass;
10255 ins->type = STACK_OBJ;
10256 ins->klass = array_type;
10257 MONO_ADD_INS (cfg->cbb, ins);
10258 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10259 cfg->cbb->has_array_access = TRUE;
10261 /* Needed so mono_emit_load_get_addr () gets called */
10262 mono_get_got_var (cfg);
10272 * we inline/optimize the initialization sequence if possible.
10273 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10274 * for small sizes open code the memcpy
10275 * ensure the rva field is big enough
10277 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10278 MonoMethod *memcpy_method = get_memcpy_method ();
10279 MonoInst *iargs [3];
10280 int add_reg = alloc_ireg_mp (cfg);
10282 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10283 if (cfg->compile_aot) {
10284 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10286 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10288 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10289 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10298 if (sp [0]->type != STACK_OBJ)
10301 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10302 ins->dreg = alloc_preg (cfg);
10303 ins->sreg1 = sp [0]->dreg;
10304 ins->type = STACK_I4;
10305 /* This flag will be inherited by the decomposition */
10306 ins->flags |= MONO_INST_FAULT;
10307 MONO_ADD_INS (cfg->cbb, ins);
10308 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10309 cfg->cbb->has_array_access = TRUE;
10317 if (sp [0]->type != STACK_OBJ)
10320 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10322 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10323 CHECK_TYPELOAD (klass);
10324 /* we need to make sure that this array is exactly the type it needs
10325 * to be for correctness. the wrappers are lax with their usage
10326 * so we need to ignore them here
10328 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10329 MonoClass *array_class = mono_array_class_get (klass, 1);
10330 mini_emit_check_array_type (cfg, sp [0], array_class);
10331 CHECK_TYPELOAD (array_class);
10335 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10340 case CEE_LDELEM_I1:
10341 case CEE_LDELEM_U1:
10342 case CEE_LDELEM_I2:
10343 case CEE_LDELEM_U2:
10344 case CEE_LDELEM_I4:
10345 case CEE_LDELEM_U4:
10346 case CEE_LDELEM_I8:
10348 case CEE_LDELEM_R4:
10349 case CEE_LDELEM_R8:
10350 case CEE_LDELEM_REF: {
10356 if (*ip == CEE_LDELEM) {
10358 token = read32 (ip + 1);
10359 klass = mini_get_class (method, token, generic_context);
10360 CHECK_TYPELOAD (klass);
10361 mono_class_init (klass);
10364 klass = array_access_to_klass (*ip);
10366 if (sp [0]->type != STACK_OBJ)
10369 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10371 if (mini_is_gsharedvt_klass (cfg, klass)) {
10372 // FIXME-VT: OP_ICONST optimization
10373 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10375 ins->opcode = OP_LOADV_MEMBASE;
10376 } else if (sp [1]->opcode == OP_ICONST) {
10377 int array_reg = sp [0]->dreg;
10378 int index_reg = sp [1]->dreg;
10379 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10381 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10382 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10384 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10385 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10388 if (*ip == CEE_LDELEM)
10395 case CEE_STELEM_I1:
10396 case CEE_STELEM_I2:
10397 case CEE_STELEM_I4:
10398 case CEE_STELEM_I8:
10399 case CEE_STELEM_R4:
10400 case CEE_STELEM_R8:
10401 case CEE_STELEM_REF:
10406 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10408 if (*ip == CEE_STELEM) {
10410 token = read32 (ip + 1);
10411 klass = mini_get_class (method, token, generic_context);
10412 CHECK_TYPELOAD (klass);
10413 mono_class_init (klass);
10416 klass = array_access_to_klass (*ip);
10418 if (sp [0]->type != STACK_OBJ)
10421 emit_array_store (cfg, klass, sp, TRUE);
10423 if (*ip == CEE_STELEM)
10430 case CEE_CKFINITE: {
10434 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10435 ins->sreg1 = sp [0]->dreg;
10436 ins->dreg = alloc_freg (cfg);
10437 ins->type = STACK_R8;
10438 MONO_ADD_INS (bblock, ins);
10440 *sp++ = mono_decompose_opcode (cfg, ins);
10445 case CEE_REFANYVAL: {
10446 MonoInst *src_var, *src;
10448 int klass_reg = alloc_preg (cfg);
10449 int dreg = alloc_preg (cfg);
10451 GSHAREDVT_FAILURE (*ip);
10454 MONO_INST_NEW (cfg, ins, *ip);
10457 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10458 CHECK_TYPELOAD (klass);
10459 mono_class_init (klass);
10461 context_used = mini_class_check_context_used (cfg, klass);
10464 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10466 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10467 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10470 if (context_used) {
10471 MonoInst *klass_ins;
10473 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10474 klass, MONO_RGCTX_INFO_KLASS);
10477 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10478 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10480 mini_emit_class_check (cfg, klass_reg, klass);
10482 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10483 ins->type = STACK_MP;
10488 case CEE_MKREFANY: {
10489 MonoInst *loc, *addr;
10491 GSHAREDVT_FAILURE (*ip);
10494 MONO_INST_NEW (cfg, ins, *ip);
10497 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10498 CHECK_TYPELOAD (klass);
10499 mono_class_init (klass);
10501 context_used = mini_class_check_context_used (cfg, klass);
10503 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10504 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10506 if (context_used) {
10507 MonoInst *const_ins;
10508 int type_reg = alloc_preg (cfg);
10510 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10513 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10514 } else if (cfg->compile_aot) {
10515 int const_reg = alloc_preg (cfg);
10516 int type_reg = alloc_preg (cfg);
10518 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10521 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10523 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10524 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10526 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10528 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10529 ins->type = STACK_VTYPE;
10530 ins->klass = mono_defaults.typed_reference_class;
10535 case CEE_LDTOKEN: {
10537 MonoClass *handle_class;
10539 CHECK_STACK_OVF (1);
10542 n = read32 (ip + 1);
10544 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10545 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10546 handle = mono_method_get_wrapper_data (method, n);
10547 handle_class = mono_method_get_wrapper_data (method, n + 1);
10548 if (handle_class == mono_defaults.typehandle_class)
10549 handle = &((MonoClass*)handle)->byval_arg;
10552 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10556 mono_class_init (handle_class);
10557 if (cfg->generic_sharing_context) {
10558 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10559 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10560 /* This case handles ldtoken
10561 of an open type, like for
10564 } else if (handle_class == mono_defaults.typehandle_class) {
10565 /* If we get a MONO_TYPE_CLASS
10566 then we need to provide the
10568 instantiation of it. */
10569 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10572 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10573 } else if (handle_class == mono_defaults.fieldhandle_class)
10574 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10575 else if (handle_class == mono_defaults.methodhandle_class)
10576 context_used = mini_method_check_context_used (cfg, handle);
10578 g_assert_not_reached ();
10581 if ((cfg->opt & MONO_OPT_SHARED) &&
10582 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10583 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10584 MonoInst *addr, *vtvar, *iargs [3];
10585 int method_context_used;
10587 method_context_used = mini_method_check_context_used (cfg, method);
10589 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10591 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10592 EMIT_NEW_ICONST (cfg, iargs [1], n);
10593 if (method_context_used) {
10594 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10595 method, MONO_RGCTX_INFO_METHOD);
10596 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10598 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10599 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10601 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10603 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10605 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10607 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10608 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10609 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10610 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10611 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10612 MonoClass *tclass = mono_class_from_mono_type (handle);
10614 mono_class_init (tclass);
10615 if (context_used) {
10616 ins = emit_get_rgctx_klass (cfg, context_used,
10617 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10618 } else if (cfg->compile_aot) {
10619 if (method->wrapper_type) {
10620 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10621 /* Special case for static synchronized wrappers */
10622 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10624 /* FIXME: n is not a normal token */
10625 cfg->disable_aot = TRUE;
10626 EMIT_NEW_PCONST (cfg, ins, NULL);
10629 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10632 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10634 ins->type = STACK_OBJ;
10635 ins->klass = cmethod->klass;
10638 MonoInst *addr, *vtvar;
10640 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10642 if (context_used) {
10643 if (handle_class == mono_defaults.typehandle_class) {
10644 ins = emit_get_rgctx_klass (cfg, context_used,
10645 mono_class_from_mono_type (handle),
10646 MONO_RGCTX_INFO_TYPE);
10647 } else if (handle_class == mono_defaults.methodhandle_class) {
10648 ins = emit_get_rgctx_method (cfg, context_used,
10649 handle, MONO_RGCTX_INFO_METHOD);
10650 } else if (handle_class == mono_defaults.fieldhandle_class) {
10651 ins = emit_get_rgctx_field (cfg, context_used,
10652 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10654 g_assert_not_reached ();
10656 } else if (cfg->compile_aot) {
10657 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10659 EMIT_NEW_PCONST (cfg, ins, handle);
10661 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10662 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10663 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10673 MONO_INST_NEW (cfg, ins, OP_THROW);
10675 ins->sreg1 = sp [0]->dreg;
10677 bblock->out_of_line = TRUE;
10678 MONO_ADD_INS (bblock, ins);
10679 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10680 MONO_ADD_INS (bblock, ins);
10683 link_bblock (cfg, bblock, end_bblock);
10684 start_new_bblock = 1;
10686 case CEE_ENDFINALLY:
10687 /* mono_save_seq_point_info () depends on this */
10688 if (sp != stack_start)
10689 emit_seq_point (cfg, method, ip, FALSE);
10690 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10691 MONO_ADD_INS (bblock, ins);
10693 start_new_bblock = 1;
10696 * Control will leave the method so empty the stack, otherwise
10697 * the next basic block will start with a nonempty stack.
10699 while (sp != stack_start) {
10704 case CEE_LEAVE_S: {
10707 if (*ip == CEE_LEAVE) {
10709 target = ip + 5 + (gint32)read32(ip + 1);
10712 target = ip + 2 + (signed char)(ip [1]);
10715 /* empty the stack */
10716 while (sp != stack_start) {
10721 * If this leave statement is in a catch block, check for a
10722 * pending exception, and rethrow it if necessary.
10723 * We avoid doing this in runtime invoke wrappers, since those are called
10724 * by native code which expects the wrapper to catch all exceptions. \
10726 for (i = 0; i < header->num_clauses; ++i) {
10727 MonoExceptionClause *clause = &header->clauses [i];
10730 * Use <= in the final comparison to handle clauses with multiple
10731 * leave statements, like in bug #78024.
10732 * The ordering of the exception clauses guarantees that we find the
10733 * innermost clause.
10735 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10737 MonoBasicBlock *dont_throw;
10742 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10745 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10747 NEW_BBLOCK (cfg, dont_throw);
10750 * Currently, we always rethrow the abort exception, despite the
10751 * fact that this is not correct. See thread6.cs for an example.
10752 * But propagating the abort exception is more important than
10753 * getting the semantics right.
10755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10756 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10757 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10759 MONO_START_BB (cfg, dont_throw);
10764 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10766 MonoExceptionClause *clause;
10768 for (tmp = handlers; tmp; tmp = tmp->next) {
10769 clause = tmp->data;
10770 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10772 link_bblock (cfg, bblock, tblock);
10773 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10774 ins->inst_target_bb = tblock;
10775 ins->inst_eh_block = clause;
10776 MONO_ADD_INS (bblock, ins);
10777 bblock->has_call_handler = 1;
10778 if (COMPILE_LLVM (cfg)) {
10779 MonoBasicBlock *target_bb;
10782 * Link the finally bblock with the target, since it will
10783 * conceptually branch there.
10784 * FIXME: Have to link the bblock containing the endfinally.
10786 GET_BBLOCK (cfg, target_bb, target);
10787 link_bblock (cfg, tblock, target_bb);
10790 g_list_free (handlers);
10793 MONO_INST_NEW (cfg, ins, OP_BR);
10794 MONO_ADD_INS (bblock, ins);
10795 GET_BBLOCK (cfg, tblock, target);
10796 link_bblock (cfg, bblock, tblock);
10797 ins->inst_target_bb = tblock;
10798 start_new_bblock = 1;
10800 if (*ip == CEE_LEAVE)
10809 * Mono specific opcodes
10811 case MONO_CUSTOM_PREFIX: {
10813 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10817 case CEE_MONO_ICALL: {
10819 MonoJitICallInfo *info;
10821 token = read32 (ip + 2);
10822 func = mono_method_get_wrapper_data (method, token);
10823 info = mono_find_jit_icall_by_addr (func);
10825 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10828 CHECK_STACK (info->sig->param_count);
10829 sp -= info->sig->param_count;
10831 ins = mono_emit_jit_icall (cfg, info->func, sp);
10832 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10836 inline_costs += 10 * num_calls++;
10840 case CEE_MONO_LDPTR: {
10843 CHECK_STACK_OVF (1);
10845 token = read32 (ip + 2);
10847 ptr = mono_method_get_wrapper_data (method, token);
10848 /* FIXME: Generalize this */
10849 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10850 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10855 EMIT_NEW_PCONST (cfg, ins, ptr);
10858 inline_costs += 10 * num_calls++;
10859 /* Can't embed random pointers into AOT code */
10860 cfg->disable_aot = 1;
10863 case CEE_MONO_JIT_ICALL_ADDR: {
10864 MonoJitICallInfo *callinfo;
10867 CHECK_STACK_OVF (1);
10869 token = read32 (ip + 2);
10871 ptr = mono_method_get_wrapper_data (method, token);
10872 callinfo = mono_find_jit_icall_by_addr (ptr);
10873 g_assert (callinfo);
10874 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10877 inline_costs += 10 * num_calls++;
10880 case CEE_MONO_ICALL_ADDR: {
10881 MonoMethod *cmethod;
10884 CHECK_STACK_OVF (1);
10886 token = read32 (ip + 2);
10888 cmethod = mono_method_get_wrapper_data (method, token);
10890 if (cfg->compile_aot) {
10891 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10893 ptr = mono_lookup_internal_call (cmethod);
10895 EMIT_NEW_PCONST (cfg, ins, ptr);
10901 case CEE_MONO_VTADDR: {
10902 MonoInst *src_var, *src;
10908 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10909 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10914 case CEE_MONO_NEWOBJ: {
10915 MonoInst *iargs [2];
10917 CHECK_STACK_OVF (1);
10919 token = read32 (ip + 2);
10920 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10921 mono_class_init (klass);
10922 NEW_DOMAINCONST (cfg, iargs [0]);
10923 MONO_ADD_INS (cfg->cbb, iargs [0]);
10924 NEW_CLASSCONST (cfg, iargs [1], klass);
10925 MONO_ADD_INS (cfg->cbb, iargs [1]);
10926 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10928 inline_costs += 10 * num_calls++;
10931 case CEE_MONO_OBJADDR:
10934 MONO_INST_NEW (cfg, ins, OP_MOVE);
10935 ins->dreg = alloc_ireg_mp (cfg);
10936 ins->sreg1 = sp [0]->dreg;
10937 ins->type = STACK_MP;
10938 MONO_ADD_INS (cfg->cbb, ins);
10942 case CEE_MONO_LDNATIVEOBJ:
10944 * Similar to LDOBJ, but instead load the unmanaged
10945 * representation of the vtype to the stack.
10950 token = read32 (ip + 2);
10951 klass = mono_method_get_wrapper_data (method, token);
10952 g_assert (klass->valuetype);
10953 mono_class_init (klass);
10956 MonoInst *src, *dest, *temp;
10959 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10960 temp->backend.is_pinvoke = 1;
10961 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10962 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10964 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10965 dest->type = STACK_VTYPE;
10966 dest->klass = klass;
10972 case CEE_MONO_RETOBJ: {
10974 * Same as RET, but return the native representation of a vtype
10977 g_assert (cfg->ret);
10978 g_assert (mono_method_signature (method)->pinvoke);
10983 token = read32 (ip + 2);
10984 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10986 if (!cfg->vret_addr) {
10987 g_assert (cfg->ret_var_is_local);
10989 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10991 EMIT_NEW_RETLOADA (cfg, ins);
10993 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10995 if (sp != stack_start)
10998 MONO_INST_NEW (cfg, ins, OP_BR);
10999 ins->inst_target_bb = end_bblock;
11000 MONO_ADD_INS (bblock, ins);
11001 link_bblock (cfg, bblock, end_bblock);
11002 start_new_bblock = 1;
11006 case CEE_MONO_CISINST:
11007 case CEE_MONO_CCASTCLASS: {
11012 token = read32 (ip + 2);
11013 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11014 if (ip [1] == CEE_MONO_CISINST)
11015 ins = handle_cisinst (cfg, klass, sp [0]);
11017 ins = handle_ccastclass (cfg, klass, sp [0]);
11023 case CEE_MONO_SAVE_LMF:
11024 case CEE_MONO_RESTORE_LMF:
11025 #ifdef MONO_ARCH_HAVE_LMF_OPS
11026 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11027 MONO_ADD_INS (bblock, ins);
11028 cfg->need_lmf_area = TRUE;
11032 case CEE_MONO_CLASSCONST:
11033 CHECK_STACK_OVF (1);
11035 token = read32 (ip + 2);
11036 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11039 inline_costs += 10 * num_calls++;
11041 case CEE_MONO_NOT_TAKEN:
11042 bblock->out_of_line = TRUE;
11046 CHECK_STACK_OVF (1);
11048 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11049 ins->dreg = alloc_preg (cfg);
11050 ins->inst_offset = (gint32)read32 (ip + 2);
11051 ins->type = STACK_PTR;
11052 MONO_ADD_INS (bblock, ins);
11056 case CEE_MONO_DYN_CALL: {
11057 MonoCallInst *call;
11059 /* It would be easier to call a trampoline, but that would put an
11060 * extra frame on the stack, confusing exception handling. So
11061 * implement it inline using an opcode for now.
11064 if (!cfg->dyn_call_var) {
11065 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11066 /* prevent it from being register allocated */
11067 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10071 /* Has to use a call inst since the local register allocator expects it */
11071 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11072 ins = (MonoInst*)call;
11074 ins->sreg1 = sp [0]->dreg;
11075 ins->sreg2 = sp [1]->dreg;
11076 MONO_ADD_INS (bblock, ins);
11078 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11079 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11083 inline_costs += 10 * num_calls++;
11087 case CEE_MONO_MEMORY_BARRIER: {
11089 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11093 case CEE_MONO_JIT_ATTACH: {
11094 MonoInst *args [16];
11095 MonoInst *ad_ins, *lmf_ins;
11096 MonoBasicBlock *next_bb = NULL;
11098 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11100 EMIT_NEW_PCONST (cfg, ins, NULL);
11101 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11107 ad_ins = mono_get_domain_intrinsic (cfg);
11108 lmf_ins = mono_get_lmf_intrinsic (cfg);
11111 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11112 NEW_BBLOCK (cfg, next_bb);
11114 MONO_ADD_INS (cfg->cbb, ad_ins);
11115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11118 MONO_ADD_INS (cfg->cbb, lmf_ins);
11119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11123 if (cfg->compile_aot) {
11124 /* AOT code is only used in the root domain */
11125 EMIT_NEW_PCONST (cfg, args [0], NULL);
11127 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11129 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11130 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11133 MONO_START_BB (cfg, next_bb);
11139 case CEE_MONO_JIT_DETACH: {
11140 MonoInst *args [16];
11142 /* Restore the original domain */
11143 dreg = alloc_ireg (cfg);
11144 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11145 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11150 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11156 case CEE_PREFIX1: {
11159 case CEE_ARGLIST: {
11160 /* somewhat similar to LDTOKEN */
11161 MonoInst *addr, *vtvar;
11162 CHECK_STACK_OVF (1);
11163 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11165 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11166 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11168 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11169 ins->type = STACK_VTYPE;
11170 ins->klass = mono_defaults.argumenthandle_class;
11183 * The following transforms:
11184 * CEE_CEQ into OP_CEQ
11185 * CEE_CGT into OP_CGT
11186 * CEE_CGT_UN into OP_CGT_UN
11187 * CEE_CLT into OP_CLT
11188 * CEE_CLT_UN into OP_CLT_UN
11190 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11192 MONO_INST_NEW (cfg, ins, cmp->opcode);
11194 cmp->sreg1 = sp [0]->dreg;
11195 cmp->sreg2 = sp [1]->dreg;
11196 type_from_op (cmp, sp [0], sp [1]);
11198 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11199 cmp->opcode = OP_LCOMPARE;
11200 else if (sp [0]->type == STACK_R8)
11201 cmp->opcode = OP_FCOMPARE;
11203 cmp->opcode = OP_ICOMPARE;
11204 MONO_ADD_INS (bblock, cmp);
11205 ins->type = STACK_I4;
11206 ins->dreg = alloc_dreg (cfg, ins->type);
11207 type_from_op (ins, sp [0], sp [1]);
11209 if (cmp->opcode == OP_FCOMPARE) {
11211 * The backends expect the fceq opcodes to do the
11214 cmp->opcode = OP_NOP;
11215 ins->sreg1 = cmp->sreg1;
11216 ins->sreg2 = cmp->sreg2;
11218 MONO_ADD_INS (bblock, ins);
11224 MonoInst *argconst;
11225 MonoMethod *cil_method;
11227 CHECK_STACK_OVF (1);
11229 n = read32 (ip + 2);
11230 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11231 if (!cmethod || mono_loader_get_last_error ())
11233 mono_class_init (cmethod->klass);
11235 mono_save_token_info (cfg, image, n, cmethod);
11237 context_used = mini_method_check_context_used (cfg, cmethod);
11239 cil_method = cmethod;
11240 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11241 METHOD_ACCESS_FAILURE;
11243 if (mono_security_cas_enabled ()) {
11244 if (check_linkdemand (cfg, method, cmethod))
11245 INLINE_FAILURE ("linkdemand");
11246 CHECK_CFG_EXCEPTION;
11247 } else if (mono_security_core_clr_enabled ()) {
11248 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11252 * Optimize the common case of ldftn+delegate creation
11254 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11255 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11256 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11257 MonoInst *target_ins;
11258 MonoMethod *invoke;
11259 int invoke_context_used;
11261 invoke = mono_get_delegate_invoke (ctor_method->klass);
11262 if (!invoke || !mono_method_signature (invoke))
11265 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11267 target_ins = sp [-1];
11269 if (mono_security_core_clr_enabled ())
11270 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11272 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11273 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11274 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11276 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11280 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11281 /* FIXME: SGEN support */
11282 if (invoke_context_used == 0) {
11284 if (cfg->verbose_level > 3)
11285 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11287 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11288 CHECK_CFG_EXCEPTION;
11297 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11298 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11302 inline_costs += 10 * num_calls++;
11305 case CEE_LDVIRTFTN: {
11306 MonoInst *args [2];
11310 n = read32 (ip + 2);
11311 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11312 if (!cmethod || mono_loader_get_last_error ())
11314 mono_class_init (cmethod->klass);
11316 context_used = mini_method_check_context_used (cfg, cmethod);
11318 if (mono_security_cas_enabled ()) {
11319 if (check_linkdemand (cfg, method, cmethod))
11320 INLINE_FAILURE ("linkdemand");
11321 CHECK_CFG_EXCEPTION;
11322 } else if (mono_security_core_clr_enabled ()) {
11323 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11329 args [1] = emit_get_rgctx_method (cfg, context_used,
11330 cmethod, MONO_RGCTX_INFO_METHOD);
11333 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11335 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11338 inline_costs += 10 * num_calls++;
11342 CHECK_STACK_OVF (1);
11344 n = read16 (ip + 2);
11346 EMIT_NEW_ARGLOAD (cfg, ins, n);
11351 CHECK_STACK_OVF (1);
11353 n = read16 (ip + 2);
11355 NEW_ARGLOADA (cfg, ins, n);
11356 MONO_ADD_INS (cfg->cbb, ins);
11364 n = read16 (ip + 2);
11366 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11368 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11372 CHECK_STACK_OVF (1);
11374 n = read16 (ip + 2);
11376 EMIT_NEW_LOCLOAD (cfg, ins, n);
11381 unsigned char *tmp_ip;
11382 CHECK_STACK_OVF (1);
11384 n = read16 (ip + 2);
11387 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11393 EMIT_NEW_LOCLOADA (cfg, ins, n);
11402 n = read16 (ip + 2);
11404 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11406 emit_stloc_ir (cfg, sp, header, n);
11413 if (sp != stack_start)
11415 if (cfg->method != method)
11417 * Inlining this into a loop in a parent could lead to
11418 * stack overflows which is different behavior than the
11419 * non-inlined case, thus disable inlining in this case.
11421 goto inline_failure;
11423 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11424 ins->dreg = alloc_preg (cfg);
11425 ins->sreg1 = sp [0]->dreg;
11426 ins->type = STACK_PTR;
11427 MONO_ADD_INS (cfg->cbb, ins);
11429 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11431 ins->flags |= MONO_INST_INIT;
11436 case CEE_ENDFILTER: {
11437 MonoExceptionClause *clause, *nearest;
11438 int cc, nearest_num;
11442 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11444 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11445 ins->sreg1 = (*sp)->dreg;
11446 MONO_ADD_INS (bblock, ins);
11447 start_new_bblock = 1;
11452 for (cc = 0; cc < header->num_clauses; ++cc) {
11453 clause = &header->clauses [cc];
11454 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11455 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11456 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11461 g_assert (nearest);
11462 if ((ip - header->code) != nearest->handler_offset)
11467 case CEE_UNALIGNED_:
11468 ins_flag |= MONO_INST_UNALIGNED;
11469 /* FIXME: record alignment? we can assume 1 for now */
11473 case CEE_VOLATILE_:
11474 ins_flag |= MONO_INST_VOLATILE;
11478 ins_flag |= MONO_INST_TAILCALL;
11479 cfg->flags |= MONO_CFG_HAS_TAIL;
11480 /* Can't inline tail calls at this time */
11481 inline_costs += 100000;
11488 token = read32 (ip + 2);
11489 klass = mini_get_class (method, token, generic_context);
11490 CHECK_TYPELOAD (klass);
11491 if (generic_class_is_reference_type (cfg, klass))
11492 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11494 mini_emit_initobj (cfg, *sp, NULL, klass);
11498 case CEE_CONSTRAINED_:
11500 token = read32 (ip + 2);
11501 constrained_call = mini_get_class (method, token, generic_context);
11502 CHECK_TYPELOAD (constrained_call);
11506 case CEE_INITBLK: {
11507 MonoInst *iargs [3];
11511 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11512 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11513 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11514 /* emit_memset only works when val == 0 */
11515 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11517 iargs [0] = sp [0];
11518 iargs [1] = sp [1];
11519 iargs [2] = sp [2];
11520 if (ip [1] == CEE_CPBLK) {
11521 MonoMethod *memcpy_method = get_memcpy_method ();
11522 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11524 MonoMethod *memset_method = get_memset_method ();
11525 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11535 ins_flag |= MONO_INST_NOTYPECHECK;
11537 ins_flag |= MONO_INST_NORANGECHECK;
11538 /* we ignore the no-nullcheck for now since we
11539 * really do it explicitly only when doing callvirt->call
11543 case CEE_RETHROW: {
11545 int handler_offset = -1;
11547 for (i = 0; i < header->num_clauses; ++i) {
11548 MonoExceptionClause *clause = &header->clauses [i];
11549 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11550 handler_offset = clause->handler_offset;
11555 bblock->flags |= BB_EXCEPTION_UNSAFE;
11557 g_assert (handler_offset != -1);
11559 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11560 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11561 ins->sreg1 = load->dreg;
11562 MONO_ADD_INS (bblock, ins);
11564 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11565 MONO_ADD_INS (bblock, ins);
11568 link_bblock (cfg, bblock, end_bblock);
11569 start_new_bblock = 1;
11577 GSHAREDVT_FAILURE (*ip);
11579 CHECK_STACK_OVF (1);
11581 token = read32 (ip + 2);
11582 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11583 MonoType *type = mono_type_create_from_typespec (image, token);
11584 val = mono_type_size (type, &ialign);
11586 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11587 CHECK_TYPELOAD (klass);
11588 mono_class_init (klass);
11589 val = mono_type_size (&klass->byval_arg, &ialign);
11591 EMIT_NEW_ICONST (cfg, ins, val);
11596 case CEE_REFANYTYPE: {
11597 MonoInst *src_var, *src;
11599 GSHAREDVT_FAILURE (*ip);
11605 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11607 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11608 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11614 case CEE_READONLY_:
11627 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11637 g_warning ("opcode 0x%02x not handled", *ip);
11641 if (start_new_bblock != 1)
11644 bblock->cil_length = ip - bblock->cil_code;
11645 if (bblock->next_bb) {
11646 /* This could already be set because of inlining, #693905 */
11647 MonoBasicBlock *bb = bblock;
11649 while (bb->next_bb)
11651 bb->next_bb = end_bblock;
11653 bblock->next_bb = end_bblock;
11656 if (cfg->method == method && cfg->domainvar) {
11658 MonoInst *get_domain;
11660 cfg->cbb = init_localsbb;
11662 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11663 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11666 get_domain->dreg = alloc_preg (cfg);
11667 MONO_ADD_INS (cfg->cbb, get_domain);
11669 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11670 MONO_ADD_INS (cfg->cbb, store);
11673 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11674 if (cfg->compile_aot)
11675 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11676 mono_get_got_var (cfg);
11679 if (cfg->method == method && cfg->got_var)
11680 mono_emit_load_got_addr (cfg);
11685 cfg->cbb = init_localsbb;
11687 for (i = 0; i < header->num_locals; ++i) {
11688 MonoType *ptype = header->locals [i];
11689 int t = ptype->type;
11690 dreg = cfg->locals [i]->dreg;
11692 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11693 t = mono_class_enum_basetype (ptype->data.klass)->type;
11694 if (ptype->byref) {
11695 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11696 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11697 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11698 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11699 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11700 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11701 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11702 ins->type = STACK_R8;
11703 ins->inst_p0 = (void*)&r8_0;
11704 ins->dreg = alloc_dreg (cfg, STACK_R8);
11705 MONO_ADD_INS (init_localsbb, ins);
11706 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11707 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11708 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11709 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11710 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11711 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11713 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11718 if (cfg->init_ref_vars && cfg->method == method) {
11719 /* Emit initialization for ref vars */
11720 // FIXME: Avoid duplication initialization for IL locals.
11721 for (i = 0; i < cfg->num_varinfo; ++i) {
11722 MonoInst *ins = cfg->varinfo [i];
11724 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11725 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11730 MonoBasicBlock *bb;
11733 * Make seq points at backward branch targets interruptable.
11735 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11736 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11737 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11740 /* Add a sequence point for method entry/exit events */
11742 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11743 MONO_ADD_INS (init_localsbb, ins);
11744 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11745 MONO_ADD_INS (cfg->bb_exit, ins);
11749 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11750 * the code they refer to was dead (#11880).
11752 if (sym_seq_points) {
11753 for (i = 0; i < header->code_size; ++i) {
11754 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11757 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11758 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11765 if (cfg->method == method) {
11766 MonoBasicBlock *bb;
11767 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11768 bb->region = mono_find_block_region (cfg, bb->real_offset);
11770 mono_create_spvar_for_region (cfg, bb->region);
11771 if (cfg->verbose_level > 2)
11772 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11776 g_slist_free (class_inits);
11777 dont_inline = g_list_remove (dont_inline, method);
11779 if (inline_costs < 0) {
11782 /* Method is too large */
11783 mname = mono_method_full_name (method, TRUE);
11784 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11785 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11787 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11788 mono_basic_block_free (original_bb);
11792 if ((cfg->verbose_level > 2) && (cfg->method == method))
11793 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11795 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11796 mono_basic_block_free (original_bb);
11797 return inline_costs;
11800 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11807 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11811 set_exception_type_from_invalid_il (cfg, method, ip);
11815 g_slist_free (class_inits);
11816 mono_basic_block_free (original_bb);
11817 dont_inline = g_list_remove (dont_inline, method);
11818 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11823 store_membase_reg_to_store_membase_imm (int opcode)
11826 case OP_STORE_MEMBASE_REG:
11827 return OP_STORE_MEMBASE_IMM;
11828 case OP_STOREI1_MEMBASE_REG:
11829 return OP_STOREI1_MEMBASE_IMM;
11830 case OP_STOREI2_MEMBASE_REG:
11831 return OP_STOREI2_MEMBASE_IMM;
11832 case OP_STOREI4_MEMBASE_REG:
11833 return OP_STOREI4_MEMBASE_IMM;
11834 case OP_STOREI8_MEMBASE_REG:
11835 return OP_STOREI8_MEMBASE_IMM;
11837 g_assert_not_reached ();
11844 mono_op_to_op_imm (int opcode)
11848 return OP_IADD_IMM;
11850 return OP_ISUB_IMM;
11852 return OP_IDIV_IMM;
11854 return OP_IDIV_UN_IMM;
11856 return OP_IREM_IMM;
11858 return OP_IREM_UN_IMM;
11860 return OP_IMUL_IMM;
11862 return OP_IAND_IMM;
11866 return OP_IXOR_IMM;
11868 return OP_ISHL_IMM;
11870 return OP_ISHR_IMM;
11872 return OP_ISHR_UN_IMM;
11875 return OP_LADD_IMM;
11877 return OP_LSUB_IMM;
11879 return OP_LAND_IMM;
11883 return OP_LXOR_IMM;
11885 return OP_LSHL_IMM;
11887 return OP_LSHR_IMM;
11889 return OP_LSHR_UN_IMM;
11892 return OP_COMPARE_IMM;
11894 return OP_ICOMPARE_IMM;
11896 return OP_LCOMPARE_IMM;
11898 case OP_STORE_MEMBASE_REG:
11899 return OP_STORE_MEMBASE_IMM;
11900 case OP_STOREI1_MEMBASE_REG:
11901 return OP_STOREI1_MEMBASE_IMM;
11902 case OP_STOREI2_MEMBASE_REG:
11903 return OP_STOREI2_MEMBASE_IMM;
11904 case OP_STOREI4_MEMBASE_REG:
11905 return OP_STOREI4_MEMBASE_IMM;
11907 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11909 return OP_X86_PUSH_IMM;
11910 case OP_X86_COMPARE_MEMBASE_REG:
11911 return OP_X86_COMPARE_MEMBASE_IMM;
11913 #if defined(TARGET_AMD64)
11914 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11915 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11917 case OP_VOIDCALL_REG:
11918 return OP_VOIDCALL;
11926 return OP_LOCALLOC_IMM;
11933 ldind_to_load_membase (int opcode)
11937 return OP_LOADI1_MEMBASE;
11939 return OP_LOADU1_MEMBASE;
11941 return OP_LOADI2_MEMBASE;
11943 return OP_LOADU2_MEMBASE;
11945 return OP_LOADI4_MEMBASE;
11947 return OP_LOADU4_MEMBASE;
11949 return OP_LOAD_MEMBASE;
11950 case CEE_LDIND_REF:
11951 return OP_LOAD_MEMBASE;
11953 return OP_LOADI8_MEMBASE;
11955 return OP_LOADR4_MEMBASE;
11957 return OP_LOADR8_MEMBASE;
11959 g_assert_not_reached ();
11966 stind_to_store_membase (int opcode)
11970 return OP_STOREI1_MEMBASE_REG;
11972 return OP_STOREI2_MEMBASE_REG;
11974 return OP_STOREI4_MEMBASE_REG;
11976 case CEE_STIND_REF:
11977 return OP_STORE_MEMBASE_REG;
11979 return OP_STOREI8_MEMBASE_REG;
11981 return OP_STORER4_MEMBASE_REG;
11983 return OP_STORER8_MEMBASE_REG;
11985 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a basereg+offset load to the absolute-address OP_LOAD*_MEM form, for
 * targets which support it (x86/amd64). Returns -1 when no such form exists
 * on this target, in which case the caller keeps the _MEMBASE opcode.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 read-modify-write _MEMBASE opcode which combines
 * OPCODE with a following STORE_OPCODE into the same location, avoiding a
 * separate load+store. Returns -1 when no fused form is available (the only
 * case on non-x86 targets).
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only plain pointer/int32 stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move followed by a store back to the same slot is a no-op */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move followed by a store back to the same slot is a no-op */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store into a single
 * x86 SET<cc>_MEMBASE instruction. Returns -1 when no fused form exists.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) feeding sreg1 of OPCODE into a single
 * x86/amd64 _MEMBASE instruction which reads the operand from memory directly.
 * Returns -1 when no such fused form exists for this load/op combination.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Under ILP32 only the 8-byte load matches the push width */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) feeding sreg2 of OPCODE into a single
 * x86/amd64 _REG_MEMBASE instruction which reads the second operand from
 * memory directly. Returns -1 when no fused form exists.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* Under ILP32, pointer-sized loads are 32 bit wide */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse (return -1) the conversion for
 * opcodes which this architecture emulates in software (long shifts on
 * 32 bit targets, mul/div/rem where MONO_ARCH_EMULATE_* is set), since the
 * emulation helpers have no immediate variants.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
12299 * mono_handle_global_vregs:
12301 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
mono_handle_global_vregs (MonoCompile *cfg)
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	/* Maps vreg -> (block_num + 1) of the single bb using it, or -1 if used in several bbs */
	/* NOTE(review): element type is gint32, so sizeof (gint32*) over-allocates on
	 * 64 bit; also '* cfg->next_vreg + 1' adds 1 byte, not 1 element — presumably
	 * 'sizeof (gint32) * (cfg->next_vreg + 1)' was intended. TODO confirm. */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;
		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);
			/* At this point all CIL opcodes have been lowered to machine IR */
			g_assert (ins->opcode >= MONO_CEE_LAST);
			/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
			for (regindex = 0; regindex < 4; regindex ++) {
				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
#if SIZEOF_REGISTER == 4
			/* In the LLVM case, the long opcodes are not decomposed */
			if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
				 * Since some instructions reference the original long vreg,
				 * and some reference the two component vregs, it is quite hard
				 * to determine when it needs to be global. So be conservative.
				if (!get_vreg_to_inst (cfg, vreg)) {
					mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
					if (cfg->verbose_level > 2)
						printf ("LONG VREG R%d made global.\n", vreg);
				 * Make the component vregs volatile since the optimizations can
				 * get confused otherwise.
				get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
				get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
			g_assert (vreg != -1);
			prev_bb = vreg_to_bb [vreg];
			if (prev_bb == 0) {
				/* 0 is a valid block num */
				vreg_to_bb [vreg] = block_num + 1;
			} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
				/* Hardware registers are handled by the register allocator, skip them */
				if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
				if (!get_vreg_to_inst (cfg, vreg)) {
					if (G_UNLIKELY (cfg->verbose_level > 2))
						printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
					/* Create a MonoInst variable of the right type so the vreg survives across bblocks */
					if (vreg_is_ref (cfg, vreg))
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
					mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
					mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
					mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
					g_assert_not_reached ();
				/* Flag as having been used in more than one bb */
				vreg_to_bb [vreg] = -1;
	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
		switch (var->type) {
#if SIZEOF_REGISTER == 8
#if !defined(TARGET_X86)
		/* Enabling this screws up the fp stack on x86 */
			if (mono_arch_is_soft_float ())
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				/* This is too slow for JIT compilation */
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);
						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;
						/* NOTE(review): the second disjunct repeats the SRC1/sreg1 test;
						 * presumably it should check spec [MONO_INST_SRC2] / ins->sreg2,
						 * otherwise uses through sreg2 are missed here. TODO confirm. */
						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
						if (MONO_IS_CALL (ins))
							call_index = ins_index;
				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				/* Demote the variable back to a plain local vreg */
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			cfg->varinfo [pos] = cfg->varinfo [i];
			cfg->varinfo [pos]->inst_c0 = pos;
			memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
			cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
			if (cfg->varinfo [pos]->type == STACK_I8) {
				/* Modify the two component vars too */
				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
				var1->inst_c0 = pos;
				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
				var1->inst_c0 = pos;
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
12538 * mono_spill_global_vars:
12540 * Generate spill code for variables which are not allocated to registers,
12541 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12542 * code is generated which could be optimized by the local optimization passes.
12545 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12547 MonoBasicBlock *bb;
12549 int orig_next_vreg;
12550 guint32 *vreg_to_lvreg;
12552 guint32 i, lvregs_len;
12553 gboolean dest_has_lvreg = FALSE;
12554 guint32 stacktypes [128];
12555 MonoInst **live_range_start, **live_range_end;
12556 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12557 int *gsharedvt_vreg_to_idx = NULL;
12559 *need_local_opts = FALSE;
12561 memset (spec2, 0, sizeof (spec2));
12563 /* FIXME: Move this function to mini.c */
12564 stacktypes ['i'] = STACK_PTR;
12565 stacktypes ['l'] = STACK_I8;
12566 stacktypes ['f'] = STACK_R8;
12567 #ifdef MONO_ARCH_SIMD_INTRINSICS
12568 stacktypes ['x'] = STACK_VTYPE;
12571 #if SIZEOF_REGISTER == 4
12572 /* Create MonoInsts for longs */
12573 for (i = 0; i < cfg->num_varinfo; i++) {
12574 MonoInst *ins = cfg->varinfo [i];
12576 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12577 switch (ins->type) {
12582 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12585 g_assert (ins->opcode == OP_REGOFFSET);
12587 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12589 tree->opcode = OP_REGOFFSET;
12590 tree->inst_basereg = ins->inst_basereg;
12591 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12593 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12595 tree->opcode = OP_REGOFFSET;
12596 tree->inst_basereg = ins->inst_basereg;
12597 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12607 if (cfg->compute_gc_maps) {
12608 /* registers need liveness info even for !non refs */
12609 for (i = 0; i < cfg->num_varinfo; i++) {
12610 MonoInst *ins = cfg->varinfo [i];
12612 if (ins->opcode == OP_REGVAR)
12613 ins->flags |= MONO_INST_GC_TRACK;
12617 if (cfg->gsharedvt) {
12618 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12620 for (i = 0; i < cfg->num_varinfo; ++i) {
12621 MonoInst *ins = cfg->varinfo [i];
12624 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12625 if (i >= cfg->locals_start) {
12627 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12628 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12629 ins->opcode = OP_GSHAREDVT_LOCAL;
12630 ins->inst_imm = idx;
12633 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12634 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12640 /* FIXME: widening and truncation */
12643 * As an optimization, when a variable allocated to the stack is first loaded into
12644 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12645 * the variable again.
12647 orig_next_vreg = cfg->next_vreg;
12648 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12649 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12653 * These arrays contain the first and last instructions accessing a given
12655 * Since we emit bblocks in the same order we process them here, and we
12656 * don't split live ranges, these will precisely describe the live range of
12657 * the variable, i.e. the instruction range where a valid value can be found
12658 * in the variables location.
12659 * The live range is computed using the liveness info computed by the liveness pass.
12660 * We can't use vmv->range, since that is an abstract live range, and we need
12661 * one which is instruction precise.
12662 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12664 /* FIXME: Only do this if debugging info is requested */
12665 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12666 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12667 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12668 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12670 /* Add spill loads/stores */
12671 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12674 if (cfg->verbose_level > 2)
12675 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12677 /* Clear vreg_to_lvreg array */
12678 for (i = 0; i < lvregs_len; i++)
12679 vreg_to_lvreg [lvregs [i]] = 0;
12683 MONO_BB_FOR_EACH_INS (bb, ins) {
12684 const char *spec = INS_INFO (ins->opcode);
12685 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12686 gboolean store, no_lvreg;
12687 int sregs [MONO_MAX_SRC_REGS];
12689 if (G_UNLIKELY (cfg->verbose_level > 2))
12690 mono_print_ins (ins);
12692 if (ins->opcode == OP_NOP)
12696 * We handle LDADDR here as well, since it can only be decomposed
12697 * when variable addresses are known.
12699 if (ins->opcode == OP_LDADDR) {
12700 MonoInst *var = ins->inst_p0;
12702 if (var->opcode == OP_VTARG_ADDR) {
12703 /* Happens on SPARC/S390 where vtypes are passed by reference */
12704 MonoInst *vtaddr = var->inst_left;
12705 if (vtaddr->opcode == OP_REGVAR) {
12706 ins->opcode = OP_MOVE;
12707 ins->sreg1 = vtaddr->dreg;
12709 else if (var->inst_left->opcode == OP_REGOFFSET) {
12710 ins->opcode = OP_LOAD_MEMBASE;
12711 ins->inst_basereg = vtaddr->inst_basereg;
12712 ins->inst_offset = vtaddr->inst_offset;
12715 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12716 /* gsharedvt arg passed by ref */
12717 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12719 ins->opcode = OP_LOAD_MEMBASE;
12720 ins->inst_basereg = var->inst_basereg;
12721 ins->inst_offset = var->inst_offset;
12722 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12723 MonoInst *load, *load2, *load3;
12724 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12725 int reg1, reg2, reg3;
12726 MonoInst *info_var = cfg->gsharedvt_info_var;
12727 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12731 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12734 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12736 g_assert (info_var);
12737 g_assert (locals_var);
12739 /* Mark the instruction used to compute the locals var as used */
12740 cfg->gsharedvt_locals_var_ins = NULL;
12742 /* Load the offset */
12743 if (info_var->opcode == OP_REGOFFSET) {
12744 reg1 = alloc_ireg (cfg);
12745 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12746 } else if (info_var->opcode == OP_REGVAR) {
12748 reg1 = info_var->dreg;
12750 g_assert_not_reached ();
12752 reg2 = alloc_ireg (cfg);
12753 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12754 /* Load the locals area address */
12755 reg3 = alloc_ireg (cfg);
12756 if (locals_var->opcode == OP_REGOFFSET) {
12757 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12758 } else if (locals_var->opcode == OP_REGVAR) {
12759 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12761 g_assert_not_reached ();
12763 /* Compute the address */
12764 ins->opcode = OP_PADD;
12768 mono_bblock_insert_before_ins (bb, ins, load3);
12769 mono_bblock_insert_before_ins (bb, load3, load2);
12771 mono_bblock_insert_before_ins (bb, load2, load);
12773 g_assert (var->opcode == OP_REGOFFSET);
12775 ins->opcode = OP_ADD_IMM;
12776 ins->sreg1 = var->inst_basereg;
12777 ins->inst_imm = var->inst_offset;
12780 *need_local_opts = TRUE;
12781 spec = INS_INFO (ins->opcode);
12784 if (ins->opcode < MONO_CEE_LAST) {
12785 mono_print_ins (ins);
12786 g_assert_not_reached ();
12790 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12794 if (MONO_IS_STORE_MEMBASE (ins)) {
12795 tmp_reg = ins->dreg;
12796 ins->dreg = ins->sreg2;
12797 ins->sreg2 = tmp_reg;
12800 spec2 [MONO_INST_DEST] = ' ';
12801 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12802 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12803 spec2 [MONO_INST_SRC3] = ' ';
12805 } else if (MONO_IS_STORE_MEMINDEX (ins))
12806 g_assert_not_reached ();
12811 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12812 printf ("\t %.3s %d", spec, ins->dreg);
12813 num_sregs = mono_inst_get_src_registers (ins, sregs);
12814 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12815 printf (" %d", sregs [srcindex]);
12822 regtype = spec [MONO_INST_DEST];
12823 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12826 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12827 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12828 MonoInst *store_ins;
12830 MonoInst *def_ins = ins;
12831 int dreg = ins->dreg; /* The original vreg */
12833 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12835 if (var->opcode == OP_REGVAR) {
12836 ins->dreg = var->dreg;
12837 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12839 * Instead of emitting a load+store, use a _membase opcode.
12841 g_assert (var->opcode == OP_REGOFFSET);
12842 if (ins->opcode == OP_MOVE) {
12846 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12847 ins->inst_basereg = var->inst_basereg;
12848 ins->inst_offset = var->inst_offset;
12851 spec = INS_INFO (ins->opcode);
12855 g_assert (var->opcode == OP_REGOFFSET);
12857 prev_dreg = ins->dreg;
12859 /* Invalidate any previous lvreg for this vreg */
12860 vreg_to_lvreg [ins->dreg] = 0;
12864 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12866 store_opcode = OP_STOREI8_MEMBASE_REG;
12869 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12871 #if SIZEOF_REGISTER != 8
12872 if (regtype == 'l') {
12873 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12874 mono_bblock_insert_after_ins (bb, ins, store_ins);
12875 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12876 mono_bblock_insert_after_ins (bb, ins, store_ins);
12877 def_ins = store_ins;
12882 g_assert (store_opcode != OP_STOREV_MEMBASE);
12884 /* Try to fuse the store into the instruction itself */
12885 /* FIXME: Add more instructions */
12886 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12887 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12888 ins->inst_imm = ins->inst_c0;
12889 ins->inst_destbasereg = var->inst_basereg;
12890 ins->inst_offset = var->inst_offset;
12891 spec = INS_INFO (ins->opcode);
12892 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12893 ins->opcode = store_opcode;
12894 ins->inst_destbasereg = var->inst_basereg;
12895 ins->inst_offset = var->inst_offset;
12899 tmp_reg = ins->dreg;
12900 ins->dreg = ins->sreg2;
12901 ins->sreg2 = tmp_reg;
12904 spec2 [MONO_INST_DEST] = ' ';
12905 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12906 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12907 spec2 [MONO_INST_SRC3] = ' ';
12909 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12910 // FIXME: The backends expect the base reg to be in inst_basereg
12911 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12913 ins->inst_basereg = var->inst_basereg;
12914 ins->inst_offset = var->inst_offset;
12915 spec = INS_INFO (ins->opcode);
12917 /* printf ("INS: "); mono_print_ins (ins); */
12918 /* Create a store instruction */
12919 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12921 /* Insert it after the instruction */
12922 mono_bblock_insert_after_ins (bb, ins, store_ins);
12924 def_ins = store_ins;
12927 * We can't assign ins->dreg to var->dreg here, since the
12928 * sregs could use it. So set a flag, and do it after
12931 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12932 dest_has_lvreg = TRUE;
12937 if (def_ins && !live_range_start [dreg]) {
12938 live_range_start [dreg] = def_ins;
12939 live_range_start_bb [dreg] = bb;
12942 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12945 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12946 tmp->inst_c1 = dreg;
12947 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12954 num_sregs = mono_inst_get_src_registers (ins, sregs);
12955 for (srcindex = 0; srcindex < 3; ++srcindex) {
12956 regtype = spec [MONO_INST_SRC1 + srcindex];
12957 sreg = sregs [srcindex];
12959 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12960 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12961 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12962 MonoInst *use_ins = ins;
12963 MonoInst *load_ins;
12964 guint32 load_opcode;
12966 if (var->opcode == OP_REGVAR) {
12967 sregs [srcindex] = var->dreg;
12968 //mono_inst_set_src_registers (ins, sregs);
12969 live_range_end [sreg] = use_ins;
12970 live_range_end_bb [sreg] = bb;
12972 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12975 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12976 /* var->dreg is a hreg */
12977 tmp->inst_c1 = sreg;
12978 mono_bblock_insert_after_ins (bb, ins, tmp);
12984 g_assert (var->opcode == OP_REGOFFSET);
12986 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12988 g_assert (load_opcode != OP_LOADV_MEMBASE);
12990 if (vreg_to_lvreg [sreg]) {
12991 g_assert (vreg_to_lvreg [sreg] != -1);
12993 /* The variable is already loaded to an lvreg */
12994 if (G_UNLIKELY (cfg->verbose_level > 2))
12995 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12996 sregs [srcindex] = vreg_to_lvreg [sreg];
12997 //mono_inst_set_src_registers (ins, sregs);
13001 /* Try to fuse the load into the instruction */
13002 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13003 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13004 sregs [0] = var->inst_basereg;
13005 //mono_inst_set_src_registers (ins, sregs);
13006 ins->inst_offset = var->inst_offset;
13007 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13008 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13009 sregs [1] = var->inst_basereg;
13010 //mono_inst_set_src_registers (ins, sregs);
13011 ins->inst_offset = var->inst_offset;
13013 if (MONO_IS_REAL_MOVE (ins)) {
13014 ins->opcode = OP_NOP;
13017 //printf ("%d ", srcindex); mono_print_ins (ins);
13019 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13021 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13022 if (var->dreg == prev_dreg) {
13024 * sreg refers to the value loaded by the load
13025 * emitted below, but we need to use ins->dreg
13026 * since it refers to the store emitted earlier.
13030 g_assert (sreg != -1);
13031 vreg_to_lvreg [var->dreg] = sreg;
13032 g_assert (lvregs_len < 1024);
13033 lvregs [lvregs_len ++] = var->dreg;
13037 sregs [srcindex] = sreg;
13038 //mono_inst_set_src_registers (ins, sregs);
13040 #if SIZEOF_REGISTER != 8
13041 if (regtype == 'l') {
13042 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13043 mono_bblock_insert_before_ins (bb, ins, load_ins);
13044 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13045 mono_bblock_insert_before_ins (bb, ins, load_ins);
13046 use_ins = load_ins;
13051 #if SIZEOF_REGISTER == 4
13052 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13054 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13055 mono_bblock_insert_before_ins (bb, ins, load_ins);
13056 use_ins = load_ins;
13060 if (var->dreg < orig_next_vreg) {
13061 live_range_end [var->dreg] = use_ins;
13062 live_range_end_bb [var->dreg] = bb;
13065 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13068 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13069 tmp->inst_c1 = var->dreg;
13070 mono_bblock_insert_after_ins (bb, ins, tmp);
13074 mono_inst_set_src_registers (ins, sregs);
13076 if (dest_has_lvreg) {
13077 g_assert (ins->dreg != -1);
13078 vreg_to_lvreg [prev_dreg] = ins->dreg;
13079 g_assert (lvregs_len < 1024);
13080 lvregs [lvregs_len ++] = prev_dreg;
13081 dest_has_lvreg = FALSE;
13085 tmp_reg = ins->dreg;
13086 ins->dreg = ins->sreg2;
13087 ins->sreg2 = tmp_reg;
13090 if (MONO_IS_CALL (ins)) {
13091 /* Clear vreg_to_lvreg array */
13092 for (i = 0; i < lvregs_len; i++)
13093 vreg_to_lvreg [lvregs [i]] = 0;
13095 } else if (ins->opcode == OP_NOP) {
13097 MONO_INST_NULLIFY_SREGS (ins);
13100 if (cfg->verbose_level > 2)
13101 mono_print_ins_index (1, ins);
13104 /* Extend the live range based on the liveness info */
13105 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13106 for (i = 0; i < cfg->num_varinfo; i ++) {
13107 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13109 if (vreg_is_volatile (cfg, vi->vreg))
13110 /* The liveness info is incomplete */
13113 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13114 /* Live from at least the first ins of this bb */
13115 live_range_start [vi->vreg] = bb->code;
13116 live_range_start_bb [vi->vreg] = bb;
13119 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13120 /* Live at least until the last ins of this bb */
13121 live_range_end [vi->vreg] = bb->last_ins;
13122 live_range_end_bb [vi->vreg] = bb;
13128 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13130 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13131 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13133 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13134 for (i = 0; i < cfg->num_varinfo; ++i) {
13135 int vreg = MONO_VARINFO (cfg, i)->vreg;
13138 if (live_range_start [vreg]) {
13139 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13141 ins->inst_c1 = vreg;
13142 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13144 if (live_range_end [vreg]) {
13145 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13147 ins->inst_c1 = vreg;
13148 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13149 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13151 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13157 if (cfg->gsharedvt_locals_var_ins) {
13158 /* Nullify if unused */
13159 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13160 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13163 g_free (live_range_start);
13164 g_free (live_range_end);
13165 g_free (live_range_start_bb);
13166 g_free (live_range_end_bb);
13171 * - use 'iadd' instead of 'int_add'
13172 * - handling ovf opcodes: decompose in method_to_ir.
13173 * - unify iregs/fregs
13174 * -> partly done, the missing parts are:
13175 * - a more complete unification would involve unifying the hregs as well, so
13176 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13177 * would no longer map to the machine hregs, so the code generators would need to
13178 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13179 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13180 * fp/non-fp branches speeds it up by about 15%.
13181 * - use sext/zext opcodes instead of shifts
13183 * - get rid of TEMPLOADs if possible and use vregs instead
13184 * - clean up usage of OP_P/OP_ opcodes
13185 * - cleanup usage of DUMMY_USE
13186 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13188 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13189 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13190 * - make sure handle_stack_args () is called before the branch is emitted
13191 * - when the new IR is done, get rid of all unused stuff
13192 * - COMPARE/BEQ as separate instructions or unify them ?
13193 * - keeping them separate allows specialized compare instructions like
13194 * compare_imm, compare_membase
13195 * - most back ends unify fp compare+branch, fp compare+ceq
13196 * - integrate mono_save_args into inline_method
13197 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13198 * - handle long shift opts on 32 bit platforms somehow: they require
13199 * 3 sregs (2 for arg1 and 1 for arg2)
13200 * - make byref a 'normal' type.
13201 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13202 * variable if needed.
13203 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13204 * like inline_method.
13205 * - remove inlining restrictions
13206 * - fix LNEG and enable cfold of INEG
13207 * - generalize x86 optimizations like ldelema as a peephole optimization
13208 * - add store_mem_imm for amd64
13209 * - optimize the loading of the interruption flag in the managed->native wrappers
13210 * - avoid special handling of OP_NOP in passes
13211 * - move code inserting instructions into one function/macro.
13212 * - try a coalescing phase after liveness analysis
13213 * - add float -> vreg conversion + local optimizations on !x86
13214 * - figure out how to handle decomposed branches during optimizations, ie.
13215 * compare+branch, op_jump_table+op_br etc.
13216 * - promote RuntimeXHandles to vregs
13217 * - vtype cleanups:
13218 * - add a NEW_VARLOADA_VREG macro
13219 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13220 * accessing vtype fields.
13221 * - get rid of I8CONST on 64 bit platforms
13222 * - dealing with the increase in code size due to branches created during opcode
13224 * - use extended basic blocks
13225 * - all parts of the JIT
13226 * - handle_global_vregs () && local regalloc
13227 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13228 * - sources of increase in code size:
13231 * - isinst and castclass
13232 * - lvregs not allocated to global registers even if used multiple times
13233 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13235 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13236 * - add all micro optimizations from the old JIT
13237 * - put tree optimizations into the deadce pass
13238 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13239 * specific function.
13240 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13241 * fcompare + branchCC.
13242 * - create a helper function for allocating a stack slot, taking into account
13243 * MONO_CFG_HAS_SPILLUP.
13245 * - merge the ia64 switch changes.
13246 * - optimize mono_regstate2_alloc_int/float.
13247 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13248 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13249 * parts of the tree could be separated by other instructions, killing the tree
13250 * arguments, or stores killing loads etc. Also, should we fold loads into other
13251 * instructions if the result of the load is used multiple times ?
13252 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13253 * - LAST MERGE: 108395.
13254 * - when returning vtypes in registers, generate IR and append it to the end of the
13255 * last bb instead of doing it in the epilog.
13256 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13264 - When to decompose opcodes:
13265 - earlier: this makes some optimizations hard to implement, since the low level IR
13266 no longer contains the necessary information. But it is easier to do.
13267 - later: harder to implement, enables more optimizations.
13268 - Branches inside bblocks:
13269 - created when decomposing complex opcodes.
13270 - branches to another bblock: harmless, but not tracked by the branch
13271 optimizations, so need to branch to a label at the start of the bblock.
13272 - branches to inside the same bblock: very problematic, trips up the local
13273 reg allocator. Can be fixed by splitting the current bblock, but that is a
13274 complex operation, since some local vregs can become global vregs etc.
13275 - Local/global vregs:
13276 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13277 local register allocator.
13278 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13279 structure, created by mono_create_var (). Assigned to hregs or the stack by
13280 the global register allocator.
13281 - When to do optimizations like alu->alu_imm:
13282 - earlier -> saves work later on since the IR will be smaller/simpler
13283 - later -> can work on more instructions
13284 - Handling of valuetypes:
13285 - When a vtype is pushed on the stack, a new temporary is created, an
13286 instruction computing its address (LDADDR) is emitted and pushed on
13287 the stack. Need to optimize cases when the vtype is used immediately as in
13288 argument passing, stloc etc.
13289 - Instead of the to_end stuff in the old JIT, simply call the function handling
13290 the values on the stack before emitting the last instruction of the bb.
13293 #endif /* DISABLE_JIT */