2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* NOTE(review): this listing appears to have gaps (embedded line numbers jump);
 * several macro bodies below are missing their closing "} while (0)" lines. */
/* Heuristic cost/limit constants used by the inliner — presumably tuned empirically; verify before changing. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee: log (verbose >= 2) and jump to the inline_failure label. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Bail out of compilation if a pending exception has been recorded on the cfg. */ \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException (with full method names) on the cfg and exit via exception_exit. */ \
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE but for an inaccessible field. */ \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Generic sharing cannot handle this opcode: record GENERIC_SHARING_FAILED and exit.
 * Only takes effect when compiling with a generic_sharing_context. */ \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* gsharedvt variant of the above; also stores a detailed message in cfg->exception_message. */ \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on the cfg and exit. */ \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whenever 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Lazily built in mono_create_helper_signatures (); NULL until then. */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* First expansion of mini-ops.h: emits dest/src1/src2(/src3) descriptor characters per opcode. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion: MINI_OP/MINI_OP3 are redefined to count source registers per opcode. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* Initialize a MonoMethodVar; 0xffff presumably marks "no first use seen yet" — confirm against mini.h. */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy up to three source registers from regs[] into the instruction. */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Public wrappers over the inline vreg allocators (alloc_ireg & co., defined elsewhere). */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC classification (ref / managed pointer / plain) of VREG to the new register. */
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode used when copying a value of that type
 * between registers. Enums and generic instances are unwrapped to their
 * underlying type first. (Several case labels and return statements are not
 * visible in this listing.) */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
/* Enum valuetypes are handled as their underlying integral type. */
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing. */
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and its instruction list to stdout. */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/* Build the (cached, file-static) signatures used when calling runtime trampolines.
 * Signature strings are "<ret> <args...>" as parsed by mono_create_icall_signature. */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method unverifiable; under gsharedvt this instead falls back to
 * compiling the concrete instantiation via GENERIC_SHARING_FAILED. */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up (or lazily create and register) the basic block starting at IL offset 'ip'. */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dreg = sr1 + (sr2 << shift) + imm; dreg is a managed-pointer ireg. */
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
/* On 64-bit, sign-extend an I4 operand mixed with a PTR operand before a binop. */
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
/* No-op on 32-bit targets. */
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit a typed binary op, push the (possibly decomposed) result. */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit a typed unary op, push the result. */ \
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring the true edge to the branch
 * target and the false edge to next_block (or the fall-through block at 'ip').
 * Spills any remaining stack items first so the join-point stacks match. */ \
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates.
 * Edge arrays are grown by reallocating from the cfg mempool (old arrays are
 * abandoned — mempool memory is only reclaimed when the whole pool is freed). */
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit blocks have no cil_code). */
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in) edge. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
/* Region token layout (as built below): (clause_index + 1) << 8, OR'ed with a
 * MONO_REGION_* kind and the clause flags. */
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* A filter clause's filter block runs from filter_offset up to handler_offset. */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/* Collect the clauses of kind 'type' whose try range contains 'ip' but not
 * 'target' — i.e. the handlers a branch from ip to target would leave. */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer variable associated with an EH region.
 * Cached in cfg->spvars keyed by region token. */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for a handler at 'offset';
 * same caching pattern as the spvar above, but object-typed. */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (a STACK_* constant) and inst->klass from the MonoType.
 * Enums and generic instances are unwrapped before classification. */
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values live on the stack as managed pointers. */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: valuetype under gsharedvt, otherwise treated as an object ref. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [lhs STACK_*][rhs STACK_*]. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a unary negate, indexed by operand STACK_* (presumably neg_table). */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor/rem); floats and refs are invalid. */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, nonzero = allowed (values > 1 look like
 * "allowed but questionable" grades — confirm against type_from_op's & 1 use). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* Result type of shl/shr: shift amount must be I4 or PTR; result keeps the lhs width. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to get the
 * I/L/P/F-specific OP_* opcode, indexed by operand STACK_* type. */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
/* Core type-propagation routine: besides setting ins->type, it also rewrites
 * ins->opcode from the generic CEE_* form to the type-specific OP_* form via
 * the *_op_map delta tables above. src2 may be NULL for unary ops. */
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Compares select an L/F/I variant from the width of the first operand. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1->type twice (not src2) —
 * presumably intentional for the immediate form, but worth confirming. */
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
/* & 1: only comparisons graded "fully valid" in bin_comp_table are accepted here. */
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int/long to double. */
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; 64-bit targets need a long conversion for PTR sources. */
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
/* Overflow-checked arithmetic is not defined for floats. */
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers lose their pointee class on the eval stack; default to object. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Table mapping (presumably) MONO_TYPE_* ordinals to STACK_* kinds — confirm the index domain. */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the eval-stack types of 'args' are compatible with 'sig'
 * (byref-ness, reference kinds, float widths). Returns 0 on mismatch. */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created, then cached on the cfg for the rest of the compilation. */
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on architectures that define MONO_ARCH_NEED_GOT_VAR and when AOT-compiling. */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
/* Variable holding the runtime generic context / vtable; requires generic sharing. */
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: recover a MonoType* from an
 * instruction's STACK_* kind (using ins->klass for MP/OBJ/VTYPE). */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* kind (unused in some builds, hence G_GNUC_UNUSED). */
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/* Map a CEE_LDELEM_* / CEE_STELEM_* opcode to the element MonoClass it accesses. */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/* Return a variable for eval-stack slot 'slot' holding a value of ins's stack
 * type, reusing a cached one (cfg->intvars, keyed by slot x type) when possible. */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair so AOT can later resolve this reference. */
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * NOTE(review): several interior lines (declarations of i/bindex, continues,
 * closing braces) are elided in this excerpt; the structure below follows the
 * visible code only.
 */
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables hold the out-stack. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing an in_stack already assigned to a successor bblock. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate our own set of temps. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpose, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen out_stack to each successor's in_stack; mismatched
 * stack depths at a join point make the method unverifiable. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
/* Spill each stack item to its temp and replace sp[] with the temp var. */
1398 locals = bb->out_stack;
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Loads the interface offset for KLASS from the per-vtable interface_offsets
 *   array (stored at negative offsets before the vtable) into INTF_REG.
 *   Under AOT the (adjusted) interface id is patched in at load time.
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
/* iid_reg receives the pre-scaled, negated offset (MONO_PATCH_INFO_ADJUSTED_IID) */
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: interface id is a compile-time constant, index directly */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit code that sets INTF_BIT_REG to a nonzero value iff the interface
 *   bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 *   Three strategies: an icall for compressed bitmaps, runtime bit math when
 *   the iid is an AOT patch, or a constant byte/mask load in the JIT case.
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to mono_class_interface_match () */
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* AOT: iid is only known at load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) at runtime. */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap at MonoClass.interface_bitmap. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap at MonoVTable.interface_bitmap. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whether the interface id of @klass is smaller than
1527 * the value given by max_iid_reg.
/* On failure: branch to FALSE_TARGET when given, otherwise throw
 * InvalidCastException. The comparison is unsigned (LT_UN/PBLT_UN). */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype test: look up KLASS (or the runtime class in
 *   KLASS_INS, when non-NULL) in the supertypes table of the class held in
 *   KLASS_REG. Branches to TRUE_TARGET on match, FALSE_TARGET when the
 *   hierarchy is too shallow to contain KLASS.
 *   NOTE(review): some interior lines (the non-AOT/non-inst branch ordering,
 *   closing braces) are elided in this excerpt.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
/* Only check idepth when KLASS is deeper than the guaranteed minimum table size */
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it against KLASS */
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class instruction. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in VTABLE_REG: range-check
 *   the interface id, then test the interface bitmap bit. With branch targets
 *   this behaves like isinst; without, a failed test throws
 *   InvalidCastException (castclass semantics).
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => interface implemented; otherwise fall through to throw */
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of the class in KLASS_REG against
 *   KLASS (or the runtime class in KLASS_INST when given); throws
 *   InvalidCastException on mismatch.
 *   NOTE(review): the opening `if (klass_inst)` line is elided in this excerpt.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time KLASS only. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare the class in KLASS_REG with KLASS and branch to TARGET using
 *   BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN); no exception is raised here.
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the object in OBJ_REG (whose class is in
 *   KLASS_REG) against KLASS, throwing InvalidCastException on failure.
 *   Arrays are handled by rank check plus a recursive check on the element
 *   class; other classes fall through to a supertypes-table check.
 *   NOTE(review): the surrounding `if (klass->rank)` / else structure and
 *   several closing braces are elided in this excerpt.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* Array path: ranks must match exactly */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: object/enum-base/enum element types need special
 * handling because of the valuetype/reference distinction in array variance */
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on failure */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline code to set SIZE bytes at DESTREG+OFFSET to VAL (only VAL==0
 *   is supported, see the assert). Small aligned sizes use a single store;
 *   larger regions use a sequence of register stores, widest first, narrowing
 *   for the unaligned tail.
 *   NOTE(review): the switch/case labels, loop headers and size/offset updates
 *   are elided in this excerpt.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once and store repeatedly */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Narrowing tail: 4-, 2-, then 1-byte stores for the remainder */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline code to copy SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET. Uses the widest loads/stores ALIGN permits, narrowing
 *   for the tail; unaligned copies degrade to byte-by-byte.
 *   NOTE(review): loop headers and size/offset bookkeeping are elided in this
 *   excerpt.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* Unaligned source/destination: byte-by-byte copy */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that tolerate unaligned access */
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Narrowing tail: 4-, 2-, then 1-byte copies for the remainder */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a return TYPE to the call opcode family (void/int/long/float/vtype),
 *   selecting the _REG variant for indirect calls (CALLI) and the VIRT
 *   variant for virtual calls. Enums recurse on their base type; generic
 *   instances recurse on the container class.
 *   NOTE(review): case labels between the visible lines are elided in this
 *   excerpt.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are native-pointer sized, handled up front */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
/* enums are treated as their underlying integral type */
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are returned as vtypes */
1922 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1924 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1930 * target_type_is_incompatible:
1931 * @cfg: MonoCompile context
1933 * Check that the item @arg on the evaluation stack can be stored
1934 * in the target type (can be a local, or field, etc).
1935 * The cfg arg can be used to check if we need verification or just
1938 * Returns: non-0 value if arg can't be stored on a target.
/*
 * NOTE(review): return statements and several case labels between the visible
 * lines are elided in this excerpt; the checks below compare the evaluation-
 * stack type tag (STACK_*) of ARG against what TARGET's CLI type permits.
 */
1941 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1943 MonoType *simple_type;
1946 if (target->byref) {
1947 /* FIXME: check that the pointed to types match */
1948 if (arg->type == STACK_MP)
1949 return arg->klass != mono_class_from_mono_type (target);
1950 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type */
1955 simple_type = mono_type_get_underlying_type (target);
1956 switch (simple_type->type) {
1957 case MONO_TYPE_VOID:
1961 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_CHAR:
1967 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1971 /* STACK_MP is needed when setting pinned locals */
1972 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1977 case MONO_TYPE_FNPTR:
1979 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1980 * in native int. (#688008).
1982 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 if (arg->type != STACK_I8)
2001 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack tag */
2004 case MONO_TYPE_VALUETYPE:
2005 if (arg->type != STACK_VTYPE)
2007 klass = mono_class_from_mono_type (simple_type);
2008 if (klass != arg->klass)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (arg->type != STACK_VTYPE)
2014 klass = mono_class_from_mono_type (simple_type);
2015 if (klass != arg->klass)
2018 case MONO_TYPE_GENERICINST:
2019 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2020 if (arg->type != STACK_VTYPE)
2022 klass = mono_class_from_mono_type (simple_type);
2023 if (klass != arg->klass)
2027 if (arg->type != STACK_OBJ)
2029 /* FIXME: check type compatibility */
2033 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2034 g_assert (cfg->generic_sharing_context);
2035 if (mini_type_var_is_vt (cfg, simple_type)) {
2036 if (arg->type != STACK_VTYPE)
2039 if (arg->type != STACK_OBJ)
2044 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2050 * Prepare arguments for passing to a function call.
2051 * Return a non-zero value if the arguments can't be passed to the given
2053 * The type checks are not yet complete and some conversions may need
2054 * casts on 32 or 64 bit architectures.
2056 * FIXME: implement this using target_type_is_incompatible ()
/*
 * NOTE(review): return statements and several case labels between the visible
 * lines are elided in this excerpt.
 */
2059 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2061 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer */
2065 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2069 for (i = 0; i < sig->param_count; ++i) {
2070 if (sig->params [i]->byref) {
2071 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2075 simple_type = sig->params [i];
2076 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2078 switch (simple_type->type) {
2079 case MONO_TYPE_VOID:
2084 case MONO_TYPE_BOOLEAN:
2087 case MONO_TYPE_CHAR:
2090 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2096 case MONO_TYPE_FNPTR:
2097 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2100 case MONO_TYPE_CLASS:
2101 case MONO_TYPE_STRING:
2102 case MONO_TYPE_OBJECT:
2103 case MONO_TYPE_SZARRAY:
2104 case MONO_TYPE_ARRAY:
2105 if (args [i]->type != STACK_OBJ)
2110 if (args [i]->type != STACK_I8)
2115 if (args [i]->type != STACK_R8)
2118 case MONO_TYPE_VALUETYPE:
/* enums are checked as their underlying integral type */
2119 if (simple_type->data.klass->enumtype) {
2120 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_TYPEDBYREF:
2127 if (args [i]->type != STACK_VTYPE)
2130 case MONO_TYPE_GENERICINST:
2131 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are passed as vtypes */
2136 if (args [i]->type != STACK_VTYPE)
2140 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map an OP_*CALLVIRT opcode to its direct-call counterpart; asserts on
 *   anything else. NOTE(review): case labels are elided in this excerpt.
 */
2148 callvirt_to_call (int opcode)
2153 case OP_VOIDCALLVIRT:
2162 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 *   (call through a memory-loaded address, e.g. a vtable slot).
 *   NOTE(review): some case labels are elided in this excerpt.
 */
2169 callvirt_to_call_membase (int opcode)
2173 return OP_CALL_MEMBASE;
2174 case OP_VOIDCALLVIRT:
2175 return OP_VOIDCALL_MEMBASE;
2177 return OP_FCALL_MEMBASE;
2179 return OP_LCALL_MEMBASE;
2181 return OP_VCALL_MEMBASE;
2183 g_assert_not_reached ();
2189 #ifdef MONO_ARCH_HAVE_IMT
2190 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Emit code that passes the IMT argument (either an existing IMT_ARG vreg
 *   or a constant METHOD pointer) to CALL, using MONO_ARCH_IMT_REG when the
 *   backend defines one. LLVM gets a slightly different path because the
 *   register is communicated through call->imt_arg_reg.
 */
2192 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2196 if (COMPILE_LLVM (cfg)) {
2197 method_reg = alloc_preg (cfg);
2200 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2201 } else if (cfg->compile_aot) {
2202 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2205 MONO_INST_NEW (cfg, ins, OP_PCONST);
2206 ins->inst_p0 = method;
2207 ins->dreg = method_reg;
2208 MONO_ADD_INS (cfg->cbb, ins);
2212 call->imt_arg_reg = method_reg;
2214 #ifdef MONO_ARCH_IMT_REG
2215 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2217 /* Need this to keep the IMT arg alive */
2218 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path */
2223 #ifdef MONO_ARCH_IMT_REG
2224 method_reg = alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2228 } else if (cfg->compile_aot) {
2229 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2232 MONO_INST_NEW (cfg, ins, OP_PCONST);
2233 ins->inst_p0 = method;
2234 ins->dreg = method_reg;
2235 MONO_ADD_INS (cfg->cbb, ins);
2238 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Backends without an IMT reg provide their own emission hook */
2240 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP whose
 *   payload is TARGET. NOTE(review): assignments of ji->ip/ji->type are
 *   elided in this excerpt.
 */
2245 static MonoJumpInfo *
2246 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2248 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2252 ji->data.target = target;
/* Returns the context-used flags for KLASS, or 0 when not compiling with
 * generic sharing. */
2258 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2260 if (cfg->generic_sharing_context)
2261 return mono_class_check_context_used (klass);
/* Returns the context-used flags for METHOD, or 0 when not compiling with
 * generic sharing. */
2267 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2269 if (cfg->generic_sharing_context)
2270 return mono_method_check_context_used (method);
2276 * check_method_sharing:
2278 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Results are returned through the optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX
 * pointers; pass NULL to ignore either one. */
2281 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2283 gboolean pass_vtable = FALSE;
2284 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable */
2286 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2287 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2288 gboolean sharable = FALSE;
2290 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2293 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2294 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2295 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2297 sharable = sharing_enabled && context_sharable;
2301 * Pass vtable iff target method might
2302 * be shared, which means that sharing
2303 * is enabled for its class and its
2304 * context is sharable (and it's not a
2307 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) need an mrgctx instead */
2311 if (mini_method_get_context (cmethod) &&
2312 mini_method_get_context (cmethod)->method_inst) {
2313 g_assert (!pass_vtable);
2315 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2319 MonoGenericContext *context = mini_method_get_context (cmethod);
2320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2322 if (sharing_enabled && context_sharable)
2324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2329 if (out_pass_vtable)
2330 *out_pass_vtable = pass_vtable;
2331 if (out_pass_mrgctx)
2332 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for SIG/ARGS, choosing the opcode from the return
 *   type and the calli/virtual/tail flags, setting up vtype-return plumbing
 *   (vret_var / OP_OUTARG_VTRETADDR), soft-float r8->r4 argument conversion,
 *   and finally lowering the arguments via the arch/LLVM backend.
 *   NOTE(review): interior lines (declarations, braces, some branches) are
 *   elided in this excerpt.
 */
2335 inline static MonoCallInst *
2336 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2337 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2340 #ifdef MONO_ARCH_SOFT_FLOAT
2345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2347 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2350 call->signature = sig;
2351 call->rgctx_reg = rgctx;
2353 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Tail calls with a vtype return reuse the caller's vret_addr */
2356 if (mini_type_is_vtype (cfg, sig->ret)) {
2357 call->vret_var = cfg->vret_addr;
2358 //g_assert_not_reached ();
2360 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2361 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2364 temp->backend.is_pinvoke = sig->pinvoke;
2367 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2368 * address of return value to increase optimization opportunities.
2369 * Before vtype decomposition, the dreg of the call ins itself represents the
2370 * fact the call modifies the return value. After decomposition, the call will
2371 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2372 * will be transformed into an LDADDR.
2374 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2375 loada->dreg = alloc_preg (cfg);
2376 loada->inst_p0 = temp;
2377 /* We reference the call too since call->dreg could change during optimization */
2378 loada->inst_p1 = call;
2379 MONO_ADD_INS (cfg->cbb, loada);
2381 call->inst.dreg = temp->dreg;
2383 call->vret_var = loada;
2384 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2385 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2387 #ifdef MONO_ARCH_SOFT_FLOAT
2388 if (COMPILE_SOFT_FLOAT (cfg)) {
2390 * If the call has a float argument, we would need to do an r8->r4 conversion using
2391 * an icall, but that cannot be done during the call sequence since it would clobber
2392 * the call registers + the stack. So we do it before emitting the call.
2394 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2396 MonoInst *in = call->args [i];
2398 if (i >= sig->hasthis)
2399 t = sig->params [i - sig->hasthis];
2401 t = &mono_defaults.int_class->byval_arg;
2402 t = mono_type_get_underlying_type (t);
2404 if (!t->byref && t->type == MONO_TYPE_R4) {
2405 MonoInst *iargs [1];
2409 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2411 /* The result will be in an int vreg */
2412 call->args [i] = conv;
2418 call->need_unbox_trampoline = unbox_trampoline;
2421 if (COMPILE_LLVM (cfg))
2422 mono_llvm_emit_call (cfg, call);
2424 mono_arch_emit_call (cfg, call);
2426 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call */
2429 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2430 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument in RGCTX_REG to CALL; on targets with a
 *   dedicated MONO_ARCH_RGCTX_REG it becomes an out-reg, otherwise the vreg
 *   is recorded in call->rgctx_arg_reg for the backend.
 */
2436 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2438 #ifdef MONO_ARCH_RGCTX_REG
2439 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2440 cfg->uses_rgctx_reg = TRUE;
2441 call->rgctx_reg = TRUE;
2443 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG, optionally
 *   attaching IMT and rgctx arguments. The rgctx value is copied into a fresh
 *   vreg before emission so it survives argument lowering.
 */
2450 inline static MonoInst*
2451 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2457 rgctx_reg = mono_alloc_preg (cfg);
2458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2461 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* sreg1 carries the call target address for OP_*CALL_REG opcodes */
2463 call->inst.sreg1 = addr->dreg;
2466 emit_imt_argument (cfg, call, NULL, imt_arg);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2471 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2473 return (MonoInst*)call;
2477 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2480 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2482 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG.
 * THIS non-NULL selects virtual dispatch; TAIL marks a tail call;
 * IMT_ARG/RGCTX_ARG are optional hidden arguments. Handles:
 * string ctors (rewritten to return string), transparent-proxy/remoting
 * wrappers, delegate Invoke fast path, devirtualization of final/
 * non-virtual methods, and IMT/vtable slot dispatch.
 * NOTE(review): many lines (braces, else branches, declarations) are
 * elided from this excerpt; comments below only describe visible code.
 */
2485 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2486 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2488 #ifndef DISABLE_REMOTING
2489 gboolean might_be_remote = FALSE;
2491 gboolean virtual = this != NULL;
2492 gboolean enable_for_aot = TRUE;
2496 gboolean need_unbox_trampoline;
2499 sig = mono_method_signature (method);
2502 rgctx_reg = mono_alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2506 if (method->string_ctor) {
2507 /* Create the real signature */
2508 /* FIXME: Cache these */
2509 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2510 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2515 context_used = mini_method_check_context_used (cfg, method);
/* Remoting: a non-virtual call on a MarshalByRef (or object) receiver may
 * actually target a transparent proxy, so route through a check wrapper. */
2517 #ifndef DISABLE_REMOTING
2518 might_be_remote = this && sig->hasthis &&
2519 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2520 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2522 if (might_be_remote && context_used) {
2525 g_assert (cfg->generic_sharing_context);
2527 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2529 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype methods reached through object/interface need an unbox trampoline. */
2533 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2537 #ifndef DISABLE_REMOTING
2538 if (might_be_remote)
2539 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2542 call->method = method;
2543 call->inst.flags |= MONO_INST_HAS_METHOD;
2544 call->inst.inst_left = this;
2545 call->tail_call = tail;
2548 int vtable_reg, slot_reg, this_reg;
2551 this_reg = this->dreg;
/* Fast path: delegate Invoke calls jump straight through delegate->invoke_impl. */
2553 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2554 MonoInst *dummy_use;
2556 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2558 /* Make a call to delegate->invoke_impl */
2559 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2560 call->inst.inst_basereg = this_reg;
2561 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2562 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2564 /* We must emit a dummy use here because the delegate trampoline will
2565 replace the 'this' argument with the delegate target making this activation
2566 no longer a root for the delegate.
2567 This is an issue for delegates that target collectible code such as dynamic
2568 methods of GC'able assemblies.
2570 For a test case look into #667921.
2572 FIXME: a dummy use is not the best way to do it as the local register allocator
2573 will put it on a caller save register and spil it around the call.
2574 Ideally, we would either put it on a callee save register or only do the store part.
2576 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2578 return (MonoInst*)call;
/* Devirtualization: non-virtual, or virtual-but-final methods can be
 * dispatched statically after a null check on 'this'. */
2581 if ((!cfg->compile_aot || enable_for_aot) &&
2582 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2583 (MONO_METHOD_IS_FINAL (method) &&
2584 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2585 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2587 * the method is not virtual, we just need to ensure this is not null
2588 * and then we can call the method directly.
2590 #ifndef DISABLE_REMOTING
2591 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2593 * The check above ensures method is not gshared, this is needed since
2594 * gshared methods can't have wrappers.
2596 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2600 if (!method->string_ctor)
2601 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2603 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2604 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2606 * the method is virtual, but we can statically dispatch since either
2607 * it's class or the method itself are sealed.
2608 * But first we need to ensure it's not a null reference.
2610 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2612 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load vtable from the receiver, then call
 * through either an IMT slot (interfaces) or a vtable slot. */
2614 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2616 vtable_reg = alloc_preg (cfg);
2617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2618 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2620 #ifdef MONO_ARCH_HAVE_IMT
2622 guint32 imt_slot = mono_method_get_imt_slot (method);
2623 emit_imt_argument (cfg, call, call->method, imt_arg);
2624 slot_reg = vtable_reg;
/* IMT slots live at negative offsets before the vtable. */
2625 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2628 if (slot_reg == -1) {
2629 slot_reg = alloc_preg (cfg);
2630 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2631 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2634 slot_reg = vtable_reg;
2635 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2636 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2637 #ifdef MONO_ARCH_HAVE_IMT
2639 g_assert (mono_method_signature (method)->generic_param_count);
2640 emit_imt_argument (cfg, call, call->method, imt_arg);
2645 call->inst.sreg1 = slot_reg;
2646 call->inst.inst_offset = offset;
2647 call->virtual = TRUE;
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2654 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2656 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: call METHOD with its own signature,
 * no tail call, no IMT or rgctx hidden arguments.
 */
2660 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2662 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to native code at FUNC with signature SIG.
 * NOTE(review): lines that store FUNC into the call instruction are
 * elided from this excerpt.
 */
2666 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2673 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2676 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2678 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by FUNC's
 * address; the call goes through the icall's wrapper, using the
 * signature registered with the icall.
 */
2682 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2684 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2688 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2692 * mono_emit_abs_call:
2694 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2696 inline static MonoInst*
2697 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2698 MonoMethodSignature *sig, MonoInst **args)
2700 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2704 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji so the ABS patch resolver can recognize the fake address. */
2707 if (cfg->abs_patches == NULL)
2708 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2709 g_hash_table_insert (cfg->abs_patches, ji, ji);
2710 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch, not a real native address. */
2711 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen a small-integer call result to full register width.
 * Needed for pinvoke (and LLVM) returns: native code might leave the
 * upper bits of sub-register integers uninitialized. Returns the
 * (possibly replaced) result instruction.
 */
2716 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2719 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2723 * Native code might return non register sized integers
2724 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2726 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2727 case OP_LOADI1_MEMBASE:
2728 widen_op = OP_ICONV_TO_I1;
2730 case OP_LOADU1_MEMBASE:
2731 widen_op = OP_ICONV_TO_U1;
2733 case OP_LOADI2_MEMBASE:
2734 widen_op = OP_ICONV_TO_I2;
2736 case OP_LOADU2_MEMBASE:
2737 widen_op = OP_ICONV_TO_U2;
2743 if (widen_op != -1) {
2744 int dreg = alloc_preg (cfg);
2747 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2748 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the corlib-internal String.memcpy
 * helper method; aborts if the corlib is too old to provide it.
 */
2758 get_memcpy_method (void)
2760 static MonoMethod *memcpy_method = NULL;
2761 if (!memcpy_method) {
2762 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2764 g_error ("Old corlib found. Install a new one");
2766 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap (one bit per pointer-sized slot,
 * relative to OFFSET) of which fields of KLASS hold object
 * references, so the wb-aware memcpy knows where barriers are needed.
 * Static fields are skipped; nested value types with references
 * recurse with the adjusted offset.
 */
2770 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2772 MonoClassField *field;
2773 gpointer iter = NULL;
2775 while ((field = mono_class_get_fields (klass, &iter))) {
2778 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the MonoObject header; strip it. */
2780 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2781 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2782 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2783 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2785 MonoClass *field_class = mono_class_from_mono_type (field->type);
2786 if (field_class->has_references)
2787 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a reference store through PTR.
 * The stored value is given either as VALUE (an instruction) or as
 * VALUE_REG (a vreg) — NOTE(review): the guards choosing between them
 * are partially elided here. Prefers, in order: an arch-specific
 * card-table barrier opcode, inline card-table marking, or a call to
 * the generic GC write-barrier method. A dummy use keeps the value
 * alive across the barrier.
 */
2793 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2795 int card_table_shift_bits;
2796 gpointer card_table_mask;
2798 MonoInst *dummy_use;
2800 int nursery_shift_bits;
2800 size_t nursery_size;
2801 gboolean has_card_table_wb = FALSE;
/* Nothing to do if this compile does not generate write barriers. */
2803 if (!cfg->gen_write_barriers)
2806 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2808 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2810 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2811 has_card_table_wb = TRUE;
2814 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2817 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2818 wbarrier->sreg1 = ptr->dreg;
2820 wbarrier->sreg2 = value->dreg;
2822 wbarrier->sreg2 = value_reg;
2823 MONO_ADD_INS (cfg->cbb, wbarrier);
2824 } else if (card_table) {
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2825 int offset_reg = alloc_preg (cfg);
2826 int card_reg = alloc_preg (cfg);
2829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2830 if (card_table_mask)
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2833 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2834 * IMM's larger than 32bits.
2836 if (cfg->compile_aot) {
2837 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2839 MONO_INST_NEW (cfg, ins, OP_PCONST);
2840 ins->inst_p0 = card_table;
2841 ins->dreg = card_reg;
2842 MONO_ADD_INS (cfg->cbb, ins);
2845 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic GC write barrier on PTR. */
2848 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2849 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value observable past the barrier for the GC. */
2853 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2855 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2856 dummy_use->sreg1 = value_reg;
2857 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS.
 * iargs[0]/iargs[1] hold dest/src addresses. Returns via early exits
 * (elided here) when the copy cannot be inlined: alignment below
 * pointer size, size too large for the 32-slot bitmap, or more than
 * 5 pointer-sized stores (then a bitmap-driven icall is used instead).
 * NOTE(review): return statements and several loop braces are elided
 * from this excerpt.
 */
2862 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2864 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2865 unsigned need_wb = 0;
2870 /*types with references can't have alignment smaller than sizeof(void*) */
2871 if (align < SIZEOF_VOID_P)
2874 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2875 if (size > 32 * SIZEOF_VOID_P)
2878 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2880 /* We don't unroll more than 5 stores to avoid code bloat. */
2881 if (size > 5 * SIZEOF_VOID_P) {
2882 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2883 size += (SIZEOF_VOID_P - 1);
2884 size &= ~(SIZEOF_VOID_P - 1);
2886 EMIT_NEW_ICONST (cfg, iargs [2], size);
2887 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2888 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2892 destreg = iargs [0]->dreg;
2893 srcreg = iargs [1]->dreg;
2896 dest_ptr_reg = alloc_preg (cfg);
2897 tmp_reg = alloc_preg (cfg);
/* Walk a moving destination pointer while copying pointer-sized slots. */
2900 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2902 while (size >= SIZEOF_VOID_P) {
2903 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for the slots the bitmap marked as references (guard elided). */
2907 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2909 offset += SIZEOF_VOID_P;
2910 size -= SIZEOF_VOID_P;
2913 /*tmp += sizeof (void*)*/
2914 if (size >= SIZEOF_VOID_P) {
2915 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2916 MONO_ADD_INS (cfg->cbb, iargs [0]);
2920 /* Those cannot be references since size < sizeof (void*) */
2922 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2923 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2946 * Emit code to copy a valuetype of type @klass whose address is stored in
2947 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NOTE(review): the copy is done with (in priority order) a
 * wb-aware inline copy, a value-copy icall (when write barriers and
 * references are involved), an inline memcpy for small sizes, or a
 * call to the corlib memcpy helper; gsharedvt types take the
 * size/memcpy values from the rgctx. Several guards are elided here.
 */
2950 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2952 MonoInst *iargs [4];
2953 int context_used, n;
2955 MonoMethod *memcpy_method;
2956 MonoInst *size_ins = NULL;
2957 MonoInst *memcpy_ins = NULL;
2961 * This check breaks with spilled vars... need to handle it during verification anyway.
2962 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime. */
2965 if (mini_is_gsharedvt_klass (cfg, klass)) {
2967 context_used = mini_class_check_context_used (cfg, klass);
2968 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2969 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
2973 n = mono_class_native_size (klass, &align);
2975 n = mono_class_value_size (klass, &align);
2977 /* if native is true there should be no references in the struct */
2978 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2979 /* Avoid barriers when storing to the stack */
2980 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2981 (dest->opcode == OP_LDADDR))) {
2987 context_used = mini_class_check_context_used (cfg, klass);
2989 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2990 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2992 } else if (context_used) {
2993 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2995 if (cfg->compile_aot) {
2996 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2998 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The value-copy icall needs the GC descriptor precomputed. */
2999 mono_class_compute_gc_descriptor (klass);
3004 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3006 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: small copies are fully inlined. */
3011 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3012 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3013 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3018 iargs [2] = size_ins;
3020 EMIT_NEW_ICONST (cfg, iargs [2], n);
3022 memcpy_method = get_memcpy_method ();
3024 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3026 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the corlib-internal String.memset
 * helper method; aborts if the corlib is too old to provide it.
 */
3031 get_memset_method (void)
3033 static MonoMethod *memset_method = NULL;
3034 if (!memset_method) {
3035 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3037 g_error ("Old corlib found. Install a new one");
3039 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at the address
 * in DEST. gsharedvt types call a runtime-resolved bzero with a
 * runtime-resolved size; otherwise small sizes are inlined and
 * larger ones call the corlib memset helper with 0.
 */
3043 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3045 MonoInst *iargs [3];
3046 int n, context_used;
3048 MonoMethod *memset_method;
3049 MonoInst *size_ins = NULL;
3050 MonoInst *bzero_ins = NULL;
3051 static MonoMethod *bzero_method;
3053 /* FIXME: Optimize this for the case when dest is an LDADDR */
3055 mono_class_init (klass);
3056 if (mini_is_gsharedvt_klass (cfg, klass)) {
3057 context_used = mini_class_check_context_used (cfg, klass);
3058 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3059 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* bzero_aligned_1 is the corlib signature used for the indirect call. */
3061 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3062 g_assert (bzero_method);
3064 iargs [1] = size_ins;
3065 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3069 n = mono_class_value_size (klass, &align);
/* Small values: inline the memset. */
3071 if (n <= sizeof (gpointer) * 5) {
3072 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3075 memset_method = get_memset_method ();
3077 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3078 EMIT_NEW_ICONST (cfg, iargs [2], n);
3079 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context usable by METHOD.
 * The source depends on how the method is shared:
 * - method-inflated sharing: the MRGCTX passed in the mrgctx variable;
 * - static/valuetype methods: the vtable variable (possibly reached
 *   through the MRGCTX's class_vtable field);
 * - otherwise: the vtable loaded from the 'this' argument.
 * NOTE(review): several return statements and braces are elided here.
 */
3084 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3086 MonoInst *this = NULL;
3088 g_assert (cfg->generic_sharing_context);
3090 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3091 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3092 !method->klass->valuetype)
3093 EMIT_NEW_ARGLOAD (cfg, this, 0);
3095 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3096 MonoInst *mrgctx_loc, *mrgctx_var;
3099 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3101 mrgctx_loc = mono_get_vtable_var (cfg);
3102 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3105 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3106 MonoInst *vtable_loc, *vtable_var;
3110 vtable_loc = mono_get_vtable_var (cfg);
3111 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3113 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3114 MonoInst *mrgctx_var = vtable_var;
3117 vtable_reg = alloc_preg (cfg);
3118 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3119 vtable_var->type = STACK_PTR;
/* Default: fetch the vtable from the receiver object. */
3127 vtable_reg = alloc_preg (cfg);
3128 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor: which method's
 * context, whether it is an MRGCTX, the embedded patch (type + data)
 * identifying the slot content, and the kind of info requested.
 */
3133 static MonoJumpInfoRgctxEntry *
3134 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3136 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3137 res->method = method;
3138 res->in_mrgctx = in_mrgctx;
3139 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3140 res->data->type = patch_type;
3141 res->data->data.target = patch_data;
3142 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves
 * ENTRY against the context value RGCTX at runtime.
 */
3147 static inline MonoInst*
3148 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3150 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass / _sig / _gsharedvt_call / _gsharedvt_method /
 * _method / _field:
 *
 *   A family of thin wrappers that build an rgctx-entry patch for a
 * particular kind of runtime data (class, signature, gsharedvt call
 * info, method, field), load the current method's rgctx, and emit a
 * lazy fetch through the trampoline.
 */
3154 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3155 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3157 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3158 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3160 return emit_rgctx_fetch (cfg, rgctx, entry);
3164 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3165 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3167 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3168 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3170 return emit_rgctx_fetch (cfg, rgctx, entry);
3174 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3175 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3177 MonoJumpInfoGSharedVtCall *call_info;
3178 MonoJumpInfoRgctxEntry *entry;
3181 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3182 call_info->sig = sig;
3183 call_info->method = cmethod;
3185 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3186 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3188 return emit_rgctx_fetch (cfg, rgctx, entry);
3193 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3194 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3196 MonoJumpInfoRgctxEntry *entry;
3199 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3200 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3202 return emit_rgctx_fetch (cfg, rgctx, entry);
3206 * emit_get_rgctx_method:
3208 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3209 * normal constants, else emit a load from the rgctx.
3212 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3213 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3215 if (!context_used) {
3218 switch (rgctx_type) {
3219 case MONO_RGCTX_INFO_METHOD:
3220 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3222 case MONO_RGCTX_INFO_METHOD_RGCTX:
3223 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3226 g_assert_not_reached ();
3229 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3230 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3232 return emit_rgctx_fetch (cfg, rgctx, entry);
3237 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3238 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3240 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3241 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3243 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry for (DATA,
 * RGCTX_TYPE), reusing an existing entry when one matches (except
 * LOCAL_OFFSET entries, which are never shared) and appending a new
 * template otherwise.
 */
3247 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3249 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3250 MonoRuntimeGenericContextInfoTemplate *template;
3255 for (i = 0; i < info->entries->len; ++i) {
3256 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3258 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3262 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3263 template->info_type = rgctx_type;
3264 template->data = data;
3266 idx = info->entries->len;
3268 g_ptr_array_add (info->entries, template);
3274 * emit_get_gsharedvt_info:
3276 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3279 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3284 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3285 /* Load info->entries [idx] */
3286 dreg = alloc_preg (cfg);
3287 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's type.
 */
3293 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3295 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3299 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.
 * The vtable argument comes from the rgctx under generic sharing,
 * or is a vtable constant otherwise; it is passed in the dedicated
 * vtable register when the architecture defines one.
 */
3302 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3304 MonoInst *vtable_arg;
3308 context_used = mini_class_check_context_used (cfg, klass);
3311 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3312 klass, MONO_RGCTX_INFO_VTABLE);
3314 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3318 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3321 if (COMPILE_LLVM (cfg))
3322 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3324 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3325 #ifdef MONO_ARCH_VTABLE_REG
3326 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3327 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at native-offset IP, but only when
 * sequence points are enabled and METHOD is the method being
 * compiled (not an inlined callee). INTR_LOC marks an interruptible
 * location.
 */
3334 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3338 if (cfg->gen_seq_points && cfg->method == method) {
3339 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3340 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (loaded
 * from the object in OBJ_REG) and destination KLASS into the JIT TLS
 * data, so a failed cast can produce a detailed message. Requires a
 * TLS intrinsic; otherwise prints an error (the guard is partially
 * elided in this excerpt).
 */
3345 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3347 if (mini_get_debug_options ()->better_cast_details) {
3348 int to_klass_reg = alloc_preg (cfg);
3349 int vtable_reg = alloc_preg (cfg);
3350 int klass_reg = alloc_preg (cfg);
3351 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3354 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3358 MONO_ADD_INS (cfg->cbb, tls_get);
/* klass = obj->vtable->klass */
3359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3363 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details TLS state set by save_cast_details once the
 * cast has succeeded; zeroing class_cast_from is sufficient.
 */
3369 reset_cast_details (MonoCompile *cfg)
3371 /* Reset the variables holding the cast details */
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3375 MONO_ADD_INS (cfg->cbb, tls_get);
3376 /* It is enough to reset the from field */
3377 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3382 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a type check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem
 * covariance checks). Strategy depends on the compile mode: class
 * comparison under MONO_OPT_SHARED, rgctx-loaded vtable comparison
 * under generic sharing, vtable constant otherwise.
 */
3385 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3387 int vtable_reg = alloc_preg (cfg);
3390 context_used = mini_class_check_context_used (cfg, array_class);
3392 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load doubles as the null check on obj. */
3394 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3396 if (cfg->opt & MONO_OPT_SHARED) {
3397 int class_reg = alloc_preg (cfg);
3398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3399 if (cfg->compile_aot) {
3400 int klass_reg = alloc_preg (cfg);
3401 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3402 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3404 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3406 } else if (context_used) {
3407 MonoInst *vtable_ins;
3409 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3410 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3412 if (cfg->compile_aot) {
3416 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3418 vt_reg = alloc_preg (cfg);
3419 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3420 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3423 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3429 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3431 reset_cast_details (cfg);
3435 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3436 * generic code is generated.
3439 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3441 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3444 MonoInst *rgctx, *addr;
3446 /* FIXME: What if the class is shared? We might not
3447 have to get the address of the method from the
/* Shared code: fetch Nullable<T>.Unbox's address from the rgctx and
 * call it indirectly. */
3449 addr = emit_get_rgctx_method (cfg, context_used, method,
3450 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3452 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3454 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call; pass the vtable as the rgctx arg when the
 * method requires one. */
3456 gboolean pass_vtable, pass_mrgctx;
3457 MonoInst *rgctx_arg = NULL;
3459 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3460 g_assert (!pass_mrgctx);
3463 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3466 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3469 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp[0] to KLASS: verify the object is a boxed
 * value of the right element class (throwing InvalidCastException
 * otherwise) and return the address just past the MonoObject header.
 * Under generic sharing the expected element class is loaded from
 * the rgctx instead of being a compile-time constant.
 */
3474 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3478 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3479 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3480 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3481 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3483 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3484 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3487 /* FIXME: generics */
3488 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype: rank must be 0. */
3491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3492 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3498 MonoInst *element_class;
3500 /* This assertion is from the unboxcast insn */
3501 g_assert (klass->rank == 0);
3503 element_class = emit_get_rgctx_klass (cfg, context_used,
3504 klass->element_class, MONO_RGCTX_INFO_KLASS);
3506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3507 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3509 save_cast_details (cfg, klass->element_class, obj_reg);
3510 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3511 reset_cast_details (cfg);
/* Result: address of the value = obj + sizeof (MonoObject). */
3514 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3515 MONO_ADD_INS (cfg->cbb, add);
3516 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ to a gsharedvt KLASS whose representation (reference,
 * value type, or nullable) is only known at runtime. Branches on the
 * runtime CLASS_BOX_TYPE info: value types yield obj + header size;
 * references are stored to a temporary whose address is used;
 * nullables go through the runtime-resolved Nullable unbox routine.
 * Returns a load of the value at the computed address and updates
 * *OUT_CBB to the current basic block.
 * NOTE(review): several braces/guards are elided from this excerpt —
 * the box-type constants 1 and 2 are compared as emitted below.
 */
3523 handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3525 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3526 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3530 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3536 args [1] = klass_inst;
/* castclass validates the dynamic type before unboxing. */
3539 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
3541 NEW_BBLOCK (cfg, is_ref_bb);
3542 NEW_BBLOCK (cfg, is_nullable_bb);
3543 NEW_BBLOCK (cfg, end_bb);
3544 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3551 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3552 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the value lives right after the object header. */
3556 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3557 MONO_ADD_INS (cfg->cbb, addr);
3559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3562 MONO_START_BB (cfg, is_ref_bb);
3564 /* Save the ref to a temporary */
3565 dreg = alloc_ireg (cfg);
3566 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3567 addr->dreg = addr_reg;
3568 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3572 MONO_START_BB (cfg, is_nullable_bb);
3575 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3576 MonoInst *unbox_call;
3577 MonoMethodSignature *unbox_sig;
3580 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Build a one-argument (object) -> T signature for the indirect call. */
3582 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3583 unbox_sig->ret = &klass->byval_arg;
3584 unbox_sig->param_count = 1;
3585 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3586 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3588 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3589 addr->dreg = addr_reg;
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3595 MONO_START_BB (cfg, end_bb);
3598 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3600 *out_cbb = cfg->cbb;
3606 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS and return the MonoInst holding
 * the new object. FOR_BOX selects the boxing-specialized allocator where the GC
 * provides one. CONTEXT_USED != 0 means KLASS must be resolved through the
 * RGCTX at runtime (generic sharing). Picks, in order of preference: a
 * GC-managed inline allocator, a corlib-specialized AOT helper, or a generic
 * JIT icall (mono_object_new / mono_object_new_specific).
 * Returns NULL and sets the cfg exception on error (see TYPE_LOAD path below).
 *
 * NOTE(review): original line numbers are non-contiguous; some braces/else
 * branches are elided in this excerpt.
 */
3609 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3611 MonoInst *iargs [2];
/* --- generic-sharing path (context_used) --- */
3617 MonoInst *iargs [2];
3619 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* With MONO_OPT_SHARED the allocator takes (domain, klass); otherwise it
 * takes the vtable directly, so fetch the matching RGCTX slot. */
3621 if (cfg->opt & MONO_OPT_SHARED)
3622 rgctx_info = MONO_RGCTX_INFO_KLASS;
3624 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3625 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3627 if (cfg->opt & MONO_OPT_SHARED) {
3628 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3630 alloc_ftn = mono_object_new;
3633 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's inlined managed allocator when not domain-shared. */
3636 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3637 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3639 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared-generics path --- */
3642 if (cfg->opt & MONO_OPT_SHARED) {
3643 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3644 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3646 alloc_ftn = mono_object_new;
3647 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3648 /* This happens often in argument checking code, eg. throw new FooException... */
3649 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3650 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3651 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3653 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3654 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> report a type-load error on the cfg. */
3658 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3659 cfg->exception_ptr = klass;
3663 #ifndef MONO_CROSS_COMPILE
3664 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3667 if (managed_alloc) {
3668 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3669 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to the per-class allocation function; PASS_LW means it wants
 * the instance size in pointer-sized words as the first argument. */
3671 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3673 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to a whole number of pointer words. */
3674 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3675 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3676 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3679 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3683 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3687 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL (of type KLASS) into a heap object and return the
 * MonoInst holding the boxed reference. Special cases:
 *   - Nullable<T>: call Nullable<T>.Box (directly, or through the RGCTX when
 *     CONTEXT_USED != 0).
 *   - gsharedvt classes: runtime three-way branch on the box-type classifier
 *     (1 == reference type, 2 == nullable, otherwise plain vtype).
 *   - plain value types: allocate and store the value after the object header.
 * *OUT_CBB receives the basic block that is current when emission finishes.
 * Returns NULL and sets the cfg exception on error (via handle_alloc).
 *
 * NOTE(review): original line numbers are non-contiguous; some statements
 * (e.g. the final return, some braces) are elided in this excerpt.
 */
3690 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3692 MonoInst *alloc, *ins;
3694 *out_cbb = cfg->cbb;
3696 if (mono_class_is_nullable (klass)) {
3697 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3700 /* FIXME: What if the class is shared? We might not
3701 have to get the method address from the RGCTX. */
3702 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3703 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3704 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: call Box directly, passing the vtable if needed. */
3708 gboolean pass_vtable, pass_mrgctx;
3709 MonoInst *rgctx_arg = NULL;
3711 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3712 g_assert (!pass_mrgctx);
3715 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3718 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3721 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3725 if (mini_is_gsharedvt_klass (cfg, klass)) {
3726 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3727 MonoInst *res, *is_ref, *src_var, *addr;
3730 dreg = alloc_ireg (cfg);
3732 NEW_BBLOCK (cfg, is_ref_bb);
3733 NEW_BBLOCK (cfg, is_nullable_bb);
3734 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box-type classifier fetched from the RGCTX. */
3735 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3737 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and copy the value past the MonoObject header. */
3743 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3747 ins->opcode = OP_STOREV_MEMBASE;
3749 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3750 res->type = STACK_OBJ;
3752 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference-type path: "boxing" a ref is the identity; load the ref out of
 * the vtype-typed source variable manually. */
3755 MONO_START_BB (cfg, is_ref_bb);
3756 addr_reg = alloc_ireg (cfg);
3758 /* val is a vtype, so has to load the value manually */
3759 src_var = get_vreg_to_inst (cfg, val->dreg);
3761 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3762 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: call Nullable<T>.Box through an RGCTX-resolved address with
 * a hand-built T->object signature. */
3767 MONO_START_BB (cfg, is_nullable_bb);
3770 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3771 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3773 MonoMethodSignature *box_sig;
3776 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3777 * construct that method at JIT time, so have to do things by hand.
3779 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3780 box_sig->ret = &mono_defaults.object_class->byval_arg;
3781 box_sig->param_count = 1;
3782 box_sig->params [0] = &klass->byval_arg;
3783 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3784 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3785 res->type = STACK_OBJ;
3789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3791 MONO_START_BB (cfg, end_bb);
3793 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt) value type: allocate, then store the value. */
3797 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3801 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, when CONTEXT_USED != 0, an
 * open generic) that binds at least one covariant/contravariant type parameter
 * to a reference type. Such classes need the full variance-aware cast helpers
 * rather than the fast inline cast sequences.
 */
3808 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3811 MonoGenericContainer *container;
3812 MonoGenericInst *ginst;
3814 if (klass->generic_class) {
3815 container = klass->generic_class->container_class->generic_container;
3816 ginst = klass->generic_class->context.class_inst;
3817 } else if (klass->generic_container && context_used) {
3818 container = klass->generic_container;
3819 ginst = container->context.class_inst;
3824 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant type parameters; only variant ones affect casting. */
3826 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3828 type = ginst->type_argv [i];
3829 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   Classes for which the fast inline isinst/castclass sequence is not
 * sufficient (interfaces, arrays, nullables, MarshalByRef, sealed classes,
 * and open type variables). NOTE: the leading "TRUE ||" deliberately forces
 * the predicate to always be true, routing every cast through the
 * cache-based helpers until the FIXME below is resolved.
 */
3835 // FIXME: This doesn't work yet (class libs tests fail?)
3836 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3839 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL `castclass` semantics for SRC against KLASS:
 * NULL passes through unchanged, a failing check throws InvalidCastException.
 * Complex classes (see is_complex_isinst) go through the cache-based managed
 * helper; otherwise an inline vtable/klass comparison sequence is emitted.
 * Returns NULL and sets the cfg exception on error.
 *
 * NOTE(review): original line numbers are non-contiguous; some statements and
 * braces are elided in this excerpt.
 */
3842 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3844 MonoBasicBlock *is_null_bb;
3845 int obj_reg = src->dreg;
3846 int vtable_reg = alloc_preg (cfg);
3847 MonoInst *klass_inst = NULL;
/* Slow path: variance or complex class -> managed castclass-with-cache. */
3852 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3853 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3854 MonoInst *cache_ins;
3856 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3861 /* klass - it's the second element of the cache entry*/
3862 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3865 args [2] = cache_ins;
3867 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3870 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3873 NEW_BBLOCK (cfg, is_null_bb);
/* castclass of NULL succeeds: skip the check entirely. */
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Record class/object info so a failing cast can produce a detailed message. */
3878 save_cast_details (cfg, klass, obj_reg);
3880 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3882 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3884 int klass_reg = alloc_preg (cfg);
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single pointer comparison suffices. */
3888 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3889 /* the remoting code is broken, access the class for now */
3890 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3891 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3893 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3894 cfg->exception_ptr = klass;
3897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3899 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3902 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the full castclass sequence. */
3904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3905 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3909 MONO_START_BB (cfg, is_null_bb);
3911 reset_cast_details (cfg);
3917 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL `isinst` semantics for SRC against KLASS:
 * RES_REG ends up holding the original reference when the test succeeds (or
 * when SRC is NULL), and 0 when it fails — no exception is ever thrown.
 * Complex classes go through the cache-based managed helper; interfaces,
 * arrays, nullables and sealed classes each get a specialized inline check.
 * Returns NULL and sets the cfg exception on error.
 *
 * NOTE(review): original line numbers are non-contiguous; some statements and
 * braces are elided in this excerpt.
 */
3920 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3923 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3924 int obj_reg = src->dreg;
3925 int vtable_reg = alloc_preg (cfg);
3926 int res_reg = alloc_ireg_ref (cfg);
3927 MonoInst *klass_inst = NULL;
/* Slow path: variance or complex class -> managed isinst-with-cache. */
3932 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3933 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3934 MonoInst *cache_ins;
3936 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3941 /* klass - it's the second element of the cache entry*/
3942 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3945 args [2] = cache_ins;
3947 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3950 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3953 NEW_BBLOCK (cfg, is_null_bb);
3954 NEW_BBLOCK (cfg, false_bb);
3955 NEW_BBLOCK (cfg, end_bb);
3957 /* Do the assignment at the beginning, so the other assignment can be if converted */
3958 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3959 ins->type = STACK_OBJ;
/* isinst of NULL yields the NULL already copied into res_reg. */
3962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3963 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3967 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3968 g_assert (!context_used);
3969 /* the is_null_bb target simply copies the input register to the output */
3970 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3972 int klass_reg = alloc_preg (cfg);
/* Array case: match rank first, then check the element class. */
3975 int rank_reg = alloc_preg (cfg);
3976 int eclass_reg = alloc_preg (cfg);
3978 g_assert (!context_used);
3979 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[]-style targets: enums and their bases need special handling
 * because enum arrays are castable to their underlying-type arrays. */
3984 if (klass->cast_class == mono_defaults.object_class) {
3985 int parent_reg = alloc_preg (cfg);
3986 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3987 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3988 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3990 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3991 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3992 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3994 } else if (klass->cast_class == mono_defaults.enum_class) {
3995 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3996 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3997 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3998 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4000 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4001 /* Check that the object is a vector too */
4002 int bounds_reg = alloc_preg (cfg);
4003 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4008 /* the is_null_bb target simply copies the input register to the output */
4009 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4011 } else if (mono_class_is_nullable (klass)) {
4012 g_assert (!context_used);
4013 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4014 /* the is_null_bb target simply copies the input register to the output */
4015 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: one pointer comparison decides the test. */
4017 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4018 g_assert (!context_used);
4019 /* the remoting code is broken, access the class for now */
4020 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4021 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4023 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4024 cfg->exception_ptr = klass;
4027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4029 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4036 /* the is_null_bb target simply copies the input register to the output */
4037 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result is NULL. */
4042 MONO_START_BB (cfg, false_bb);
4044 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4047 MONO_START_BB (cfg, is_null_bb);
4049 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the "conditional isinst" opcode used by the remoting-aware
 * cast path. See the comment below for the 0/1/2 result encoding. When
 * remoting is enabled, transparent proxies whose remote type cannot be
 * determined locally produce 2 so the caller can fall back to a runtime check.
 *
 * NOTE(review): original line numbers are non-contiguous; some statements and
 * braces (including the final return) are elided in this excerpt.
 */
4055 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4057 /* This opcode takes as input an object reference and a class, and returns:
4058 0) if the object is an instance of the class,
4059 1) if the object is not instance of the class,
4060 2) if the object is a proxy whose type cannot be determined */
4063 #ifndef DISABLE_REMOTING
4064 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4066 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4068 int obj_reg = src->dreg;
4069 int dreg = alloc_ireg (cfg);
4071 #ifndef DISABLE_REMOTING
4072 int klass_reg = alloc_preg (cfg);
4075 NEW_BBLOCK (cfg, true_bb);
4076 NEW_BBLOCK (cfg, false_bb);
4077 NEW_BBLOCK (cfg, end_bb);
4078 #ifndef DISABLE_REMOTING
4079 NEW_BBLOCK (cfg, false2_bb);
4080 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
4083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4086 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4087 #ifndef DISABLE_REMOTING
4088 NEW_BBLOCK (cfg, interface_fail_bb);
4091 tmp_reg = alloc_preg (cfg);
4092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4093 #ifndef DISABLE_REMOTING
/* Interface miss may still be a proxy with custom type info -> result 2. */
4094 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4095 MONO_START_BB (cfg, interface_fail_bb);
4096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4098 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4100 tmp_reg = alloc_preg (cfg);
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4102 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4105 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface case: detect a transparent proxy and test its remote class. */
4108 #ifndef DISABLE_REMOTING
4109 tmp_reg = alloc_preg (cfg);
4110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4111 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4113 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4114 tmp_reg = alloc_preg (cfg);
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4118 tmp_reg = alloc_preg (cfg);
4119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4123 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4126 MONO_START_BB (cfg, no_proxy_bb);
4128 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4130 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
4134 MONO_START_BB (cfg, false_bb);
4136 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4139 #ifndef DISABLE_REMOTING
4140 MONO_START_BB (cfg, false2_bb);
4142 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4146 MONO_START_BB (cfg, true_bb);
4148 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4150 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction for the stack. */
4153 MONO_INST_NEW (cfg, ins, OP_ICONST);
4155 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the "conditional castclass" opcode used by the remoting-aware
 * cast path. See the comment below for the 0/1 result encoding; a definite
 * type mismatch throws InvalidCastException inline, while undeterminable
 * proxies yield 1 so the caller can defer to a runtime check.
 *
 * NOTE(review): original line numbers are non-contiguous; some statements and
 * braces (including the final return) are elided in this excerpt.
 */
4161 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4163 /* This opcode takes as input an object reference and a class, and returns:
4164 0) if the object is an instance of the class,
4165 1) if the object is a proxy whose type cannot be determined
4166 an InvalidCastException exception is thrown otherwhise*/
4169 #ifndef DISABLE_REMOTING
4170 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4172 MonoBasicBlock *ok_result_bb;
4174 int obj_reg = src->dreg;
4175 int dreg = alloc_ireg (cfg);
4176 int tmp_reg = alloc_preg (cfg);
4178 #ifndef DISABLE_REMOTING
4179 int klass_reg = alloc_preg (cfg);
4180 NEW_BBLOCK (cfg, end_bb);
4183 NEW_BBLOCK (cfg, ok_result_bb);
/* castclass of NULL succeeds (result 0). */
4185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4186 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object info for a detailed InvalidCastException message. */
4188 save_cast_details (cfg, klass, obj_reg);
4190 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4191 #ifndef DISABLE_REMOTING
4192 NEW_BBLOCK (cfg, interface_fail_bb);
4194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface miss: only a transparent proxy with custom type info may still
 * pass later (result 1); anything else throws here. */
4195 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4196 MONO_START_BB (cfg, interface_fail_bb);
4197 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4199 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4201 tmp_reg = alloc_preg (cfg);
4202 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4204 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4206 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4210 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4211 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case: detect a transparent proxy and test its remote class. */
4214 #ifndef DISABLE_REMOTING
4215 NEW_BBLOCK (cfg, no_proxy_bb);
4217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4219 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4221 tmp_reg = alloc_preg (cfg);
4222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4225 tmp_reg = alloc_preg (cfg);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4230 NEW_BBLOCK (cfg, fail_1_bb);
4232 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy type could not be determined -> result 1, defer to runtime. */
4234 MONO_START_BB (cfg, fail_1_bb);
4236 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4239 MONO_START_BB (cfg, no_proxy_bb);
4241 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4243 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4247 MONO_START_BB (cfg, ok_result_bb);
4249 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4251 #ifndef DISABLE_REMOTING
4252 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction for the stack. */
4256 MONO_INST_NEW (cfg, ins, OP_ICONST);
4258 ins->type = STACK_I4;
4264 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that constructs a delegate of class KLASS over METHOD with the
 * given TARGET, inlining the work of mono_delegate_ctor (): allocate the
 * delegate object, fill in its target/method/method_code/invoke_impl fields
 * (with write barriers where needed), and point invoke_impl at the delegate
 * trampoline. CONTEXT_USED != 0 routes method lookups through the RGCTX.
 * Returns NULL and sets the cfg exception on error (via handle_alloc).
 *
 * NOTE(review): original line numbers are non-contiguous; some statements and
 * braces (including the final return of OBJ) are elided in this excerpt.
 */
4266 static G_GNUC_UNUSED MonoInst*
4267 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4271 gpointer *trampoline;
4272 MonoInst *obj, *method_ins, *tramp_ins;
4276 obj = handle_alloc (cfg, klass, FALSE, 0);
4280 /* Inline the contents of mono_delegate_ctor */
4282 /* Set target field */
4283 /* Optimize away setting of NULL target */
4284 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4285 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* GC write barrier for the stored object reference. */
4286 if (cfg->gen_write_barriers) {
4287 dreg = alloc_preg (cfg);
4288 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4289 emit_write_barrier (cfg, ptr, target, 0);
4293 /* Set method field */
4294 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4296 if (cfg->gen_write_barriers) {
4297 dreg = alloc_preg (cfg);
4298 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4299 emit_write_barrier (cfg, ptr, method_ins, 0);
4302 * To avoid looking up the compiled code belonging to the target method
4303 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4304 * store it, and we fill it after the method has been compiled.
4306 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4307 MonoInst *code_slot_ins;
4310 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/lookup the per-domain method -> code-slot hash under the
 * domain lock. */
4312 domain = mono_domain_get ();
4313 mono_domain_lock (domain);
4314 if (!domain_jit_info (domain)->method_code_hash)
4315 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4316 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4318 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4319 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4321 mono_domain_unlock (domain);
4323 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4328 /* Set invoke_impl field */
/* AOT cannot embed a runtime pointer, so emit a patchable constant instead. */
4329 if (cfg->compile_aot) {
4330 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4332 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4333 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4337 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR allocating a multi-dimensional array of the given RANK, with the
 * dimension arguments in SP. Goes through the vararg mono_array_new_va icall
 * wrapper, which forces MONO_CFG_HAS_VARARGS and disables LLVM for this
 * method. Returns the call instruction producing the new array.
 */
4343 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4345 MonoJitICallInfo *info;
4347 /* Need to register the icall so it gets an icall wrapper */
4348 info = mono_get_array_new_va_icall (rank);
4350 cfg->flags |= MONO_CFG_HAS_VARARGS;
4352 /* mono_array_new_va () needs a vararg calling convention */
4353 cfg->disable_llvm = TRUE;
4355 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4356 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Insert an OP_LOAD_GOTADDR at the very start of the entry basic block so
 * cfg->got_var holds the GOT address for the whole method, and add a dummy
 * use in the exit block so the variable stays live even when only the back
 * end references it. No-op if there is no got_var or it was already set up.
 */
4360 mono_emit_load_got_addr (MonoCompile *cfg)
4362 MonoInst *getaddr, *dummy_use;
4364 if (!cfg->got_var || cfg->got_var_allocated)
4367 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4368 getaddr->cil_code = cfg->header->code;
4369 getaddr->dreg = cfg->got_var->dreg;
4371 /* Add it to the start of the first bblock */
/* Prepend (not append) so the GOT address is available before any other
 * instruction in the entry block. */
4372 if (cfg->bb_entry->code) {
4373 getaddr->next = cfg->bb_entry->code;
4374 cfg->bb_entry->code = getaddr;
4377 MONO_ADD_INS (cfg->bb_entry, getaddr);
4379 cfg->got_var_allocated = TRUE;
4382 * Add a dummy use to keep the got_var alive, since real uses might
4383 * only be generated by the back ends.
4384 * Add it to end_bblock, so the variable's lifetime covers the whole
4386 * It would be better to make the usage of the got var explicit in all
4387 * cases when the backend needs it (i.e. calls, throw etc.), so this
4388 * wouldn't be needed.
4390 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4391 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size threshold (IL bytes), lazily initialized from the
 * MONO_INLINELIMIT environment variable or INLINE_LENGTH_LIMIT in
 * mono_method_check_inlining (). */
4394 static int inline_limit;
4395 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Rejects: shared generic code, deep inline nesting, noinline/
 * synchronized/MarshalByRef methods, bodies over the size limit (unless
 * marked AggressiveInlining), classes whose cctor would have to run inside
 * the inlined code, methods with declarative security, and (on soft-float
 * targets) any R4 in the signature.
 *
 * NOTE(review): original line numbers are non-contiguous; return statements
 * and some braces are elided in this excerpt.
 */
4398 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4400 MonoMethodHeaderSummary header;
4402 #ifdef MONO_ARCH_SOFT_FLOAT
4403 MonoMethodSignature *sig = mono_method_signature (method);
4407 if (cfg->generic_sharing_context)
4410 if (cfg->inline_depth > 10)
4413 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF opcodes, small icall/pinvoke stubs can be inlined too. */
4414 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4415 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4416 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4421 if (!mono_method_get_header_summary (method, &header))
4424 /*runtime, icall and pinvoke are checked by summary call*/
4425 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4426 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4427 (mono_class_is_marshalbyref (method->klass)) ||
4431 /* also consider num_locals? */
4432 /* Do the size check early to avoid creating vtables */
4433 if (!inline_limit_inited) {
4434 if (getenv ("MONO_INLINELIMIT"))
4435 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4437 inline_limit = INLINE_LENGTH_LIMIT;
4438 inline_limit_inited = TRUE;
4440 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4444 * if we can initialize the class of the method right away, we do,
4445 * otherwise we don't allow inlining if the class needs initialization,
4446 * since it would mean inserting a call to mono_runtime_class_init()
4447 * inside the inlined code
4449 if (!(cfg->opt & MONO_OPT_SHARED)) {
4450 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4451 if (cfg->run_cctors && method->klass->has_cctor) {
4452 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4453 if (!method->klass->runtime_info)
4454 /* No vtable created yet */
4456 vtable = mono_class_vtable (cfg->domain, method->klass);
4459 /* This makes so that inline cannot trigger */
4460 /* .cctors: too many apps depend on them */
4461 /* running with a specific order... */
4462 if (! vtable->initialized)
4464 mono_runtime_class_init (vtable);
4466 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4467 if (!method->klass->runtime_info)
4468 /* No vtable created yet */
4470 vtable = mono_class_vtable (cfg->domain, method->klass);
4473 if (!vtable->initialized)
4478 * If we're compiling for shared code
4479 * the cctor will need to be run at aot method load time, for example,
4480 * or at the end of the compilation of the inlining method.
4482 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4487 * CAS - do not inline methods with declarative security
4488 * Note: this has to be before any possible return TRUE;
4490 if (mono_security_method_has_declsec (method))
4493 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline anything touching R4 values. */
4495 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4497 for (i = 0; i < sig->param_count; ++i)
4498 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a class
 * initialization check for VTABLE's class. Not needed when the class is
 * already initialized (JIT only), has BeforeFieldInit semantics, has no
 * cctor to run, or when the access happens inside an instance method of the
 * class itself (init already happened before the call).
 */
4506 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4508 if (vtable->initialized && !cfg->compile_aot)
4511 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4514 if (!mono_class_needs_cctor_run (vtable->klass, method))
4517 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4518 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, with an optional bounds check (BCHECK).
 * For gsharedvt element classes the element size comes from the RGCTX at
 * runtime; on x86/amd64 power-of-two sizes use a single LEA. The result is a
 * STACK_MP instruction pointing into the array's vector.
 *
 * NOTE(review): original line numbers are non-contiguous; some braces and the
 * final return are elided in this excerpt.
 */
4525 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4529 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4532 if (mini_is_gsharedvt_klass (cfg, klass)) {
4535 mono_class_init (klass);
4536 size = mono_class_array_element_size (klass);
4539 mult_reg = alloc_preg (cfg);
4540 array_reg = arr->dreg;
4541 index_reg = index->dreg;
4543 #if SIZEOF_REGISTER == 8
4544 /* The array reg is 64 bits but the index reg is only 32 */
4545 if (COMPILE_LLVM (cfg)) {
4547 index2_reg = index_reg;
4549 index2_reg = alloc_preg (cfg);
/* Sign-extend the 32-bit index into a 64-bit register for addressing. */
4550 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4553 if (index->type == STACK_I8) {
4554 index2_reg = alloc_preg (cfg);
4555 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4557 index2_reg = index_reg;
4562 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4564 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: element sizes 1/2/4/8 map onto an x86 scaled LEA. */
4565 if (size == 1 || size == 2 || size == 4 || size == 8) {
4566 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4568 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4569 ins->klass = mono_class_get_element_class (klass);
4570 ins->type = STACK_MP;
4576 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime; fetch it from the
 * RGCTX and multiply, instead of using the compile-time constant. */
4579 MonoInst *rgctx_ins;
4582 g_assert (cfg->generic_sharing_context);
4583 context_used = mini_class_check_context_used (cfg, klass);
4584 g_assert (context_used);
4585 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4586 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4590 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4591 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4592 ins->klass = mono_class_get_element_class (klass);
4593 ins->type = STACK_MP;
4594 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled where the architecture has a native multiply (needs OP_LMUL). */
4599 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 (two-dimensional)
 * array, including per-dimension lower-bound adjustment and range checks
 * against the MonoArrayBounds pairs stored at arr->bounds.
 */
4601 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4603 int bounds_reg = alloc_preg (cfg);
4604 int add_reg = alloc_ireg_mp (cfg);
4605 int mult_reg = alloc_preg (cfg);
4606 int mult2_reg = alloc_preg (cfg);
4607 int low1_reg = alloc_preg (cfg);
4608 int low2_reg = alloc_preg (cfg);
4609 int high1_reg = alloc_preg (cfg);
4610 int high2_reg = alloc_preg (cfg);
4611 int realidx1_reg = alloc_preg (cfg);
4612 int realidx2_reg = alloc_preg (cfg);
4613 int sum_reg = alloc_preg (cfg);
4614 int index1, index2, tmpreg;
4618 mono_class_init (klass);
4619 size = mono_class_array_element_size (klass);
4621 index1 = index_ins1->dreg;
4622 index2 = index_ins2->dreg;
4624 #if SIZEOF_REGISTER == 8
4625 /* The array reg is 64 bits but the index reg is only 32 */
4626 if (COMPILE_LLVM (cfg)) {
/* Widen both 32-bit indexes before 64-bit pointer arithmetic. */
4629 tmpreg = alloc_preg (cfg);
4630 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4632 tmpreg = alloc_preg (cfg);
4633 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4637 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4641 /* range checking */
4642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4643 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare vs length. */
4645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4646 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4647 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4648 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4649 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4650 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4651 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
4653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4654 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4655 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4657 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4659 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_length + realidx2) * size) + vector offset */
4661 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4662 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4664 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4665 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4667 ins->type = STACK_MP;
4669 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch for multi-dimensional element-address computation on a call to
 * an array Get/Set/Address accessor CMETHOD.  Rank 1 and (when OP_LMUL is
 * available) rank 2 get inline fast paths; other ranks fall back to the
 * generic marshalled Address () helper.  IS_SET means the last stack slot
 * is the value being stored, not an index.
 */
4676 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4680 MonoMethod *addr_method;
/* For a setter the final parameter is the value, so it doesn't count as a rank. */
4683 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4686 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4688 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4689 /* emit_ldelema_2 depends on OP_LMUL */
4690 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4691 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic fallback: call the marshal-generated Address (rank, size) helper. */
4695 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4696 addr_method = mono_marshal_get_array_address (rank, element_size);
4697 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint/Debugger.Break () request. */
4702 static MonoBreakPolicy
4703 always_insert_breakpoint (MonoMethod *method)
4705 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4708 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4711 * mono_set_break_policy:
4712 * policy_callback: the new callback function
4714 * Allow embedders to decide whether to actually obey breakpoint instructions
4715 * (both break IL instructions and Debugger.Break () method calls), for example
4716 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4717 * untrusted or semi-trusted code.
4719 * @policy_callback will be called every time a break point instruction needs to
4720 * be inserted with the method argument being the method that calls Debugger.Break()
4721 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4722 * if it wants the breakpoint to not be effective in the given method.
4723 * #MONO_BREAK_POLICY_ALWAYS is the default.
4726 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4728 if (policy_callback)
4729 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
4731 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the registered break policy whether a breakpoint for METHOD should
 * actually be emitted.  (The misspelled name is kept: callers elsewhere in
 * this file reference it.)
 */
4735 should_insert_brekpoint (MonoMethod *method) {
4736 switch (break_policy_func (method)) {
4737 case MONO_BREAK_POLICY_ALWAYS:
4739 case MONO_BREAK_POLICY_NEVER:
/* Only break when running under the Mono debugger. */
4741 case MONO_BREAK_POLICY_ON_DBG:
4742 return mono_debug_using_mono_debugger ();
/* Unknown enum value from an embedder-supplied callback: warn, don't crash. */
4744 g_warning ("Incorrect value returned from break policy callback");
4749 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = address of the value.
 * IS_SET stores through the element address; otherwise the element is
 * loaded and written to *args [2].
 */
4751 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4753 MonoInst *addr, *store, *load;
4754 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4756 /* the bounds check is already done by the callers */
4757 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4760 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array needs a GC write barrier. */
4761 if (mini_type_is_reference (cfg, fsig->params [2]))
4762 emit_write_barrier (cfg, addr, load, -1);
4764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4765 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type from the generic-sharing point of view. */
4772 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4774 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 * SAFETY_CHECKS enables the covariance (element-type) check; reference
 * stores with checks go through a marshalled virtual stelemref helper.
 */
4778 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst *sp, gboolean safety_checks)
/* Storing a non-null reference needs the array-covariance type check; */
/* a known-null store (OP_PCONST 0) can skip the helper entirely. */
4780 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4781 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4782 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4783 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4784 MonoInst *iargs [3];
4787 mono_class_setup_vtable (obj_array);
4788 g_assert (helper->slot);
4790 if (sp [0]->type != STACK_OBJ)
4792 if (sp [2]->type != STACK_OBJ)
4799 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element type: store through a computed address as a value type. */
4803 if (mini_is_gsharedvt_klass (cfg, klass)) {
4806 // FIXME-VT: OP_ICONST optimization
4807 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4808 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4809 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
4810 } else if (sp [1]->opcode == OP_ICONST) {
4811 int array_reg = sp [0]->dreg;
4812 int index_reg = sp [1]->dreg;
4813 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4816 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4817 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4819 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4820 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4821 if (generic_class_is_reference_type (cfg, klass))
4822 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 *
 *   Implement Array.UnsafeStore/UnsafeLoad: element access with the
 * covariance check disabled (bounds check also skipped on the load path).
 * The element class comes from the value parameter on stores, from the
 * return type on loads.
 */
4829 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4834 eklass = mono_class_from_mono_type (fsig->params [2]);
4836 eklass = mono_class_from_mono_type (fsig->ret);
4840 return emit_array_store (cfg, eklass, args, FALSE);
4842 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4843 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR.  Currently only
 * SIMD ctors are handled (when MONO_OPT_SIMD is enabled); returns NULL
 * when no intrinsic applies so the caller emits a normal call.
 */
4849 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4851 MonoInst *ins = NULL;
4852 #ifdef MONO_ARCH_SIMD_INTRINSICS
4853 if (cfg->opt & MONO_OPT_SIMD) {
4854 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Emit an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) into the
 * current basic block and return the instruction.
 */
4864 emit_memory_barrier (MonoCompile *cfg, int kind)
4866 MonoInst *ins = NULL;
4867 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4868 MONO_ADD_INS (cfg->cbb, ins);
/* Set after MONO_ADD_INS; the backend reads it when lowering the barrier. */
4869 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only when compiling with the LLVM backend: System.Math
 * Sin/Cos/Sqrt/Abs (double) map to single opcodes, and Min/Max map to CMOV
 * min/max opcodes when MONO_OPT_CMOV is on.  Returns NULL when nothing
 * matches.
 */
4875 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4877 MonoInst *ins = NULL;
4880 /* The LLVM backend supports these intrinsics */
4881 if (cmethod->klass == mono_defaults.math_class) {
4882 if (strcmp (cmethod->name, "Sin") == 0) {
4884 } else if (strcmp (cmethod->name, "Cos") == 0) {
4886 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs only for the double overload; other overloads go the normal route. */
4888 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4893 MONO_INST_NEW (cfg, ins, opcode);
4894 ins->type = STACK_R8;
4895 ins->dreg = mono_alloc_freg (cfg);
4896 ins->sreg1 = args [0]->dreg;
4897 MONO_ADD_INS (cfg->cbb, ins);
4901 if (cfg->opt & MONO_OPT_CMOV) {
4902 if (strcmp (cmethod->name, "Min") == 0) {
4903 if (fsig->params [0]->type == MONO_TYPE_I4)
4905 if (fsig->params [0]->type == MONO_TYPE_U4)
4906 opcode = OP_IMIN_UN;
4907 else if (fsig->params [0]->type == MONO_TYPE_I8)
4909 else if (fsig->params [0]->type == MONO_TYPE_U8)
4910 opcode = OP_LMIN_UN;
4911 } else if (strcmp (cmethod->name, "Max") == 0) {
4912 if (fsig->params [0]->type == MONO_TYPE_I4)
4914 if (fsig->params [0]->type == MONO_TYPE_U4)
4915 opcode = OP_IMAX_UN;
4916 else if (fsig->params [0]->type == MONO_TYPE_I8)
4918 else if (fsig->params [0]->type == MONO_TYPE_U8)
4919 opcode = OP_LMAX_UN;
4924 MONO_INST_NEW (cfg, ins, opcode);
/* I4 vs U4 both leave STACK_I4; everything else here is 64 bit. */
4925 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4926 ins->dreg = mono_alloc_ireg (cfg);
4927 ins->sreg1 = args [0]->dreg;
4928 ins->sreg2 = args [1]->dreg;
4929 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in shared (generic-sharing) code:
 * currently only Array.UnsafeStore/UnsafeLoad.  Returns NULL otherwise.
 */
4937 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4939 if (cmethod->klass == mono_defaults.array_class) {
4940 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4941 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4942 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4943 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsic dispatcher: given a call to CMETHOD with signature FSIG
 * and argument instructions ARGS, try to emit equivalent inline IR and
 * return the resulting value instruction, or NULL to emit a normal call.
 * Dispatches on the declaring class (String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger/Environment, Math), then falls
 * through to SIMD, LLVM and per-architecture intrinsics.
 */
4950 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4952 MonoInst *ins = NULL;
/* Cached lazily; benign race: all writers store the same value. */
4954 static MonoClass *runtime_helpers_class = NULL;
4955 if (! runtime_helpers_class)
4956 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4957 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String --- */
4959 if (cmethod->klass == mono_defaults.string_class) {
4960 if (strcmp (cmethod->name, "get_Chars") == 0) {
4961 int dreg = alloc_ireg (cfg);
4962 int index_reg = alloc_preg (cfg);
4963 int mult_reg = alloc_preg (cfg);
4964 int add_reg = alloc_preg (cfg);
4966 #if SIZEOF_REGISTER == 8
4967 /* The array reg is 64 bits but the index reg is only 32 */
4968 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4970 index_reg = args [1]->dreg;
4972 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4974 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Scale by 2 (UTF-16 chars) via LEA on x86/amd64. */
4975 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4976 add_reg = ins->dreg;
4977 /* Avoid a warning */
4979 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4982 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4983 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4985 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4987 type_from_op (ins, NULL, NULL);
4989 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4990 int dreg = alloc_ireg (cfg);
4991 /* Decompose later to allow more optimizations */
4992 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4993 ins->type = STACK_I4;
/* May fault on a null string, so the block must be marked. */
4994 ins->flags |= MONO_INST_FAULT;
4995 cfg->cbb->has_array_access = TRUE;
4996 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4999 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5000 int mult_reg = alloc_preg (cfg);
5001 int add_reg = alloc_preg (cfg);
5003 /* The corlib functions check for oob already. */
5004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5005 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5006 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5007 return cfg->cbb->last_ins;
/* --- System.Object --- */
5010 } else if (cmethod->klass == mono_defaults.object_class) {
5012 if (strcmp (cmethod->name, "GetType") == 0) {
5013 int dreg = alloc_ireg_ref (cfg);
5014 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, with a null check on obj. */
5015 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5016 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5017 type_from_op (ins, NULL, NULL);
5020 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid with a non-moving GC. */
5021 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5022 int dreg = alloc_ireg (cfg);
5023 int t1 = alloc_ireg (cfg);
/* (obj >> 3) * 2654435761: Knuth multiplicative hash on the address. */
5025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5026 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5027 ins->type = STACK_I4;
/* Object..ctor is empty; replace the call with a NOP. */
5031 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5032 MONO_INST_NEW (cfg, ins, OP_NOP);
5033 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array --- */
5037 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
5038 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5039 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5041 #ifndef MONO_BIG_ARRAYS
5043 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5046 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5047 int dreg = alloc_ireg (cfg);
5048 int bounds_reg = alloc_ireg_mp (cfg);
5049 MonoBasicBlock *end_bb, *szarray_bb;
5050 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5052 NEW_BBLOCK (cfg, end_bb);
5053 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL distinguishes szarrays from multi-dim arrays. */
5055 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5056 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5058 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5059 /* Non-szarray case */
5061 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5062 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5064 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5065 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5067 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length; lower bound is always 0. */
5070 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5071 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5073 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5074 MONO_START_BB (cfg, end_bb);
5076 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5077 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
5083 if (cmethod->name [0] != 'g')
5086 if (strcmp (cmethod->name, "get_Rank") == 0) {
5087 int dreg = alloc_ireg (cfg);
5088 int vtable_reg = alloc_preg (cfg);
5089 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5090 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5091 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5092 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5093 type_from_op (ins, NULL, NULL);
5096 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5097 int dreg = alloc_ireg (cfg);
5099 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5100 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5101 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5106 } else if (cmethod->klass == runtime_helpers_class) {
5108 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5109 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5113 } else if (cmethod->klass == mono_defaults.thread_class) {
5114 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5115 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5116 MONO_ADD_INS (cfg->cbb, ins);
5118 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5119 return emit_memory_barrier (cfg, FullBarrier);
/* --- System.Threading.Monitor --- */
5121 } else if (cmethod->klass == mono_defaults.monitor_class) {
5123 /* FIXME this should be integrated to the check below once we support the trampoline version */
5124 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5125 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5126 MonoMethod *fast_method = NULL;
5128 /* Avoid infinite recursion */
5129 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5132 fast_method = mono_monitor_get_fast_path (cmethod);
5136 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5140 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5141 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5144 if (COMPILE_LLVM (cfg)) {
5146 * Pass the argument normally, the LLVM backend will handle the
5147 * calling convention problems.
5149 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the trampoline takes the object in a fixed register. */
5151 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5152 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5153 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5154 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5157 return (MonoInst*)call;
5158 } else if (strcmp (cmethod->name, "Exit") == 0) {
5161 if (COMPILE_LLVM (cfg)) {
5162 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5164 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5165 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5166 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5167 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5170 return (MonoInst*)call;
5172 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5174 MonoMethod *fast_method = NULL;
5176 /* Avoid infinite recursion */
5177 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5178 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5179 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5182 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5183 strcmp (cmethod->name, "Exit") == 0)
5184 fast_method = mono_monitor_get_fast_path (cmethod);
5188 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
5191 } else if (cmethod->klass->image == mono_defaults.corlib &&
5192 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5193 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5196 #if SIZEOF_REGISTER == 8
5197 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5198 /* 64 bit reads are already atomic */
5199 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5200 ins->dreg = mono_alloc_preg (cfg);
5201 ins->inst_basereg = args [0]->dreg;
5202 ins->inst_offset = 0;
5203 MONO_ADD_INS (cfg->cbb, ins);
5207 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all lower to ATOMIC_ADD_NEW with a constant */
/* (or supplied) addend; the opcode width follows the parameter type. */
5208 if (strcmp (cmethod->name, "Increment") == 0) {
5209 MonoInst *ins_iconst;
5212 if (fsig->params [0]->type == MONO_TYPE_I4)
5213 opcode = OP_ATOMIC_ADD_NEW_I4;
5214 #if SIZEOF_REGISTER == 8
5215 else if (fsig->params [0]->type == MONO_TYPE_I8)
5216 opcode = OP_ATOMIC_ADD_NEW_I8;
5219 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5220 ins_iconst->inst_c0 = 1;
5221 ins_iconst->dreg = mono_alloc_ireg (cfg);
5222 MONO_ADD_INS (cfg->cbb, ins_iconst);
5224 MONO_INST_NEW (cfg, ins, opcode);
5225 ins->dreg = mono_alloc_ireg (cfg);
5226 ins->inst_basereg = args [0]->dreg;
5227 ins->inst_offset = 0;
5228 ins->sreg2 = ins_iconst->dreg;
5229 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5230 MONO_ADD_INS (cfg->cbb, ins);
5232 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5233 MonoInst *ins_iconst;
5236 if (fsig->params [0]->type == MONO_TYPE_I4)
5237 opcode = OP_ATOMIC_ADD_NEW_I4;
5238 #if SIZEOF_REGISTER == 8
5239 else if (fsig->params [0]->type == MONO_TYPE_I8)
5240 opcode = OP_ATOMIC_ADD_NEW_I8;
5243 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5244 ins_iconst->inst_c0 = -1;
5245 ins_iconst->dreg = mono_alloc_ireg (cfg);
5246 MONO_ADD_INS (cfg->cbb, ins_iconst);
5248 MONO_INST_NEW (cfg, ins, opcode);
5249 ins->dreg = mono_alloc_ireg (cfg);
5250 ins->inst_basereg = args [0]->dreg;
5251 ins->inst_offset = 0;
5252 ins->sreg2 = ins_iconst->dreg;
5253 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5254 MONO_ADD_INS (cfg->cbb, ins);
5256 } else if (strcmp (cmethod->name, "Add") == 0) {
5259 if (fsig->params [0]->type == MONO_TYPE_I4)
5260 opcode = OP_ATOMIC_ADD_NEW_I4;
5261 #if SIZEOF_REGISTER == 8
5262 else if (fsig->params [0]->type == MONO_TYPE_I8)
5263 opcode = OP_ATOMIC_ADD_NEW_I8;
5267 MONO_INST_NEW (cfg, ins, opcode);
5268 ins->dreg = mono_alloc_ireg (cfg);
5269 ins->inst_basereg = args [0]->dreg;
5270 ins->inst_offset = 0;
5271 ins->sreg2 = args [1]->dreg;
5272 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5273 MONO_ADD_INS (cfg->cbb, ins);
5276 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5278 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5279 if (strcmp (cmethod->name, "Exchange") == 0) {
5281 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5283 if (fsig->params [0]->type == MONO_TYPE_I4)
5284 opcode = OP_ATOMIC_EXCHANGE_I4;
5285 #if SIZEOF_REGISTER == 8
/* References and native ints are pointer-sized: 64 bit here. */
5286 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5287 (fsig->params [0]->type == MONO_TYPE_I))
5288 opcode = OP_ATOMIC_EXCHANGE_I8;
5290 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5291 opcode = OP_ATOMIC_EXCHANGE_I4;
5296 MONO_INST_NEW (cfg, ins, opcode);
5297 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5298 ins->inst_basereg = args [0]->dreg;
5299 ins->inst_offset = 0;
5300 ins->sreg2 = args [1]->dreg;
5301 MONO_ADD_INS (cfg->cbb, ins);
5303 switch (fsig->params [0]->type) {
5305 ins->type = STACK_I4;
5309 ins->type = STACK_I8;
5311 case MONO_TYPE_OBJECT:
5312 ins->type = STACK_OBJ;
5315 g_assert_not_reached ();
/* The exchanged-in reference needs a write barrier under a moving GC. */
5318 if (cfg->gen_write_barriers && is_ref)
5319 emit_write_barrier (cfg, args [0], args [1], -1);
5321 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5323 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5324 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5326 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5327 if (fsig->params [1]->type == MONO_TYPE_I4)
5329 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5330 size = sizeof (gpointer);
5331 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5334 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5335 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5336 ins->sreg1 = args [0]->dreg;
5337 ins->sreg2 = args [1]->dreg;
5338 ins->sreg3 = args [2]->dreg;
5339 ins->type = STACK_I4;
5340 MONO_ADD_INS (cfg->cbb, ins);
5341 } else if (size == 8) {
5342 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5343 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5344 ins->sreg1 = args [0]->dreg;
5345 ins->sreg2 = args [1]->dreg;
5346 ins->sreg3 = args [2]->dreg;
5347 ins->type = STACK_I8;
5348 MONO_ADD_INS (cfg->cbb, ins);
/* Other sizes fall through to a normal call (ins stays NULL). */
5350 /* g_assert_not_reached (); */
5352 if (cfg->gen_write_barriers && is_ref)
5353 emit_write_barrier (cfg, args [0], args [1], -1);
5355 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5357 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5358 ins = emit_memory_barrier (cfg, FullBarrier);
/* --- Other corlib types: Debugger, Environment --- */
5362 } else if (cmethod->klass->image == mono_defaults.corlib) {
/* First-character check is a cheap pre-filter before the strcmp. */
5363 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5364 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5365 if (should_insert_brekpoint (cfg->method)) {
5366 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5368 MONO_INST_NEW (cfg, ins, OP_NOP);
5369 MONO_ADD_INS (cfg->cbb, ins);
/* Fold the platform check to a constant at compile time. */
5373 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5374 && strcmp (cmethod->klass->name, "Environment") == 0) {
5376 EMIT_NEW_ICONST (cfg, ins, 1);
5378 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math --- */
5382 } else if (cmethod->klass == mono_defaults.math_class) {
5384 * There is general branches code for Min/Max, but it does not work for
5386 * http://everything2.com/?node_id=1051618
5390 #ifdef MONO_ARCH_SIMD_INTRINSICS
5391 if (cfg->opt & MONO_OPT_SIMD) {
5392 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5398 if (COMPILE_LLVM (cfg)) {
5399 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
/* Finally give the architecture backend a chance. */
5404 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5408 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to an equivalent managed implementation.
 * Currently only String.InternalAllocateStr is redirected, to the managed
 * GC allocator, and only when allocation profiling and shared code are off.
 * Returns NULL when no redirection applies.
 */
5411 inline static MonoInst*
5412 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5413 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5415 if (method->klass == mono_defaults.string_class) {
5416 /* managed string allocation support */
5417 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5418 MonoInst *iargs [2];
5419 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5420 MonoMethod *managed_alloc = NULL;
5422 g_assert (vtable); /* Should not fail since it is System.String */
5423 #ifndef MONO_CROSS_COMPILE
5424 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* managed_alloc (vtable, length) */
5428 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5429 iargs [1] = args [0];
5430 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   During inlining, copy the caller's stack slots SP into freshly created
 * local variables that become the inlined method's argument vars
 * (cfg->args [i]).  The 'this' slot (i == 0 when sig->hasthis) takes its
 * type from the stack value rather than the signature.
 */
5437 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5439 MonoInst *store, *temp;
5442 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5443 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5446 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5447 * would be different than the MonoInst's used to represent arguments, and
5448 * the ldelema implementation can't deal with that.
5449 * Solution: When ldelema is used on an inline argument, create a var for
5450 * it, emit ldelema on that var, and emit the saving code below in
5451 * inline_method () if needed.
5453 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5454 cfg->args [i] = temp;
5455 /* This uses cfg->args [i] which is set by the preceding line */
5456 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5457 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, the environment variables
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * restrict inlining to methods whose full name starts with the given prefix.
 * NOTE(review): both are enabled (1) here, which adds a getenv/strncmp check
 * on every inline decision -- confirm this is intentional and not leftover
 * debugging.
 */
5462 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5463 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5465 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: TRUE if CALLED_METHOD's full name starts with the prefix
 * in MONO_INLINE_CALLED_METHOD_NAME_LIMIT (or if the variable is unset,
 * in which case the limit string stays empty).  The getenv result is
 * cached in a static on first use.
 */
5467 check_inline_called_method_name_limit (MonoMethod *called_method)
5470 static char *limit = NULL;
5472 if (limit == NULL) {
5473 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5475 if (limit_string != NULL)
5476 limit = limit_string;
/* Empty string marks "no limit configured" so getenv runs only once. */
5478 limit = (char *) "";
5481 if (limit [0] != '\0') {
5482 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are compared. */
5484 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5485 g_free (called_method_name);
5487 //return (strncmp_result <= 0);
5488 return (strncmp_result == 0);
5495 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper mirroring check_inline_called_method_name_limit, but the
 * prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT is matched against the
 * *calling* method's full name.
 */
5497 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5500 static char *limit = NULL;
5502 if (limit == NULL) {
5503 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5504 if (limit_string != NULL) {
5505 limit = limit_string;
/* Empty string marks "no limit configured" so getenv runs only once. */
5507 limit = (char *) "";
5511 if (limit [0] != '\0') {
5512 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5514 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5515 g_free (caller_method_name);
5517 //return (strncmp_result <= 0);
5518 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline return variable RVAR to the zero value of RTYPE,
 * selecting the constant opcode by the variable's stack type (int, long,
 * pointer/ref, double, or value type).
 */
5526 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Static so OP_R8CONST can point at a stable 0.0 in memory. */
5528 static double r8_0 = 0.0;
5531 switch (rvar->type) {
5533 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5536 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5541 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5544 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5545 ins->type = STACK_R8;
5546 ins->inst_p0 = (void*)&r8_0;
5547 ins->dreg = rvar->dreg;
5548 MONO_ADD_INS (cfg->cbb, ins);
/* Value types: zero the whole variable. */
5551 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5554 g_assert_not_reached ();
/*
 * inline_method:
 * Try to inline CMETHOD at the current emission point. The caller's
 * per-method compile state in CFG (locals, args, cil offsets, cbb, ...)
 * is saved, mono_method_to_ir () is re-entered to emit the callee's body
 * between fresh start/end bblocks, then the state is restored. On success
 * the new bblocks are linked/merged into the caller's flow and the return
 * value (if any) is loaded from RVAR; on failure the added bblocks are
 * discarded and any pending exception state is cleared.
 */
5559 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5560 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5562 MonoInst *ins, *rvar = NULL;
5563 MonoMethodHeader *cheader;
5564 MonoBasicBlock *ebblock, *sbblock;
5566 MonoMethod *prev_inlined_method;
5567 MonoInst **prev_locals, **prev_args;
5568 MonoType **prev_arg_types;
5569 guint prev_real_offset;
5570 GHashTable *prev_cbb_hash;
5571 MonoBasicBlock **prev_cil_offset_to_bb;
5572 MonoBasicBlock *prev_cbb;
5573 unsigned char* prev_cil_start;
5574 guint32 prev_cil_offset_to_bb_len;
5575 MonoMethod *prev_current_method;
5576 MonoGenericContext *prev_generic_context;
5577 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5579 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
5581 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5582 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5585 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5586 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5590 if (cfg->verbose_level > 2)
5591 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5593 if (!cmethod->inline_info) {
5594 cfg->stat_inlineable_methods++;
5595 cmethod->inline_info = 1;
5598 /* allocate local variables */
5599 cheader = mono_method_get_header (cmethod);
5601 if (cheader == NULL || mono_loader_get_last_error ()) {
5602 MonoLoaderError *error = mono_loader_get_last_error ();
5605 mono_metadata_free_mh (cheader);
5606 if (inline_always && error)
5607 mono_cfg_set_exception (cfg, error->exception_type);
5609 mono_loader_clear_error ();
5613 /*Must verify before creating locals as it can cause the JIT to assert.*/
5614 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5615 mono_metadata_free_mh (cheader);
5619 /* allocate space to store the return value */
5620 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5621 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5624 prev_locals = cfg->locals;
5625 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5626 for (i = 0; i < cheader->num_locals; ++i)
5627 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5629 /* allocate start and end blocks */
5630 /* This is needed so if the inline is aborted, we can clean up */
5631 NEW_BBLOCK (cfg, sbblock);
5632 sbblock->real_offset = real_offset;
5634 NEW_BBLOCK (cfg, ebblock);
5635 ebblock->block_num = cfg->num_bblocks++;
5636 ebblock->real_offset = real_offset;
/* Save the caller's compile state that the recursive mono_method_to_ir ()
 * call below will overwrite while emitting the callee. */
5638 prev_args = cfg->args;
5639 prev_arg_types = cfg->arg_types;
5640 prev_inlined_method = cfg->inlined_method;
5641 cfg->inlined_method = cmethod;
5642 cfg->ret_var_set = FALSE;
5643 cfg->inline_depth ++;
5644 prev_real_offset = cfg->real_offset;
5645 prev_cbb_hash = cfg->cbb_hash;
5646 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5647 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5648 prev_cil_start = cfg->cil_start;
5649 prev_cbb = cfg->cbb;
5650 prev_current_method = cfg->current_method;
5651 prev_generic_context = cfg->generic_context;
5652 prev_ret_var_set = cfg->ret_var_set;
5654 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5657 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5659 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state. */
5661 cfg->inlined_method = prev_inlined_method;
5662 cfg->real_offset = prev_real_offset;
5663 cfg->cbb_hash = prev_cbb_hash;
5664 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5665 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5666 cfg->cil_start = prev_cil_start;
5667 cfg->locals = prev_locals;
5668 cfg->args = prev_args;
5669 cfg->arg_types = prev_arg_types;
5670 cfg->current_method = prev_current_method;
5671 cfg->generic_context = prev_generic_context;
5672 cfg->ret_var_set = prev_ret_var_set;
5673 cfg->inline_depth --;
/* NOTE(review): 60 here is the inline cost cutoff — presumably related to
 * BRANCH_COST/INLINE_LENGTH_LIMIT tuning; confirm before changing. */
5675 if ((costs >= 0 && costs < 60) || inline_always) {
5676 if (cfg->verbose_level > 2)
5677 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5679 cfg->stat_inlined_methods++;
5681 /* always add some code to avoid block split failures */
5682 MONO_INST_NEW (cfg, ins, OP_NOP);
5683 MONO_ADD_INS (prev_cbb, ins);
5685 prev_cbb->next_bb = sbblock;
5686 link_bblock (cfg, prev_cbb, sbblock);
5689 * Get rid of the begin and end bblocks if possible to aid local
5692 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5694 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5695 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5697 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5698 MonoBasicBlock *prev = ebblock->in_bb [0];
5699 mono_merge_basic_blocks (cfg, prev, ebblock);
5701 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5702 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5703 cfg->cbb = prev_cbb;
5707 * It's possible that the rvar is set in some prev bblock, but not in others.
5713 for (i = 0; i < ebblock->in_count; ++i) {
5714 bb = ebblock->in_bb [i];
5716 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5719 emit_init_rvar (cfg, rvar, fsig->ret);
5729 * If the inlined method contains only a throw, then the ret var is not
5730 * set, so set it to a dummy value.
5733 emit_init_rvar (cfg, rvar, fsig->ret);
5735 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5738 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear the failure state and drop the new bblocks. */
5741 if (cfg->verbose_level > 2)
5742 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5743 cfg->exception_type = MONO_EXCEPTION_NONE;
5744 mono_loader_clear_error ();
5746 /* This gets rid of the newly added bblocks */
5747 cfg->cbb = prev_cbb;
5749 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5754 * Some of these comments may well be out-of-date.
5755 * Design decisions: we do a single pass over the IL code (and we do bblock
5756 * splitting/merging in the few cases when it's required: a back jump to an IL
5757 * address that was not already seen as a bblock starting point).
5758 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5759 * Complex operations are decomposed in simpler ones right away. We need to let the
5760 * arch-specific code peek and poke inside this process somehow (except when the
5761 * optimizations can take advantage of the full semantic info of coarse opcodes).
5762 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5763 * MonoInst->opcode initially is the IL opcode or some simplification of that
5764 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5765 * opcode with value bigger than OP_LAST.
5766 * At this point the IR can be handed over to an interpreter, a dumb code generator
5767 * or to the optimizing code generator that will translate it to SSA form.
5769 * Profiling-directed optimizations.
5770 * We may compile by default with few or no optimizations and instrument the code
5771 * or the user may indicate what methods to optimize the most either in a config file
5772 * or through repeated runs where the compiler applies offline the optimizations to
5773 * each method and then decides if it was worth it.
/* Verification helper macros used while decoding IL in mono_method_to_ir ();
 * each bails out via UNVERIFIED (or LOAD_ERROR for type-load failures) when
 * the IL stream is malformed. */
5776 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5777 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5778 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5779 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5780 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5781 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5782 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5783 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5785 /* offset from br.s -> br like opcodes */
5786 #define BIG_BRANCH_OFFSET 13
/* ip_in_bb: TRUE when IP still belongs to BB, i.e. no other bblock starts
 * at IP's IL offset in cfg->cil_offset_to_bb. */
5789 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5791 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5793 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL stream [START, END): decode each opcode with
 * mono_opcode_value () and create basic blocks (GET_BBLOCK) at every branch
 * target and at the instruction following a branch/switch. Bblocks that
 * contain a CEE_THROW are flagged out_of_line so later passes can move them.
 */
5797 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5799 unsigned char *ip = start;
5800 unsigned char *target;
5803 MonoBasicBlock *bblock;
5804 const MonoOpcode *opcode;
5807 cli_addr = ip - start;
5808 i = mono_opcode_value ((const guint8 **)&ip, end);
5811 opcode = &mono_opcodes [i];
5812 switch (opcode->argument) {
5813 case MonoInlineNone:
5816 case MonoInlineString:
5817 case MonoInlineType:
5818 case MonoInlineField:
5819 case MonoInlineMethod:
5822 case MonoShortInlineR:
5829 case MonoShortInlineVar:
5830 case MonoShortInlineI:
5833 case MonoShortInlineBrTarget:
5834 target = start + cli_addr + 2 + (signed char)ip [1]; /* 2 = opcode + 8-bit offset */
5835 GET_BBLOCK (cfg, bblock, target);
5838 GET_BBLOCK (cfg, bblock, ip);
5840 case MonoInlineBrTarget:
5841 target = start + cli_addr + 5 + (gint32)read32 (ip + 1); /* 5 = opcode + 32-bit offset */
5842 GET_BBLOCK (cfg, bblock, target);
5845 GET_BBLOCK (cfg, bblock, ip);
5847 case MonoInlineSwitch: {
5848 guint32 n = read32 (ip + 1);
5851 cli_addr += 5 + 4 * n; /* switch targets are relative to the end of the instruction */
5852 target = start + cli_addr;
5853 GET_BBLOCK (cfg, bblock, target);
5855 for (j = 0; j < n; ++j) {
5856 target = start + cli_addr + (gint32)read32 (ip);
5857 GET_BBLOCK (cfg, bblock, target);
5867 g_assert_not_reached ();
5870 if (i == CEE_THROW) {
5871 unsigned char *bb_start = ip - 1;
5873 /* Find the start of the bblock containing the throw */
5875 while ((bb_start >= start) && !bblock) {
5876 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5880 bblock->out_of_line = 1;
/* mini_get_method_allow_open: resolve TOKEN to a MonoMethod. For wrapper
 * methods the method is fetched from the wrapper data (and inflated with
 * CONTEXT); otherwise it is looked up in the image's metadata. Unlike
 * mini_get_method () this does not reject open constructed types. */
5890 static inline MonoMethod *
5891 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5895 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5896 method = mono_method_get_wrapper_data (m, token);
5898 method = mono_class_inflate_generic_method (method, context);
5900 method = mono_get_method_full (m->klass->image, token, klass, context);
/* mini_get_method: like mini_get_method_allow_open (), but when not compiling
 * shared generic code an open constructed declaring type is not acceptable
 * (handled in the elided branch below). */
5906 static inline MonoMethod *
5907 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5909 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5911 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/* mini_get_class: resolve TOKEN to a MonoClass, going through the wrapper
 * data for wrapper methods (inflating with CONTEXT) or the image metadata
 * otherwise; the class is initialized before being returned. */
5917 static inline MonoClass*
5918 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5922 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5923 klass = mono_method_get_wrapper_data (method, token);
5925 klass = mono_class_inflate_generic_class (klass, context);
5927 klass = mono_class_get_full (method->klass->image, token, context);
5930 mono_class_init (klass);
/* mini_get_signature: resolve TOKEN to a MonoMethodSignature. Wrapper
 * methods store the signature in the wrapper data (inflated with CONTEXT);
 * other methods parse it from the image's metadata. */
5934 static inline MonoMethodSignature*
5935 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5937 MonoMethodSignature *fsig;
5939 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5942 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5944 fsig = mono_inflate_generic_signature (fsig, context, &error);
5946 g_assert (mono_error_ok (&error));
5949 fsig = mono_metadata_parse_signature (method->klass->image, token);
5955 * Returns TRUE if the JIT should abort inlining because "callee"
5956 * is influenced by security attributes.
/* check_linkdemand: evaluate CAS link demands between CALLER and CALLEE.
 * An ECMA link demand emits IR that throws a SecurityException at the call
 * site; other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on cfg. */
5959 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5963 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
5967 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5968 if (result == MONO_JIT_SECURITY_OK)
5971 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5972 /* Generate code to throw a SecurityException before the actual call/link */
5973 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* NOTE(review): the constant 4 presumably encodes the security action kind
 * expected by LinkDemandSecurityException — confirm against its signature. */
5976 NEW_ICONST (cfg, args [0], 4);
5977 NEW_METHODCONST (cfg, args [1], caller);
5978 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5979 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5980 /* don't hide previous results */
5981 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5982 cfg->exception_data = result;
/* throw_exception: return SecurityManager.ThrowException(exception), cached
 * in a function-local static after the first lookup. */
5990 throw_exception (void)
5992 static MonoMethod *method = NULL;
5995 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5996 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* emit_throw_exception: emit a call to SecurityManager.ThrowException with
 * the pre-built exception object EX as its argument. */
6003 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6005 MonoMethod *thrower = throw_exception ();
6008 EMIT_NEW_PCONST (cfg, args [0], ex);
6009 mono_emit_method_call (cfg, thrower, args, NULL);
6013 * Return the original method if a wrapper is specified. We can only access
6014 * the custom attributes from the original method.
6017 get_original_method (MonoMethod *method)
6019 if (method->wrapper_type == MONO_WRAPPER_NONE)
6022 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6023 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6026 /* in other cases we need to find the original method */
6027 return mono_marshal_method_from_wrapper (method);
/* CoreCLR security: emit a throw if CALLER (unwrapped via
 * get_original_method) is not allowed to access FIELD. */
6031 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6032 MonoBasicBlock *bblock, unsigned char *ip)
6034 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6035 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6037 emit_throw_exception (cfg, ex);
/* CoreCLR security: emit a throw if CALLER (unwrapped via
 * get_original_method) is not allowed to call CALLEE. */
6041 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6042 MonoBasicBlock *bblock, unsigned char *ip)
6044 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6045 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6047 emit_throw_exception (cfg, ex);
6051 * Check that the IL instructions at ip are the array initialization
6052 * sequence and return the pointer to the data and the size.
/* Recognizes the pattern emitted by C#/csc after "newarr":
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray
 * and, for element types that don't need byte swapping, returns a direct
 * pointer to the field's RVA data so the runtime call can be elided. */
6055 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6058 * newarr[System.Int32]
6060 * ldtoken field valuetype ...
6061 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (Field table). */
6063 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6064 guint32 token = read32 (ip + 7);
6065 guint32 field_token = read32 (ip + 2);
6066 guint32 field_index = field_token & 0xffffff;
6068 const char *data_ptr;
6070 MonoMethod *cmethod;
6071 MonoClass *dummy_class;
6072 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6078 *out_field_token = field_token;
6080 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6083 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6085 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6086 case MONO_TYPE_BOOLEAN:
6090 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6091 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6092 case MONO_TYPE_CHAR:
6102 return NULL; /* stupid ARM FP swapped format */
6112 if (size > mono_type_size (field->type, &dummy_align))
6115 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6116 if (!method->klass->image->dynamic) {
6117 field_index = read32 (ip + 2) & 0xffffff;
6118 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6119 data_ptr = mono_image_rva_map (method->klass->image, rva);
6120 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6121 /* for aot code we do the lookup on load */
6122 if (aot && data_ptr)
6123 return GUINT_TO_POINTER (rva);
6125 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6127 data_ptr = mono_field_get_data (field);
/* set_exception_type_from_invalid_il: record an InvalidProgramException on
 * CFG, with a message naming METHOD and disassembling the offending IL at
 * IP (or noting an empty method body). */
6135 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6137 char *method_fname = mono_method_full_name (method, TRUE);
6139 MonoMethodHeader *header = mono_method_get_header (method);
6141 if (header->code_size == 0)
6142 method_code = g_strdup ("method body is empty.");
6144 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6145 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6146 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6147 g_free (method_fname);
6148 g_free (method_code);
6149 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* set_exception_object: abort compilation with a pre-built exception object;
 * the object is GC-registered since cfg->exception_ptr is not scanned. */
6153 set_exception_object (MonoCompile *cfg, MonoException *exception)
6155 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6156 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6157 cfg->exception_ptr = exception;
/* emit_stloc_ir: store *SP into local N. When the store would be a plain
 * reg-reg move of a just-emitted constant, retarget the constant's dreg to
 * the local instead of emitting a separate store. */
6161 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6164 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6165 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6166 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6167 /* Optimize reg-reg moves away */
6169 * Can't optimize other opcodes, since sp[0] might point to
6170 * the last ins of a decomposed opcode.
6172 sp [0]->dreg = (cfg)->locals [n]->dreg;
6174 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6179 * ldloca inhibits many optimizations so try to get rid of it in common
/* Handles the "ldloca <n>; initobj <type>" idiom by emitting a direct
 * NULL store (reference types) or VZERO (valuetypes) into the local,
 * avoiding the address-taken local. Returns the advanced IP on success. */
6182 static inline unsigned char *
6183 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6192 local = read16 (ip + 2);
6196 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6197 gboolean skip = FALSE;
6199 /* From the INITOBJ case */
6200 token = read32 (ip + 2);
6201 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6202 CHECK_TYPELOAD (klass);
6203 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6204 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6205 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6206 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* is_exception_class: walk the parent chain to test whether CLASS derives
 * from System.Exception. */
6219 is_exception_class (MonoClass *class)
6222 if (class == mono_defaults.exception_class)
6224 class = class->parent;
6230 * is_jit_optimizer_disabled:
6232 * Determine whenever M's assembly has a DebuggableAttribute with the
6233 * IsJITOptimizerDisabled flag set.
6236 is_jit_optimizer_disabled (MonoMethod *m)
6238 MonoAssembly *ass = m->klass->image->assembly;
6239 MonoCustomAttrInfo* attrs;
6240 static MonoClass *klass;
6242 gboolean val = FALSE;
/* Fast path: result cached per-assembly (inited flag published after the
 * value with a memory barrier, double-checked-init style). */
6245 if (ass->jit_optimizer_disabled_inited)
6246 return ass->jit_optimizer_disabled;
6249 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6252 ass->jit_optimizer_disabled = FALSE;
6253 mono_memory_barrier ();
6254 ass->jit_optimizer_disabled_inited = TRUE;
6258 attrs = mono_custom_attrs_from_assembly (ass);
6260 for (i = 0; i < attrs->num_attrs; ++i) {
6261 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6264 MonoMethodSignature *sig;
6266 if (!attr->ctor || attr->ctor->klass != klass)
6268 /* Decode the attribute. See reflection.c */
6269 len = attr->data_size;
6270 p = (const char*)attr->data;
6271 g_assert (read16 (p) == 0x0001); /* custom-attribute blob prolog */
6274 // FIXME: Support named parameters
6275 sig = mono_method_signature (attr->ctor);
6276 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6278 /* Two boolean arguments */
6282 mono_custom_attrs_free (attrs);
6285 ass->jit_optimizer_disabled = val;
6286 mono_memory_barrier ();
6287 ass->jit_optimizer_disabled_inited = TRUE;
/* is_supported_tail_call: decide whether a tail. call from METHOD to
 * CMETHOD can be honored. Starts from an arch-specific or signature-equality
 * check, then vetoes anything that could leave pointers into the caller's
 * soon-to-be-destroyed frame (byref/ptr/fnptr args, valuetype this),
 * pinvokes, save_lmf methods and most wrappers. */
6293 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6295 gboolean supported_tail_call;
6298 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6299 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6301 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6304 for (i = 0; i < fsig->param_count; ++i) {
6305 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6306 /* These can point to the current method's stack */
6307 supported_tail_call = FALSE;
6309 if (fsig->hasthis && cmethod->klass->valuetype)
6310 /* this might point to the current method's stack */
6311 supported_tail_call = FALSE;
6312 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6313 supported_tail_call = FALSE;
6314 if (cfg->method->save_lmf)
6315 supported_tail_call = FALSE;
6316 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6317 supported_tail_call = FALSE;
6319 /* Debugging support */
6321 if (supported_tail_call) {
6322 if (!mono_debug_count ())
6323 supported_tail_call = FALSE;
6327 return supported_tail_call;
6330 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6331 * it to the thread local value based on the tls_offset field. Every other kind of access to
6332 * the field causes an assert.
/* Matches on field name "tlsdata", declaring type "ThreadLocal`1" and
 * corlib image; note strcmp () == 0 means "matches" here. */
6335 is_magic_tls_access (MonoClassField *field)
6337 if (strcmp (field->name, "tlsdata"))
6339 if (strcmp (field->parent->name, "ThreadLocal`1"))
6341 return field->parent->image == mono_defaults.corlib;
6344 /* emits the code needed to access a managed tls var (like ThreadStatic)
6345 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6346 * pointer for the current thread.
6347 * Returns the MonoInst* representing the address of the tls var.
6350 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6353 int static_data_reg, array_reg, dreg;
6354 int offset2_reg, idx_reg;
6355 // inlined access to the tls data
6356 // idx = (offset >> 24) - 1;
6357 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6358 static_data_reg = alloc_ireg (cfg);
6359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6360 idx_reg = alloc_ireg (cfg);
6361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale idx by sizeof (gpointer) to index the static_data pointer array */
6363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6364 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6365 array_reg = alloc_ireg (cfg);
6366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6367 offset2_reg = alloc_ireg (cfg);
6368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6369 dreg = alloc_ireg (cfg);
6370 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6375 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6376 * this address is cached per-method in cached_tls_addr.
/* First use computes the address (via the thread intrinsic when available,
 * otherwise a call to CurrentInternalThread_internal) and stores it in a
 * temp; later uses reload the cached temp. */
6379 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6381 MonoInst *load, *addr, *temp, *store, *thread_ins;
6382 MonoClassField *offset_field;
6384 if (*cached_tls_addr) {
6385 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6388 thread_ins = mono_get_thread_intrinsic (cfg);
6389 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6391 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6393 MONO_ADD_INS (cfg->cbb, thread_ins);
6395 MonoMethod *thread_method;
6396 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6397 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6399 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6400 addr->klass = mono_class_from_mono_type (tls_field->type);
6401 addr->type = STACK_MP;
6402 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6403 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6405 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6410 * mono_method_to_ir:
6412 * Translate the .net IL into linear IR.
6415 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6416 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6417 guint inline_offset, gboolean is_virtual_call)
6420 MonoInst *ins, **sp, **stack_start;
6421 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6422 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6423 MonoMethod *cmethod, *method_definition;
6424 MonoInst **arg_array;
6425 MonoMethodHeader *header;
6427 guint32 token, ins_flag;
6429 MonoClass *constrained_call = NULL;
6430 unsigned char *ip, *end, *target, *err_pos;
6431 static double r8_0 = 0.0;
6432 MonoMethodSignature *sig;
6433 MonoGenericContext *generic_context = NULL;
6434 MonoGenericContainer *generic_container = NULL;
6435 MonoType **param_types;
6436 int i, n, start_new_bblock, dreg;
6437 int num_calls = 0, inline_costs = 0;
6438 int breakpoint_id = 0;
6440 MonoBoolean security, pinvoke;
6441 MonoSecurityManager* secman = NULL;
6442 MonoDeclSecurityActions actions;
6443 GSList *class_inits = NULL;
6444 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6446 gboolean init_locals, seq_points, skip_dead_blocks;
6447 gboolean disable_inline, sym_seq_points = FALSE;
6448 MonoInst *cached_tls_addr = NULL;
6449 MonoDebugMethodInfo *minfo;
6450 MonoBitSet *seq_point_locs = NULL;
6451 MonoBitSet *seq_point_set_locs = NULL;
6453 disable_inline = is_jit_optimizer_disabled (method);
6455 /* serialization and xdomain stuff may need access to private fields and methods */
6456 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6457 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6458 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6459 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6460 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6461 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6463 dont_verify |= mono_security_smcs_hack_enabled ();
6465 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6466 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6467 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6468 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6469 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6471 image = method->klass->image;
6472 header = mono_method_get_header (method);
6474 MonoLoaderError *error;
6476 if ((error = mono_loader_get_last_error ())) {
6477 mono_cfg_set_exception (cfg, error->exception_type);
6479 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6480 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6482 goto exception_exit;
6484 generic_container = mono_method_get_generic_container (method);
6485 sig = mono_method_signature (method);
6486 num_args = sig->hasthis + sig->param_count;
6487 ip = (unsigned char*)header->code;
6488 cfg->cil_start = ip;
6489 end = ip + header->code_size;
6490 cfg->stat_cil_code_size += header->code_size;
6491 init_locals = header->init_locals;
6493 seq_points = cfg->gen_seq_points && cfg->method == method;
6494 #ifdef PLATFORM_ANDROID
6495 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6498 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6499 /* We could hit a seq point before attaching to the JIT (#8338) */
6503 if (cfg->gen_seq_points && cfg->method == method) {
6504 minfo = mono_debug_lookup_method (method);
6506 int i, n_il_offsets;
6510 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6511 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6512 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6513 sym_seq_points = TRUE;
6514 for (i = 0; i < n_il_offsets; ++i) {
6515 if (il_offsets [i] < header->code_size)
6516 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6522 * Methods without init_locals set could cause asserts in various passes
6527 method_definition = method;
6528 while (method_definition->is_inflated) {
6529 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6530 method_definition = imethod->declaring;
6533 /* SkipVerification is not allowed if core-clr is enabled */
6534 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6536 dont_verify_stloc = TRUE;
6539 if (mono_debug_using_mono_debugger ())
6540 cfg->keep_cil_nops = TRUE;
6542 if (sig->is_inflated)
6543 generic_context = mono_method_get_context (method);
6544 else if (generic_container)
6545 generic_context = &generic_container->context;
6546 cfg->generic_context = generic_context;
6548 if (!cfg->generic_sharing_context)
6549 g_assert (!sig->has_type_parameters);
6551 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6552 g_assert (method->is_inflated);
6553 g_assert (mono_method_get_context (method)->method_inst);
6555 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6556 g_assert (sig->generic_param_count);
6558 if (cfg->method == method) {
6559 cfg->real_offset = 0;
6561 cfg->real_offset = inline_offset;
6564 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6565 cfg->cil_offset_to_bb_len = header->code_size;
6567 cfg->current_method = method;
6569 if (cfg->verbose_level > 2)
6570 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6572 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6574 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6575 for (n = 0; n < sig->param_count; ++n)
6576 param_types [n + sig->hasthis] = sig->params [n];
6577 cfg->arg_types = param_types;
6579 dont_inline = g_list_prepend (dont_inline, method);
6580 if (cfg->method == method) {
6582 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6583 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6586 NEW_BBLOCK (cfg, start_bblock);
6587 cfg->bb_entry = start_bblock;
6588 start_bblock->cil_code = NULL;
6589 start_bblock->cil_length = 0;
6590 #if defined(__native_client_codegen__)
6591 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6592 ins->dreg = alloc_dreg (cfg, STACK_I4);
6593 MONO_ADD_INS (start_bblock, ins);
6597 NEW_BBLOCK (cfg, end_bblock);
6598 cfg->bb_exit = end_bblock;
6599 end_bblock->cil_code = NULL;
6600 end_bblock->cil_length = 0;
6601 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6602 g_assert (cfg->num_bblocks == 2);
6604 arg_array = cfg->args;
6606 if (header->num_clauses) {
6607 cfg->spvars = g_hash_table_new (NULL, NULL);
6608 cfg->exvars = g_hash_table_new (NULL, NULL);
6610 /* handle exception clauses */
6611 for (i = 0; i < header->num_clauses; ++i) {
6612 MonoBasicBlock *try_bb;
6613 MonoExceptionClause *clause = &header->clauses [i];
6614 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6615 try_bb->real_offset = clause->try_offset;
6616 try_bb->try_start = TRUE;
6617 try_bb->region = ((i + 1) << 8) | clause->flags;
6618 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6619 tblock->real_offset = clause->handler_offset;
6620 tblock->flags |= BB_EXCEPTION_HANDLER;
6622 link_bblock (cfg, try_bb, tblock);
6624 if (*(ip + clause->handler_offset) == CEE_POP)
6625 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6627 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6628 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6629 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6630 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6631 MONO_ADD_INS (tblock, ins);
6633 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6634 /* finally clauses already have a seq point */
6635 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6636 MONO_ADD_INS (tblock, ins);
6639 /* todo: is a fault block unsafe to optimize? */
6640 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6641 tblock->flags |= BB_EXCEPTION_UNSAFE;
6645 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6647 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6649 /* catch and filter blocks get the exception object on the stack */
6650 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6651 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6652 MonoInst *dummy_use;
6654 /* mostly like handle_stack_args (), but just sets the input args */
6655 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6656 tblock->in_scount = 1;
6657 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6658 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6661 * Add a dummy use for the exvar so its liveness info will be
6665 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6667 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6668 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6669 tblock->flags |= BB_EXCEPTION_HANDLER;
6670 tblock->real_offset = clause->data.filter_offset;
6671 tblock->in_scount = 1;
6672 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6673 /* The filter block shares the exvar with the handler block */
6674 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6675 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6676 MONO_ADD_INS (tblock, ins);
6680 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6681 clause->data.catch_class &&
6682 cfg->generic_sharing_context &&
6683 mono_class_check_context_used (clause->data.catch_class)) {
6685 * In shared generic code with catch
6686 * clauses containing type variables
6687 * the exception handling code has to
6688 * be able to get to the rgctx.
6689 * Therefore we have to make sure that
6690 * the vtable/mrgctx argument (for
6691 * static or generic methods) or the
6692 * "this" argument (for non-static
6693 * methods) are live.
6695 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6696 mini_method_get_context (method)->method_inst ||
6697 method->klass->valuetype) {
6698 mono_get_vtable_var (cfg);
6700 MonoInst *dummy_use;
6702 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6707 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6708 cfg->cbb = start_bblock;
6709 cfg->args = arg_array;
6710 mono_save_args (cfg, sig, inline_args);
6713 /* FIRST CODE BLOCK */
6714 NEW_BBLOCK (cfg, bblock);
6715 bblock->cil_code = ip;
6719 ADD_BBLOCK (cfg, bblock);
6721 if (cfg->method == method) {
6722 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6723 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6724 MONO_INST_NEW (cfg, ins, OP_BREAK);
6725 MONO_ADD_INS (bblock, ins);
6729 if (mono_security_cas_enabled ())
6730 secman = mono_security_manager_get_methods ();
6732 security = (secman && mono_security_method_has_declsec (method));
6733 /* at this point having security doesn't mean we have any code to generate */
6734 if (security && (cfg->method == method)) {
6735 /* Only Demand, NonCasDemand and DemandChoice require code generation.
6736 * And we do not want to enter the next section (with allocation) if we
6737 * have nothing to generate */
6738 security = mono_declsec_get_demands (method, &actions);
6741 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6742 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6744 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6745 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6746 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6748 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6749 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6753 mono_custom_attrs_free (custom);
6756 custom = mono_custom_attrs_from_class (wrapped->klass);
6757 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6761 mono_custom_attrs_free (custom);
6764 /* not a P/Invoke after all */
6769 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6770 /* we use a separate basic block for the initialization code */
6771 NEW_BBLOCK (cfg, init_localsbb);
6772 cfg->bb_init = init_localsbb;
6773 init_localsbb->real_offset = cfg->real_offset;
6774 start_bblock->next_bb = init_localsbb;
6775 init_localsbb->next_bb = bblock;
6776 link_bblock (cfg, start_bblock, init_localsbb);
6777 link_bblock (cfg, init_localsbb, bblock);
6779 cfg->cbb = init_localsbb;
6781 start_bblock->next_bb = bblock;
6782 link_bblock (cfg, start_bblock, bblock);
6785 if (cfg->gsharedvt && cfg->method == method) {
6786 MonoGSharedVtMethodInfo *info;
6787 MonoInst *var, *locals_var;
6790 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6791 info->method = cfg->method;
6793 info->entries = g_ptr_array_new ();
6794 cfg->gsharedvt_info = info;
6796 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6797 /* prevent it from being register allocated */
6798 //var->flags |= MONO_INST_INDIRECT;
6799 cfg->gsharedvt_info_var = var;
6801 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6802 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6804 /* Allocate locals */
6805 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6806 /* prevent it from being register allocated */
6807 //locals_var->flags |= MONO_INST_INDIRECT;
6808 cfg->gsharedvt_locals_var = locals_var;
6810 dreg = alloc_ireg (cfg);
6811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6813 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6814 ins->dreg = locals_var->dreg;
6816 MONO_ADD_INS (cfg->cbb, ins);
6817 cfg->gsharedvt_locals_var_ins = ins;
6819 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6822 ins->flags |= MONO_INST_INIT;
6826 /* at this point we know, if security is TRUE, that some code needs to be generated */
6827 if (security && (cfg->method == method)) {
6830 cfg->stat_cas_demand_generation++;
6832 if (actions.demand.blob) {
6833 /* Add code for SecurityAction.Demand */
6834 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6835 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6836 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6837 mono_emit_method_call (cfg, secman->demand, args, NULL);
6839 if (actions.noncasdemand.blob) {
6840 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6841 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6842 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6843 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6844 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6845 mono_emit_method_call (cfg, secman->demand, args, NULL);
6847 if (actions.demandchoice.blob) {
6848 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6849 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6850 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6851 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6852 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6856 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6858 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6861 if (mono_security_core_clr_enabled ()) {
6862 /* check if this is native code, e.g. an icall or a p/invoke */
6863 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6864 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6866 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6867 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6869 /* if this is a native call then it can only be JITted from platform code */
6870 if ((icall || pinvk) && method->klass && method->klass->image) {
6871 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6872 MonoException *ex = icall ? mono_get_exception_security () :
6873 mono_get_exception_method_access ();
6874 emit_throw_exception (cfg, ex);
6881 if (header->code_size == 0)
6884 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6889 if (cfg->method == method)
6890 mono_debug_init_method (cfg, bblock, breakpoint_id);
6892 for (n = 0; n < header->num_locals; ++n) {
6893 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6898 /* We force the vtable variable here for all shared methods
6899 for the possibility that they might show up in a stack
6900 trace where their exact instantiation is needed. */
6901 if (cfg->generic_sharing_context && method == cfg->method) {
6902 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6903 mini_method_get_context (method)->method_inst ||
6904 method->klass->valuetype) {
6905 mono_get_vtable_var (cfg);
6907 /* FIXME: Is there a better way to do this?
6908 We need the variable live for the duration
6909 of the whole method. */
6910 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6914 /* add a check for this != NULL to inlined methods */
6915 if (is_virtual_call) {
6918 NEW_ARGLOAD (cfg, arg_ins, 0);
6919 MONO_ADD_INS (cfg->cbb, arg_ins);
6920 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6923 skip_dead_blocks = !dont_verify;
6924 if (skip_dead_blocks) {
6925 original_bb = bb = mono_basic_block_split (method, &error);
6926 if (!mono_error_ok (&error)) {
6927 mono_error_cleanup (&error);
6933 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6934 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6937 start_new_bblock = 0;
6940 if (cfg->method == method)
6941 cfg->real_offset = ip - header->code;
6943 cfg->real_offset = inline_offset;
6948 if (start_new_bblock) {
6949 bblock->cil_length = ip - bblock->cil_code;
6950 if (start_new_bblock == 2) {
6951 g_assert (ip == tblock->cil_code);
6953 GET_BBLOCK (cfg, tblock, ip);
6955 bblock->next_bb = tblock;
6958 start_new_bblock = 0;
6959 for (i = 0; i < bblock->in_scount; ++i) {
6960 if (cfg->verbose_level > 3)
6961 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6962 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6966 g_slist_free (class_inits);
6969 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6970 link_bblock (cfg, bblock, tblock);
6971 if (sp != stack_start) {
6972 handle_stack_args (cfg, stack_start, sp - stack_start);
6974 CHECK_UNVERIFIABLE (cfg);
6976 bblock->next_bb = tblock;
6979 for (i = 0; i < bblock->in_scount; ++i) {
6980 if (cfg->verbose_level > 3)
6981 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6982 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6985 g_slist_free (class_inits);
6990 if (skip_dead_blocks) {
6991 int ip_offset = ip - header->code;
6993 if (ip_offset == bb->end)
6997 int op_size = mono_opcode_size (ip, end);
6998 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7000 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7002 if (ip_offset + op_size == bb->end) {
7003 MONO_INST_NEW (cfg, ins, OP_NOP);
7004 MONO_ADD_INS (bblock, ins);
7005 start_new_bblock = 1;
7013 * Sequence points are points where the debugger can place a breakpoint.
7014 * Currently, we generate these automatically at points where the IL
7017 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7019 * Make methods interruptable at the beginning, and at the targets of
7020 * backward branches.
7021 * Also, do this at the start of every bblock in methods with clauses too,
7022 * to be able to handle instructions with imprecise control flow like
7024 * Backward branches are handled at the end of method-to-ir ().
7026 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7028 /* Avoid sequence points on empty IL like .volatile */
7029 // FIXME: Enable this
7030 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7031 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7032 MONO_ADD_INS (cfg->cbb, ins);
7035 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7038 bblock->real_offset = cfg->real_offset;
7040 if ((cfg->method == method) && cfg->coverage_info) {
7041 guint32 cil_offset = ip - header->code;
7042 cfg->coverage_info->data [cil_offset].cil_code = ip;
7044 /* TODO: Use an increment here */
7045 #if defined(TARGET_X86)
7046 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7047 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7049 MONO_ADD_INS (cfg->cbb, ins);
7051 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7052 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7056 if (cfg->verbose_level > 3)
7057 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7061 if (seq_points && !sym_seq_points && sp != stack_start) {
7063 * The C# compiler uses these nops to notify the JIT that it should
7064 * insert seq points.
7066 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7067 MONO_ADD_INS (cfg->cbb, ins);
7069 if (cfg->keep_cil_nops)
7070 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7072 MONO_INST_NEW (cfg, ins, OP_NOP);
7074 MONO_ADD_INS (bblock, ins);
7077 if (should_insert_brekpoint (cfg->method)) {
7078 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7080 MONO_INST_NEW (cfg, ins, OP_NOP);
7083 MONO_ADD_INS (bblock, ins);
7089 CHECK_STACK_OVF (1);
7090 n = (*ip)-CEE_LDARG_0;
7092 EMIT_NEW_ARGLOAD (cfg, ins, n);
7100 CHECK_STACK_OVF (1);
7101 n = (*ip)-CEE_LDLOC_0;
7103 EMIT_NEW_LOCLOAD (cfg, ins, n);
7112 n = (*ip)-CEE_STLOC_0;
7115 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7117 emit_stloc_ir (cfg, sp, header, n);
7124 CHECK_STACK_OVF (1);
7127 EMIT_NEW_ARGLOAD (cfg, ins, n);
7133 CHECK_STACK_OVF (1);
7136 NEW_ARGLOADA (cfg, ins, n);
7137 MONO_ADD_INS (cfg->cbb, ins);
7147 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7149 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7154 CHECK_STACK_OVF (1);
7157 EMIT_NEW_LOCLOAD (cfg, ins, n);
7161 case CEE_LDLOCA_S: {
7162 unsigned char *tmp_ip;
7164 CHECK_STACK_OVF (1);
7165 CHECK_LOCAL (ip [1]);
7167 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7173 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7182 CHECK_LOCAL (ip [1]);
7183 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7185 emit_stloc_ir (cfg, sp, header, ip [1]);
7190 CHECK_STACK_OVF (1);
7191 EMIT_NEW_PCONST (cfg, ins, NULL);
7192 ins->type = STACK_OBJ;
7197 CHECK_STACK_OVF (1);
7198 EMIT_NEW_ICONST (cfg, ins, -1);
7211 CHECK_STACK_OVF (1);
7212 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7218 CHECK_STACK_OVF (1);
7220 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7226 CHECK_STACK_OVF (1);
7227 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7233 CHECK_STACK_OVF (1);
7234 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7235 ins->type = STACK_I8;
7236 ins->dreg = alloc_dreg (cfg, STACK_I8);
7238 ins->inst_l = (gint64)read64 (ip);
7239 MONO_ADD_INS (bblock, ins);
7245 gboolean use_aotconst = FALSE;
7247 #ifdef TARGET_POWERPC
7248 /* FIXME: Clean this up */
7249 if (cfg->compile_aot)
7250 use_aotconst = TRUE;
7253 /* FIXME: we should really allocate this only late in the compilation process */
7254 f = mono_domain_alloc (cfg->domain, sizeof (float));
7256 CHECK_STACK_OVF (1);
7262 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7264 dreg = alloc_freg (cfg);
7265 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7266 ins->type = STACK_R8;
7268 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7269 ins->type = STACK_R8;
7270 ins->dreg = alloc_dreg (cfg, STACK_R8);
7272 MONO_ADD_INS (bblock, ins);
7282 gboolean use_aotconst = FALSE;
7284 #ifdef TARGET_POWERPC
7285 /* FIXME: Clean this up */
7286 if (cfg->compile_aot)
7287 use_aotconst = TRUE;
7290 /* FIXME: we should really allocate this only late in the compilation process */
7291 d = mono_domain_alloc (cfg->domain, sizeof (double));
7293 CHECK_STACK_OVF (1);
7299 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7301 dreg = alloc_freg (cfg);
7302 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7303 ins->type = STACK_R8;
7305 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7306 ins->type = STACK_R8;
7307 ins->dreg = alloc_dreg (cfg, STACK_R8);
7309 MONO_ADD_INS (bblock, ins);
7318 MonoInst *temp, *store;
7320 CHECK_STACK_OVF (1);
7324 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7325 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7327 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7330 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7343 if (sp [0]->type == STACK_R8)
7344 /* we need to pop the value from the x86 FP stack */
7345 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7351 INLINE_FAILURE ("jmp");
7352 GSHAREDVT_FAILURE (*ip);
7355 if (stack_start != sp)
7357 token = read32 (ip + 1);
7358 /* FIXME: check the signature matches */
7359 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7361 if (!cmethod || mono_loader_get_last_error ())
7364 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7365 GENERIC_SHARING_FAILURE (CEE_JMP);
7367 if (mono_security_cas_enabled ())
7368 CHECK_CFG_EXCEPTION;
7370 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7372 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7375 /* Handle tail calls similarly to calls */
7376 n = fsig->param_count + fsig->hasthis;
7378 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7379 call->method = cmethod;
7380 call->tail_call = TRUE;
7381 call->signature = mono_method_signature (cmethod);
7382 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7383 call->inst.inst_p0 = cmethod;
7384 for (i = 0; i < n; ++i)
7385 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7387 mono_arch_emit_call (cfg, call);
7388 MONO_ADD_INS (bblock, (MonoInst*)call);
7391 for (i = 0; i < num_args; ++i)
7392 /* Prevent arguments from being optimized away */
7393 arg_array [i]->flags |= MONO_INST_VOLATILE;
7395 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7396 ins = (MonoInst*)call;
7397 ins->inst_p0 = cmethod;
7398 MONO_ADD_INS (bblock, ins);
7402 start_new_bblock = 1;
7407 case CEE_CALLVIRT: {
7408 MonoInst *addr = NULL;
7409 MonoMethodSignature *fsig = NULL;
7411 int virtual = *ip == CEE_CALLVIRT;
7412 int calli = *ip == CEE_CALLI;
7413 gboolean pass_imt_from_rgctx = FALSE;
7414 MonoInst *imt_arg = NULL;
7415 MonoInst *keep_this_alive = NULL;
7416 gboolean pass_vtable = FALSE;
7417 gboolean pass_mrgctx = FALSE;
7418 MonoInst *vtable_arg = NULL;
7419 gboolean check_this = FALSE;
7420 gboolean supported_tail_call = FALSE;
7421 gboolean tail_call = FALSE;
7422 gboolean need_seq_point = FALSE;
7423 guint32 call_opcode = *ip;
7424 gboolean emit_widen = TRUE;
7425 gboolean push_res = TRUE;
7426 gboolean skip_ret = FALSE;
7427 gboolean delegate_invoke = FALSE;
7430 token = read32 (ip + 1);
7435 //GSHAREDVT_FAILURE (*ip);
7440 fsig = mini_get_signature (method, token, generic_context);
7441 n = fsig->param_count + fsig->hasthis;
7443 if (method->dynamic && fsig->pinvoke) {
7447 * This is a call through a function pointer using a pinvoke
7448 * signature. Have to create a wrapper and call that instead.
7449 * FIXME: This is very slow, need to create a wrapper at JIT time
7450 * instead based on the signature.
7452 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7453 EMIT_NEW_PCONST (cfg, args [1], fsig);
7455 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7458 MonoMethod *cil_method;
7460 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7461 cil_method = cmethod;
7463 if (constrained_call) {
7464 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7465 if (cfg->verbose_level > 2)
7466 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7467 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7468 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7469 cfg->generic_sharing_context)) {
7470 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7473 if (cfg->verbose_level > 2)
7474 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7476 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7478 * This is needed since get_method_constrained can't find
7479 * the method in klass representing a type var.
7480 * The type var is guaranteed to be a reference type in this
7483 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7484 g_assert (!cmethod->klass->valuetype);
7486 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7491 if (!cmethod || mono_loader_get_last_error ())
7493 if (!dont_verify && !cfg->skip_visibility) {
7494 MonoMethod *target_method = cil_method;
7495 if (method->is_inflated) {
7496 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7498 if (!mono_method_can_access_method (method_definition, target_method) &&
7499 !mono_method_can_access_method (method, cil_method))
7500 METHOD_ACCESS_FAILURE;
7503 if (mono_security_core_clr_enabled ())
7504 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7506 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7507 /* MS.NET seems to silently convert this to a callvirt */
7512 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7513 * converts to a callvirt.
7515 * tests/bug-515884.il is an example of this behavior
7517 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7518 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7519 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7523 if (!cmethod->klass->inited)
7524 if (!mono_class_init (cmethod->klass))
7525 TYPE_LOAD_ERROR (cmethod->klass);
7527 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7528 mini_class_is_system_array (cmethod->klass)) {
7529 array_rank = cmethod->klass->rank;
7530 fsig = mono_method_signature (cmethod);
7532 fsig = mono_method_signature (cmethod);
7537 if (fsig->pinvoke) {
7538 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7539 check_for_pending_exc, FALSE);
7540 fsig = mono_method_signature (wrapper);
7541 } else if (constrained_call) {
7542 fsig = mono_method_signature (cmethod);
7544 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7548 mono_save_token_info (cfg, image, token, cil_method);
7550 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7552 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7553 * foo (bar (), baz ())
7554 * works correctly. MS does this also:
7555 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7556 * The problem with this approach is that the debugger will stop after all calls returning a value,
7557 * even for simple cases, like:
7560 /* Special case a few common successor opcodes */
7561 if (!(ip + 5 < end && ip [5] == CEE_POP))
7562 need_seq_point = TRUE;
7565 n = fsig->param_count + fsig->hasthis;
7567 /* Don't support calls made using type arguments for now */
7569 if (cfg->gsharedvt) {
7570 if (mini_is_gsharedvt_signature (cfg, fsig))
7571 GSHAREDVT_FAILURE (*ip);
7575 if (mono_security_cas_enabled ()) {
7576 if (check_linkdemand (cfg, method, cmethod))
7577 INLINE_FAILURE ("linkdemand");
7578 CHECK_CFG_EXCEPTION;
7581 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7582 g_assert_not_reached ();
7585 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7588 if (!cfg->generic_sharing_context && cmethod)
7589 g_assert (!mono_method_check_context_used (cmethod));
7593 //g_assert (!virtual || fsig->hasthis);
7597 if (constrained_call) {
7598 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7600 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7602 /* Special case Object methods as they are easy to implement */
7603 if (cmethod->klass == mono_defaults.object_class) {
7604 MonoInst *args [16];
7607 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7608 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7610 if (!strcmp (cmethod->name, "ToString")) {
7611 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7612 } else if (!strcmp (cmethod->name, "Equals")) {
7614 ins = mono_emit_jit_icall (cfg, mono_object_equals_gsharedvt, args);
7615 } else if (!strcmp (cmethod->name, "GetHashCode")) {
7616 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7618 GSHAREDVT_FAILURE (*ip);
7621 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7622 /* The 'Own method' case below */
7624 GSHAREDVT_FAILURE (*ip);
7628 * We have the `constrained.' prefix opcode.
7630 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7632 * The type parameter is instantiated as a valuetype,
7633 * but that type doesn't override the method we're
7634 * calling, so we need to box `this'.
7636 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7637 ins->klass = constrained_call;
7638 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7639 CHECK_CFG_EXCEPTION;
7640 } else if (!constrained_call->valuetype) {
7641 int dreg = alloc_ireg_ref (cfg);
7644 * The type parameter is instantiated as a reference
7645 * type. We have a managed pointer on the stack, so
7646 * we need to dereference it here.
7648 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7649 ins->type = STACK_OBJ;
7652 if (cmethod->klass->valuetype) {
7655 /* Interface method */
7658 mono_class_setup_vtable (constrained_call);
7659 CHECK_TYPELOAD (constrained_call);
7660 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7662 TYPE_LOAD_ERROR (constrained_call);
7663 slot = mono_method_get_vtable_slot (cmethod);
7665 TYPE_LOAD_ERROR (cmethod->klass);
7666 cmethod = constrained_call->vtable [ioffset + slot];
7668 if (cmethod->klass == mono_defaults.enum_class) {
7669 /* Enum implements some interfaces, so treat this as the first case */
7670 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7671 ins->klass = constrained_call;
7672 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7673 CHECK_CFG_EXCEPTION;
7678 constrained_call = NULL;
7681 if (!calli && check_call_signature (cfg, fsig, sp))
7684 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7685 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7686 delegate_invoke = TRUE;
7689 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7691 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7692 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7700 * If the callee is a shared method, then its static cctor
7701 * might not get called after the call was patched.
7703 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7704 emit_generic_class_init (cfg, cmethod->klass);
7705 CHECK_TYPELOAD (cmethod->klass);
7709 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7711 if (cfg->generic_sharing_context && cmethod) {
7712 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7714 context_used = mini_method_check_context_used (cfg, cmethod);
7716 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7717 /* Generic method interface
7718 calls are resolved via a
7719 helper function and don't
7721 if (!cmethod_context || !cmethod_context->method_inst)
7722 pass_imt_from_rgctx = TRUE;
7726 * If a shared method calls another
7727 * shared method then the caller must
7728 * have a generic sharing context
7729 * because the magic trampoline
7730 * requires it. FIXME: We shouldn't
7731 * have to force the vtable/mrgctx
7732 * variable here. Instead there
7733 * should be a flag in the cfg to
7734 * request a generic sharing context.
7737 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7738 mono_get_vtable_var (cfg);
7743 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7745 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7747 CHECK_TYPELOAD (cmethod->klass);
7748 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7753 g_assert (!vtable_arg);
7755 if (!cfg->compile_aot) {
7757 * emit_get_rgctx_method () calls mono_class_vtable () so check
7758 * for type load errors before.
7760 mono_class_setup_vtable (cmethod->klass);
7761 CHECK_TYPELOAD (cmethod->klass);
7764 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7766 /* !marshalbyref is needed to properly handle generic methods + remoting */
7767 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7768 MONO_METHOD_IS_FINAL (cmethod)) &&
7769 !mono_class_is_marshalbyref (cmethod->klass)) {
7776 if (pass_imt_from_rgctx) {
7777 g_assert (!pass_vtable);
7780 imt_arg = emit_get_rgctx_method (cfg, context_used,
7781 cmethod, MONO_RGCTX_INFO_METHOD);
7785 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7787 /* Calling virtual generic methods */
7788 if (cmethod && virtual &&
7789 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7790 !(MONO_METHOD_IS_FINAL (cmethod) &&
7791 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7792 fsig->generic_param_count &&
7793 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7794 MonoInst *this_temp, *this_arg_temp, *store;
7795 MonoInst *iargs [4];
7796 gboolean use_imt = FALSE;
7798 g_assert (fsig->is_inflated);
7800 /* Prevent inlining of methods that contain indirect calls */
7801 INLINE_FAILURE ("virtual generic call");
7803 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7804 GSHAREDVT_FAILURE (*ip);
7806 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7807 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7812 g_assert (!imt_arg);
7814 g_assert (cmethod->is_inflated);
7815 imt_arg = emit_get_rgctx_method (cfg, context_used,
7816 cmethod, MONO_RGCTX_INFO_METHOD);
7817 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7819 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7820 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7821 MONO_ADD_INS (bblock, store);
7823 /* FIXME: This should be a managed pointer */
7824 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7826 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7827 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7828 cmethod, MONO_RGCTX_INFO_METHOD);
7829 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7830 addr = mono_emit_jit_icall (cfg,
7831 mono_helper_compile_generic_method, iargs);
7833 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7835 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7842 * Implement a workaround for the inherent races involved in locking:
7848 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7849 * try block, the Exit () won't be executed, see:
7850 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7851 * To work around this, we extend such try blocks to include the last x bytes
7852 * of the Monitor.Enter () call.
7854 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7855 MonoBasicBlock *tbb;
7857 GET_BBLOCK (cfg, tbb, ip + 5);
7859 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7860 * from Monitor.Enter like ArgumentNullException.
7862 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7863 /* Mark this bblock as needing to be extended */
7864 tbb->extend_try_block = TRUE;
7868 /* Conversion to a JIT intrinsic */
7869 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7872 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7879 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7880 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7881 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7882 !g_list_find (dont_inline, cmethod)) {
7884 gboolean always = FALSE;
7886 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7887 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7888 /* Prevent inlining of methods that call wrappers */
7889 INLINE_FAILURE ("wrapper call");
7890 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7894 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7896 cfg->real_offset += 5;
7899 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7900 /* *sp is already set by inline_method */
7905 inline_costs += costs;
7911 /* Tail recursion elimination */
7912 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7913 gboolean has_vtargs = FALSE;
7916 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7917 INLINE_FAILURE ("tail call");
7919 /* keep it simple */
7920 for (i = fsig->param_count - 1; i >= 0; i--) {
7921 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7926 for (i = 0; i < n; ++i)
7927 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7928 MONO_INST_NEW (cfg, ins, OP_BR);
7929 MONO_ADD_INS (bblock, ins);
7930 tblock = start_bblock->out_bb [0];
7931 link_bblock (cfg, bblock, tblock);
7932 ins->inst_target_bb = tblock;
7933 start_new_bblock = 1;
7935 /* skip the CEE_RET, too */
7936 if (ip_in_bb (cfg, bblock, ip + 5))
7943 inline_costs += 10 * num_calls++;
7946 * Making generic calls out of gsharedvt methods.
7948 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7949 MonoRgctxInfoType info_type;
7952 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7953 //GSHAREDVT_FAILURE (*ip);
7954 // disable for possible remoting calls
7955 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7956 GSHAREDVT_FAILURE (*ip);
7957 if (fsig->generic_param_count) {
7958 /* virtual generic call */
7959 g_assert (mono_use_imt);
7960 g_assert (!imt_arg);
7961 /* Same as the virtual generic case above */
7962 imt_arg = emit_get_rgctx_method (cfg, context_used,
7963 cmethod, MONO_RGCTX_INFO_METHOD);
7964 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7969 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7970 /* test_0_multi_dim_arrays () in gshared.cs */
7971 GSHAREDVT_FAILURE (*ip);
7973 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7974 keep_this_alive = sp [0];
7976 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7977 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7979 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7980 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7982 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7984 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7986 * We pass the address to the gsharedvt trampoline in the rgctx reg
7988 MonoInst *callee = addr;
7990 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
7992 GSHAREDVT_FAILURE (*ip);
7994 addr = emit_get_rgctx_sig (cfg, context_used,
7995 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
7996 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8000 /* Generic sharing */
8001 /* FIXME: only do this for generic methods if
8002 they are not shared! */
8003 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8004 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8005 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8006 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8007 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8008 INLINE_FAILURE ("gshared");
8010 g_assert (cfg->generic_sharing_context && cmethod);
8014 * We are compiling a call to a
8015 * generic method from shared code,
8016 * which means that we have to look up
8017 * the method in the rgctx and do an
8021 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8023 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8024 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8028 /* Indirect calls */
8030 if (call_opcode == CEE_CALL)
8031 g_assert (context_used);
8032 else if (call_opcode == CEE_CALLI)
8033 g_assert (!vtable_arg);
8035 /* FIXME: what the hell is this??? */
8036 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8037 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8039 /* Prevent inlining of methods with indirect calls */
8040 INLINE_FAILURE ("indirect call");
8042 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8047 * Instead of emitting an indirect call, emit a direct call
8048 * with the contents of the aotconst as the patch info.
8050 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8051 info_type = addr->inst_c1;
8052 info_data = addr->inst_p0;
8054 info_type = addr->inst_right->inst_c1;
8055 info_data = addr->inst_right->inst_left;
8058 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8059 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8064 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8072 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8073 MonoInst *val = sp [fsig->param_count];
8075 if (val->type == STACK_OBJ) {
8076 MonoInst *iargs [2];
8081 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8084 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8085 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8086 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8087 emit_write_barrier (cfg, addr, val, 0);
8088 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8089 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8091 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8092 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8093 if (!cmethod->klass->element_class->valuetype && !readonly)
8094 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8095 CHECK_TYPELOAD (cmethod->klass);
8098 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8101 g_assert_not_reached ();
8108 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8112 /* Tail prefix / tail call optimization */
8114 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8115 /* FIXME: runtime generic context pointer for jumps? */
8116 /* FIXME: handle this for generic sharing eventually */
8117 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8118 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8119 supported_tail_call = TRUE;
8120 if (supported_tail_call) {
8121 if (call_opcode != CEE_CALL)
8122 supported_tail_call = FALSE;
8125 if (supported_tail_call) {
8128 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8129 INLINE_FAILURE ("tail call");
8131 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8133 if (ARCH_USE_OP_TAIL_CALL) {
8134 /* Handle tail calls similarly to normal calls */
8137 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8138 call->tail_call = TRUE;
8139 call->method = cmethod;
8140 call->signature = mono_method_signature (cmethod);
8143 * We implement tail calls by storing the actual arguments into the
8144 * argument variables, then emitting a CEE_JMP.
8146 for (i = 0; i < n; ++i) {
8147 /* Prevent argument from being register allocated */
8148 arg_array [i]->flags |= MONO_INST_VOLATILE;
8149 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8151 ins = (MonoInst*)call;
8152 ins->inst_p0 = cmethod;
8153 ins->inst_p1 = arg_array [0];
8154 MONO_ADD_INS (bblock, ins);
8155 link_bblock (cfg, bblock, end_bblock);
8156 start_new_bblock = 1;
8158 // FIXME: Eliminate unreachable epilogs
8161 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8162 * only reachable from this call.
8164 GET_BBLOCK (cfg, tblock, ip + 5);
8165 if (tblock == bblock || tblock->in_count == 0)
8174 * Synchronized wrappers.
8175 * It's hard to determine where to replace a method with its synchronized
8176 * wrapper without causing an infinite recursion. The current solution is
8177 * to add the synchronized wrapper in the trampolines, and to
8178 * change the called method to a dummy wrapper, and resolve that wrapper
8179 * to the real method in mono_jit_compile_method ().
8181 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8182 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8183 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8184 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8188 INLINE_FAILURE ("call");
8189 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8190 imt_arg, vtable_arg);
8193 link_bblock (cfg, bblock, end_bblock);
8194 start_new_bblock = 1;
8196 // FIXME: Eliminate unreachable epilogs
8199 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8200 * only reachable from this call.
8202 GET_BBLOCK (cfg, tblock, ip + 5);
8203 if (tblock == bblock || tblock->in_count == 0)
8210 /* End of call, INS should contain the result of the call, if any */
8212 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8215 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8220 if (keep_this_alive) {
8221 MonoInst *dummy_use;
8223 /* See mono_emit_method_call_full () */
8224 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8227 CHECK_CFG_EXCEPTION;
8231 g_assert (*ip == CEE_RET);
8235 constrained_call = NULL;
8237 emit_seq_point (cfg, method, ip, FALSE);
8241 if (cfg->method != method) {
8242 /* return from inlined method */
8244 * If in_count == 0, that means the ret is unreachable due to
8245 * being preceded by a throw. In that case, inline_method () will
8246 * handle setting the return value
8247 * (test case: test_0_inline_throw ()).
8249 if (return_var && cfg->cbb->in_count) {
8250 MonoType *ret_type = mono_method_signature (method)->ret;
8256 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8259 //g_assert (returnvar != -1);
8260 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8261 cfg->ret_var_set = TRUE;
8265 MonoType *ret_type = mono_method_signature (method)->ret;
8267 if (seq_points && !sym_seq_points) {
8269 * Place a seq point here too even though the IL stack is not
8270 * empty, so a step over on
8273 * will work correctly.
8275 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8276 MONO_ADD_INS (cfg->cbb, ins);
8279 g_assert (!return_var);
8283 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8286 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8289 if (!cfg->vret_addr) {
8292 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8294 EMIT_NEW_RETLOADA (cfg, ret_addr);
8296 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8297 ins->klass = mono_class_from_mono_type (ret_type);
8300 #ifdef MONO_ARCH_SOFT_FLOAT
8301 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8302 MonoInst *iargs [1];
8306 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8307 mono_arch_emit_setret (cfg, method, conv);
8309 mono_arch_emit_setret (cfg, method, *sp);
8312 mono_arch_emit_setret (cfg, method, *sp);
8317 if (sp != stack_start)
8319 MONO_INST_NEW (cfg, ins, OP_BR);
8321 ins->inst_target_bb = end_bblock;
8322 MONO_ADD_INS (bblock, ins);
8323 link_bblock (cfg, bblock, end_bblock);
8324 start_new_bblock = 1;
8328 MONO_INST_NEW (cfg, ins, OP_BR);
8330 target = ip + 1 + (signed char)(*ip);
8332 GET_BBLOCK (cfg, tblock, target);
8333 link_bblock (cfg, bblock, tblock);
8334 ins->inst_target_bb = tblock;
8335 if (sp != stack_start) {
8336 handle_stack_args (cfg, stack_start, sp - stack_start);
8338 CHECK_UNVERIFIABLE (cfg);
8340 MONO_ADD_INS (bblock, ins);
8341 start_new_bblock = 1;
8342 inline_costs += BRANCH_COST;
8356 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8358 target = ip + 1 + *(signed char*)ip;
8364 inline_costs += BRANCH_COST;
8368 MONO_INST_NEW (cfg, ins, OP_BR);
8371 target = ip + 4 + (gint32)read32(ip);
8373 GET_BBLOCK (cfg, tblock, target);
8374 link_bblock (cfg, bblock, tblock);
8375 ins->inst_target_bb = tblock;
8376 if (sp != stack_start) {
8377 handle_stack_args (cfg, stack_start, sp - stack_start);
8379 CHECK_UNVERIFIABLE (cfg);
8382 MONO_ADD_INS (bblock, ins);
8384 start_new_bblock = 1;
8385 inline_costs += BRANCH_COST;
8392 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8393 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8394 guint32 opsize = is_short ? 1 : 4;
8396 CHECK_OPSIZE (opsize);
8398 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8401 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8406 GET_BBLOCK (cfg, tblock, target);
8407 link_bblock (cfg, bblock, tblock);
8408 GET_BBLOCK (cfg, tblock, ip);
8409 link_bblock (cfg, bblock, tblock);
8411 if (sp != stack_start) {
8412 handle_stack_args (cfg, stack_start, sp - stack_start);
8413 CHECK_UNVERIFIABLE (cfg);
8416 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8417 cmp->sreg1 = sp [0]->dreg;
8418 type_from_op (cmp, sp [0], NULL);
8421 #if SIZEOF_REGISTER == 4
8422 if (cmp->opcode == OP_LCOMPARE_IMM) {
8423 /* Convert it to OP_LCOMPARE */
8424 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8425 ins->type = STACK_I8;
8426 ins->dreg = alloc_dreg (cfg, STACK_I8);
8428 MONO_ADD_INS (bblock, ins);
8429 cmp->opcode = OP_LCOMPARE;
8430 cmp->sreg2 = ins->dreg;
8433 MONO_ADD_INS (bblock, cmp);
8435 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8436 type_from_op (ins, sp [0], NULL);
8437 MONO_ADD_INS (bblock, ins);
8438 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8439 GET_BBLOCK (cfg, tblock, target);
8440 ins->inst_true_bb = tblock;
8441 GET_BBLOCK (cfg, tblock, ip);
8442 ins->inst_false_bb = tblock;
8443 start_new_bblock = 2;
8446 inline_costs += BRANCH_COST;
8461 MONO_INST_NEW (cfg, ins, *ip);
8463 target = ip + 4 + (gint32)read32(ip);
8469 inline_costs += BRANCH_COST;
8473 MonoBasicBlock **targets;
8474 MonoBasicBlock *default_bblock;
8475 MonoJumpInfoBBTable *table;
8476 int offset_reg = alloc_preg (cfg);
8477 int target_reg = alloc_preg (cfg);
8478 int table_reg = alloc_preg (cfg);
8479 int sum_reg = alloc_preg (cfg);
8480 gboolean use_op_switch;
8484 n = read32 (ip + 1);
8487 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8491 CHECK_OPSIZE (n * sizeof (guint32));
8492 target = ip + n * sizeof (guint32);
8494 GET_BBLOCK (cfg, default_bblock, target);
8495 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8497 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8498 for (i = 0; i < n; ++i) {
8499 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8500 targets [i] = tblock;
8501 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8505 if (sp != stack_start) {
8507 * Link the current bb with the targets as well, so handle_stack_args
8508 * will set their in_stack correctly.
8510 link_bblock (cfg, bblock, default_bblock);
8511 for (i = 0; i < n; ++i)
8512 link_bblock (cfg, bblock, targets [i]);
8514 handle_stack_args (cfg, stack_start, sp - stack_start);
8516 CHECK_UNVERIFIABLE (cfg);
8519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8523 for (i = 0; i < n; ++i)
8524 link_bblock (cfg, bblock, targets [i]);
8526 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8527 table->table = targets;
8528 table->table_size = n;
8530 use_op_switch = FALSE;
8532 /* ARM implements SWITCH statements differently */
8533 /* FIXME: Make it use the generic implementation */
8534 if (!cfg->compile_aot)
8535 use_op_switch = TRUE;
8538 if (COMPILE_LLVM (cfg))
8539 use_op_switch = TRUE;
8541 cfg->cbb->has_jump_table = 1;
8543 if (use_op_switch) {
8544 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8545 ins->sreg1 = src1->dreg;
8546 ins->inst_p0 = table;
8547 ins->inst_many_bb = targets;
8548 ins->klass = GUINT_TO_POINTER (n);
8549 MONO_ADD_INS (cfg->cbb, ins);
8551 if (sizeof (gpointer) == 8)
8552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8556 #if SIZEOF_REGISTER == 8
8557 /* The upper word might not be zero, and we add it to a 64 bit address later */
8558 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8561 if (cfg->compile_aot) {
8562 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8564 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8565 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8566 ins->inst_p0 = table;
8567 ins->dreg = table_reg;
8568 MONO_ADD_INS (cfg->cbb, ins);
8571 /* FIXME: Use load_memindex */
8572 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8574 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8576 start_new_bblock = 1;
8577 inline_costs += (BRANCH_COST * 2);
8597 dreg = alloc_freg (cfg);
8600 dreg = alloc_lreg (cfg);
8603 dreg = alloc_ireg_ref (cfg);
8606 dreg = alloc_preg (cfg);
8609 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8610 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8611 ins->flags |= ins_flag;
8613 MONO_ADD_INS (bblock, ins);
8615 if (ins->flags & MONO_INST_VOLATILE) {
8616 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8617 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8618 emit_memory_barrier (cfg, FullBarrier);
8633 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8634 ins->flags |= ins_flag;
8637 if (ins->flags & MONO_INST_VOLATILE) {
8638 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8639 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
8640 emit_memory_barrier (cfg, FullBarrier);
8643 MONO_ADD_INS (bblock, ins);
8645 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8646 emit_write_barrier (cfg, sp [0], sp [1], -1);
8655 MONO_INST_NEW (cfg, ins, (*ip));
8657 ins->sreg1 = sp [0]->dreg;
8658 ins->sreg2 = sp [1]->dreg;
8659 type_from_op (ins, sp [0], sp [1]);
8661 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8663 /* Use the immediate opcodes if possible */
8664 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8665 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8666 if (imm_opcode != -1) {
8667 ins->opcode = imm_opcode;
8668 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8671 sp [1]->opcode = OP_NOP;
8675 MONO_ADD_INS ((cfg)->cbb, (ins));
8677 *sp++ = mono_decompose_opcode (cfg, ins);
8694 MONO_INST_NEW (cfg, ins, (*ip));
8696 ins->sreg1 = sp [0]->dreg;
8697 ins->sreg2 = sp [1]->dreg;
8698 type_from_op (ins, sp [0], sp [1]);
8700 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8701 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8703 /* FIXME: Pass opcode to is_inst_imm */
8705 /* Use the immediate opcodes if possible */
8706 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8709 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8710 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8711 /* Keep emulated opcodes which are optimized away later */
8712 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8713 imm_opcode = mono_op_to_op_imm (ins->opcode);
8716 if (imm_opcode != -1) {
8717 ins->opcode = imm_opcode;
8718 if (sp [1]->opcode == OP_I8CONST) {
8719 #if SIZEOF_REGISTER == 8
8720 ins->inst_imm = sp [1]->inst_l;
8722 ins->inst_ls_word = sp [1]->inst_ls_word;
8723 ins->inst_ms_word = sp [1]->inst_ms_word;
8727 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8730 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8731 if (sp [1]->next == NULL)
8732 sp [1]->opcode = OP_NOP;
8735 MONO_ADD_INS ((cfg)->cbb, (ins));
8737 *sp++ = mono_decompose_opcode (cfg, ins);
8750 case CEE_CONV_OVF_I8:
8751 case CEE_CONV_OVF_U8:
8755 /* Special case this earlier so we have long constants in the IR */
8756 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8757 int data = sp [-1]->inst_c0;
8758 sp [-1]->opcode = OP_I8CONST;
8759 sp [-1]->type = STACK_I8;
8760 #if SIZEOF_REGISTER == 8
8761 if ((*ip) == CEE_CONV_U8)
8762 sp [-1]->inst_c0 = (guint32)data;
8764 sp [-1]->inst_c0 = data;
8766 sp [-1]->inst_ls_word = data;
8767 if ((*ip) == CEE_CONV_U8)
8768 sp [-1]->inst_ms_word = 0;
8770 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8772 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8779 case CEE_CONV_OVF_I4:
8780 case CEE_CONV_OVF_I1:
8781 case CEE_CONV_OVF_I2:
8782 case CEE_CONV_OVF_I:
8783 case CEE_CONV_OVF_U:
8786 if (sp [-1]->type == STACK_R8) {
8787 ADD_UNOP (CEE_CONV_OVF_I8);
8794 case CEE_CONV_OVF_U1:
8795 case CEE_CONV_OVF_U2:
8796 case CEE_CONV_OVF_U4:
8799 if (sp [-1]->type == STACK_R8) {
8800 ADD_UNOP (CEE_CONV_OVF_U8);
8807 case CEE_CONV_OVF_I1_UN:
8808 case CEE_CONV_OVF_I2_UN:
8809 case CEE_CONV_OVF_I4_UN:
8810 case CEE_CONV_OVF_I8_UN:
8811 case CEE_CONV_OVF_U1_UN:
8812 case CEE_CONV_OVF_U2_UN:
8813 case CEE_CONV_OVF_U4_UN:
8814 case CEE_CONV_OVF_U8_UN:
8815 case CEE_CONV_OVF_I_UN:
8816 case CEE_CONV_OVF_U_UN:
8823 CHECK_CFG_EXCEPTION;
8827 case CEE_ADD_OVF_UN:
8829 case CEE_MUL_OVF_UN:
8831 case CEE_SUB_OVF_UN:
8837 GSHAREDVT_FAILURE (*ip);
8840 token = read32 (ip + 1);
8841 klass = mini_get_class (method, token, generic_context);
8842 CHECK_TYPELOAD (klass);
8844 if (generic_class_is_reference_type (cfg, klass)) {
8845 MonoInst *store, *load;
8846 int dreg = alloc_ireg_ref (cfg);
8848 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8849 load->flags |= ins_flag;
8850 MONO_ADD_INS (cfg->cbb, load);
8852 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8853 store->flags |= ins_flag;
8854 MONO_ADD_INS (cfg->cbb, store);
8856 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8857 emit_write_barrier (cfg, sp [0], sp [1], -1);
8859 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8871 token = read32 (ip + 1);
8872 klass = mini_get_class (method, token, generic_context);
8873 CHECK_TYPELOAD (klass);
8875 /* Optimize the common ldobj+stloc combination */
8885 loc_index = ip [5] - CEE_STLOC_0;
8892 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8893 CHECK_LOCAL (loc_index);
8895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8896 ins->dreg = cfg->locals [loc_index]->dreg;
8902 /* Optimize the ldobj+stobj combination */
8903 /* The reference case ends up being a load+store anyway */
8904 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8909 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8925 CHECK_STACK_OVF (1);
8927 n = read32 (ip + 1);
8929 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8930 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8931 ins->type = STACK_OBJ;
8934 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8935 MonoInst *iargs [1];
8937 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8938 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8940 if (cfg->opt & MONO_OPT_SHARED) {
8941 MonoInst *iargs [3];
8943 if (cfg->compile_aot) {
8944 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8946 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8947 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8948 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8949 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8950 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8952 if (bblock->out_of_line) {
8953 MonoInst *iargs [2];
8955 if (image == mono_defaults.corlib) {
8957 * Avoid relocations in AOT and save some space by using a
8958 * version of helper_ldstr specialized to mscorlib.
8960 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8961 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8963 /* Avoid creating the string object */
8964 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8965 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8966 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8970 if (cfg->compile_aot) {
8971 NEW_LDSTRCONST (cfg, ins, image, n);
8973 MONO_ADD_INS (bblock, ins);
8976 NEW_PCONST (cfg, ins, NULL);
8977 ins->type = STACK_OBJ;
8978 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8980 OUT_OF_MEMORY_FAILURE;
8983 MONO_ADD_INS (bblock, ins);
8992 MonoInst *iargs [2];
8993 MonoMethodSignature *fsig;
8996 MonoInst *vtable_arg = NULL;
8999 token = read32 (ip + 1);
9000 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9001 if (!cmethod || mono_loader_get_last_error ())
9003 fsig = mono_method_get_signature (cmethod, image, token);
9007 mono_save_token_info (cfg, image, token, cmethod);
9009 if (!mono_class_init (cmethod->klass))
9010 TYPE_LOAD_ERROR (cmethod->klass);
9012 context_used = mini_method_check_context_used (cfg, cmethod);
9014 if (mono_security_cas_enabled ()) {
9015 if (check_linkdemand (cfg, method, cmethod))
9016 INLINE_FAILURE ("linkdemand");
9017 CHECK_CFG_EXCEPTION;
9018 } else if (mono_security_core_clr_enabled ()) {
9019 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9022 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9023 emit_generic_class_init (cfg, cmethod->klass);
9024 CHECK_TYPELOAD (cmethod->klass);
9028 if (cfg->gsharedvt) {
9029 if (mini_is_gsharedvt_variable_signature (sig))
9030 GSHAREDVT_FAILURE (*ip);
9034 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9035 mono_method_is_generic_sharable (cmethod, TRUE)) {
9036 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9037 mono_class_vtable (cfg->domain, cmethod->klass);
9038 CHECK_TYPELOAD (cmethod->klass);
9040 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9041 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9044 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9045 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9047 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9049 CHECK_TYPELOAD (cmethod->klass);
9050 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9055 n = fsig->param_count;
9059 * Generate smaller code for the common newobj <exception> instruction in
9060 * argument checking code.
9062 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9063 is_exception_class (cmethod->klass) && n <= 2 &&
9064 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9065 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9066 MonoInst *iargs [3];
9068 g_assert (!vtable_arg);
9072 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9075 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9079 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9084 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9087 g_assert_not_reached ();
9095 /* move the args to allow room for 'this' in the first position */
9101 /* check_call_signature () requires sp[0] to be set */
9102 this_ins.type = STACK_OBJ;
9104 if (check_call_signature (cfg, fsig, sp))
9109 if (mini_class_is_system_array (cmethod->klass)) {
9110 g_assert (!vtable_arg);
9112 *sp = emit_get_rgctx_method (cfg, context_used,
9113 cmethod, MONO_RGCTX_INFO_METHOD);
9115 /* Avoid varargs in the common case */
9116 if (fsig->param_count == 1)
9117 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9118 else if (fsig->param_count == 2)
9119 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9120 else if (fsig->param_count == 3)
9121 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9122 else if (fsig->param_count == 4)
9123 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9125 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9126 } else if (cmethod->string_ctor) {
9127 g_assert (!context_used);
9128 g_assert (!vtable_arg);
9129 /* we simply pass a null pointer */
9130 EMIT_NEW_PCONST (cfg, *sp, NULL);
9131 /* now call the string ctor */
9132 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9134 MonoInst* callvirt_this_arg = NULL;
9136 if (cmethod->klass->valuetype) {
9137 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9138 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9139 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9144 * The code generated by mini_emit_virtual_call () expects
9145 * iargs [0] to be a boxed instance, but luckily the vcall
9146 * will be transformed into a normal call there.
9148 } else if (context_used) {
9149 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9152 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9154 CHECK_TYPELOAD (cmethod->klass);
9157 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9158 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9159 * As a workaround, we call class cctors before allocating objects.
9161 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9162 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9163 if (cfg->verbose_level > 2)
9164 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9165 class_inits = g_slist_prepend (class_inits, vtable);
9168 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9171 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9174 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9176 /* Now call the actual ctor */
9177 /* Avoid virtual calls to ctors if possible */
9178 if (mono_class_is_marshalbyref (cmethod->klass))
9179 callvirt_this_arg = sp [0];
9182 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9183 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9184 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9189 CHECK_CFG_EXCEPTION;
9190 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9191 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9192 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9193 !g_list_find (dont_inline, cmethod)) {
9196 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9197 cfg->real_offset += 5;
9200 inline_costs += costs - 5;
9202 INLINE_FAILURE ("inline failure");
9203 // FIXME-VT: Clean this up
9204 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9205 GSHAREDVT_FAILURE(*ip);
9206 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9208 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9211 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9212 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9213 } else if (context_used &&
9214 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9215 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9216 MonoInst *cmethod_addr;
9218 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9219 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9221 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9223 INLINE_FAILURE ("ctor call");
9224 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9225 callvirt_this_arg, NULL, vtable_arg);
9229 if (alloc == NULL) {
9231 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9232 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9246 token = read32 (ip + 1);
9247 klass = mini_get_class (method, token, generic_context);
9248 CHECK_TYPELOAD (klass);
9249 if (sp [0]->type != STACK_OBJ)
9252 context_used = mini_class_check_context_used (cfg, klass);
9254 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9255 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9262 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9265 if (cfg->compile_aot)
9266 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9268 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9270 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9271 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9274 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9275 MonoMethod *mono_castclass;
9276 MonoInst *iargs [1];
9279 mono_castclass = mono_marshal_get_castclass (klass);
9282 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9283 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9284 CHECK_CFG_EXCEPTION;
9285 g_assert (costs > 0);
9288 cfg->real_offset += 5;
9293 inline_costs += costs;
9296 ins = handle_castclass (cfg, klass, *sp, context_used);
9297 CHECK_CFG_EXCEPTION;
9307 token = read32 (ip + 1);
9308 klass = mini_get_class (method, token, generic_context);
9309 CHECK_TYPELOAD (klass);
9310 if (sp [0]->type != STACK_OBJ)
9313 context_used = mini_class_check_context_used (cfg, klass);
9315 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9316 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9323 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9326 if (cfg->compile_aot)
9327 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9329 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9331 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9334 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9335 MonoMethod *mono_isinst;
9336 MonoInst *iargs [1];
9339 mono_isinst = mono_marshal_get_isinst (klass);
9342 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9343 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9344 CHECK_CFG_EXCEPTION;
9345 g_assert (costs > 0);
9348 cfg->real_offset += 5;
9353 inline_costs += costs;
9356 ins = handle_isinst (cfg, klass, *sp, context_used);
9357 CHECK_CFG_EXCEPTION;
9364 case CEE_UNBOX_ANY: {
9368 token = read32 (ip + 1);
9369 klass = mini_get_class (method, token, generic_context);
9370 CHECK_TYPELOAD (klass);
9372 mono_save_token_info (cfg, image, token, klass);
9374 context_used = mini_class_check_context_used (cfg, klass);
9376 if (mini_is_gsharedvt_klass (cfg, klass)) {
9377 *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
9385 if (generic_class_is_reference_type (cfg, klass)) {
9386 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9387 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9388 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9395 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9398 /*FIXME AOT support*/
9399 if (cfg->compile_aot)
9400 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9402 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9404 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9405 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9408 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9409 MonoMethod *mono_castclass;
9410 MonoInst *iargs [1];
9413 mono_castclass = mono_marshal_get_castclass (klass);
9416 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9417 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9418 CHECK_CFG_EXCEPTION;
9419 g_assert (costs > 0);
9422 cfg->real_offset += 5;
9426 inline_costs += costs;
9428 ins = handle_castclass (cfg, klass, *sp, context_used);
9429 CHECK_CFG_EXCEPTION;
9437 if (mono_class_is_nullable (klass)) {
9438 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9445 ins = handle_unbox (cfg, klass, sp, context_used);
9451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9464 token = read32 (ip + 1);
9465 klass = mini_get_class (method, token, generic_context);
9466 CHECK_TYPELOAD (klass);
9468 mono_save_token_info (cfg, image, token, klass);
9470 context_used = mini_class_check_context_used (cfg, klass);
9472 if (generic_class_is_reference_type (cfg, klass)) {
9478 if (klass == mono_defaults.void_class)
9480 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9482 /* frequent check in generic code: box (struct), brtrue */
9484 // FIXME: LLVM can't handle the inconsistent bb linking
9485 if (!mono_class_is_nullable (klass) &&
9486 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9487 (ip [5] == CEE_BRTRUE ||
9488 ip [5] == CEE_BRTRUE_S ||
9489 ip [5] == CEE_BRFALSE ||
9490 ip [5] == CEE_BRFALSE_S)) {
9491 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9493 MonoBasicBlock *true_bb, *false_bb;
9497 if (cfg->verbose_level > 3) {
9498 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9499 printf ("<box+brtrue opt>\n");
9507 target = ip + 1 + (signed char)(*ip);
9514 target = ip + 4 + (gint)(read32 (ip));
9518 g_assert_not_reached ();
9522 * We need to link both bblocks, since it is needed for handling stack
9523 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9524 * Branching to only one of them would lead to inconsistencies, so
9525 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9527 GET_BBLOCK (cfg, true_bb, target);
9528 GET_BBLOCK (cfg, false_bb, ip);
9530 mono_link_bblock (cfg, cfg->cbb, true_bb);
9531 mono_link_bblock (cfg, cfg->cbb, false_bb);
9533 if (sp != stack_start) {
9534 handle_stack_args (cfg, stack_start, sp - stack_start);
9536 CHECK_UNVERIFIABLE (cfg);
9539 if (COMPILE_LLVM (cfg)) {
9540 dreg = alloc_ireg (cfg);
9541 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9544 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9546 /* The JIT can't eliminate the iconst+compare */
9547 MONO_INST_NEW (cfg, ins, OP_BR);
9548 ins->inst_target_bb = is_true ? true_bb : false_bb;
9549 MONO_ADD_INS (cfg->cbb, ins);
9552 start_new_bblock = 1;
9556 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9558 CHECK_CFG_EXCEPTION;
9567 token = read32 (ip + 1);
9568 klass = mini_get_class (method, token, generic_context);
9569 CHECK_TYPELOAD (klass);
9571 mono_save_token_info (cfg, image, token, klass);
9573 context_used = mini_class_check_context_used (cfg, klass);
9575 if (mono_class_is_nullable (klass)) {
9578 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9579 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9583 ins = handle_unbox (cfg, klass, sp, context_used);
9596 MonoClassField *field;
9597 #ifndef DISABLE_REMOTING
9601 gboolean is_instance;
9603 gpointer addr = NULL;
9604 gboolean is_special_static;
9606 MonoInst *store_val = NULL;
9609 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9611 if (op == CEE_STFLD) {
9619 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9621 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9624 if (op == CEE_STSFLD) {
9632 token = read32 (ip + 1);
9633 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9634 field = mono_method_get_wrapper_data (method, token);
9635 klass = field->parent;
9638 field = mono_field_from_token (image, token, &klass, generic_context);
9642 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9643 FIELD_ACCESS_FAILURE;
9644 mono_class_init (klass);
9646 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9649 /* if the class is Critical then transparent code cannot access its fields */
9650 if (!is_instance && mono_security_core_clr_enabled ())
9651 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9653 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9654 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9655 if (mono_security_core_clr_enabled ())
9656 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9660 * LDFLD etc. is usable on static fields as well, so convert those cases to
9663 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9675 g_assert_not_reached ();
9677 is_instance = FALSE;
9680 context_used = mini_class_check_context_used (cfg, klass);
9684 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9685 if (op == CEE_STFLD) {
9686 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9688 #ifndef DISABLE_REMOTING
9689 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9690 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9691 MonoInst *iargs [5];
9693 GSHAREDVT_FAILURE (op);
9696 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9697 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9698 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9702 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9703 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9704 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9705 CHECK_CFG_EXCEPTION;
9706 g_assert (costs > 0);
9708 cfg->real_offset += 5;
9711 inline_costs += costs;
9713 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9720 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9722 if (mini_is_gsharedvt_klass (cfg, klass)) {
9723 MonoInst *offset_ins;
9725 context_used = mini_class_check_context_used (cfg, klass);
9727 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9728 dreg = alloc_ireg_mp (cfg);
9729 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9730 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9733 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9735 if (sp [0]->opcode != OP_LDADDR)
9736 store->flags |= MONO_INST_FAULT;
9738 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9739 /* insert call to write barrier */
9743 dreg = alloc_ireg_mp (cfg);
9744 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9745 emit_write_barrier (cfg, ptr, sp [1], -1);
9748 store->flags |= ins_flag;
9755 #ifndef DISABLE_REMOTING
9756 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9757 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9758 MonoInst *iargs [4];
9760 GSHAREDVT_FAILURE (op);
9763 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9764 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9765 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9766 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9767 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9768 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9769 CHECK_CFG_EXCEPTION;
9771 g_assert (costs > 0);
9773 cfg->real_offset += 5;
9777 inline_costs += costs;
9779 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9785 if (sp [0]->type == STACK_VTYPE) {
9788 /* Have to compute the address of the variable */
9790 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9792 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9794 g_assert (var->klass == klass);
9796 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9800 if (op == CEE_LDFLDA) {
9801 if (is_magic_tls_access (field)) {
9802 GSHAREDVT_FAILURE (*ip);
9804 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9806 if (sp [0]->type == STACK_OBJ) {
9807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9808 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9811 dreg = alloc_ireg_mp (cfg);
9813 if (mini_is_gsharedvt_klass (cfg, klass)) {
9814 MonoInst *offset_ins;
9816 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9817 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9819 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9821 ins->klass = mono_class_from_mono_type (field->type);
9822 ins->type = STACK_MP;
9828 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9830 if (mini_is_gsharedvt_klass (cfg, klass)) {
9831 MonoInst *offset_ins;
9833 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9834 dreg = alloc_ireg_mp (cfg);
9835 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9836 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9838 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9840 load->flags |= ins_flag;
9841 if (sp [0]->opcode != OP_LDADDR)
9842 load->flags |= MONO_INST_FAULT;
9856 * We can only support shared generic static
9857 * field access on architectures where the
9858 * trampoline code has been extended to handle
9859 * the generic class init.
9861 #ifndef MONO_ARCH_VTABLE_REG
9862 GENERIC_SHARING_FAILURE (op);
9865 context_used = mini_class_check_context_used (cfg, klass);
9867 ftype = mono_field_get_type (field);
9869 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9872 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9873 * to be called here.
9875 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9876 mono_class_vtable (cfg->domain, klass);
9877 CHECK_TYPELOAD (klass);
9879 mono_domain_lock (cfg->domain);
9880 if (cfg->domain->special_static_fields)
9881 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9882 mono_domain_unlock (cfg->domain);
9884 is_special_static = mono_class_field_is_special_static (field);
9886 /* Generate IR to compute the field address */
9887 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9889 * Fast access to TLS data
9890 * Inline version of get_thread_static_data () in
9894 int idx, static_data_reg, array_reg, dreg;
9895 MonoInst *thread_ins;
9897 GSHAREDVT_FAILURE (op);
9899 // offset &= 0x7fffffff;
9900 // idx = (offset >> 24) - 1;
9901 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9903 thread_ins = mono_get_thread_intrinsic (cfg);
9904 MONO_ADD_INS (cfg->cbb, thread_ins);
9905 static_data_reg = alloc_ireg (cfg);
9906 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9908 if (cfg->compile_aot) {
9909 int offset_reg, offset2_reg, idx_reg;
9911 /* For TLS variables, this will return the TLS offset */
9912 EMIT_NEW_SFLDACONST (cfg, ins, field);
9913 offset_reg = ins->dreg;
9914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9915 idx_reg = alloc_ireg (cfg);
9916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9919 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9920 array_reg = alloc_ireg (cfg);
9921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9922 offset2_reg = alloc_ireg (cfg);
9923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9924 dreg = alloc_ireg (cfg);
9925 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9927 offset = (gsize)addr & 0x7fffffff;
9928 idx = (offset >> 24) - 1;
9930 array_reg = alloc_ireg (cfg);
9931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9932 dreg = alloc_ireg (cfg);
9933 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9935 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9936 (cfg->compile_aot && is_special_static) ||
9937 (context_used && is_special_static)) {
9938 MonoInst *iargs [2];
9940 g_assert (field->parent);
9941 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9943 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9944 field, MONO_RGCTX_INFO_CLASS_FIELD);
9946 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9948 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9949 } else if (context_used) {
9950 MonoInst *static_data;
9953 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9954 method->klass->name_space, method->klass->name, method->name,
9955 depth, field->offset);
9958 if (mono_class_needs_cctor_run (klass, method))
9959 emit_generic_class_init (cfg, klass);
9962 * The pointer we're computing here is
9964 * super_info.static_data + field->offset
9966 static_data = emit_get_rgctx_klass (cfg, context_used,
9967 klass, MONO_RGCTX_INFO_STATIC_DATA);
9969 if (mini_is_gsharedvt_klass (cfg, klass)) {
9970 MonoInst *offset_ins;
9972 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9973 dreg = alloc_ireg_mp (cfg);
9974 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9975 } else if (field->offset == 0) {
9978 int addr_reg = mono_alloc_preg (cfg);
9979 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9981 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9982 MonoInst *iargs [2];
9984 g_assert (field->parent);
9985 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9986 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9987 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9989 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9991 CHECK_TYPELOAD (klass);
9993 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9994 if (!(g_slist_find (class_inits, vtable))) {
9995 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9996 if (cfg->verbose_level > 2)
9997 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9998 class_inits = g_slist_prepend (class_inits, vtable);
10001 if (cfg->run_cctors) {
10003 /* This makes it so that inlining cannot trigger */
10004 /* .cctors: too many apps depend on them */
10005 /* running with a specific order... */
10006 if (! vtable->initialized)
10007 INLINE_FAILURE ("class init");
10008 ex = mono_runtime_class_init_full (vtable, FALSE);
10010 set_exception_object (cfg, ex);
10011 goto exception_exit;
10015 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10017 if (cfg->compile_aot)
10018 EMIT_NEW_SFLDACONST (cfg, ins, field);
10020 EMIT_NEW_PCONST (cfg, ins, addr);
10022 MonoInst *iargs [1];
10023 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10024 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10028 /* Generate IR to do the actual load/store operation */
10030 if (op == CEE_LDSFLDA) {
10031 ins->klass = mono_class_from_mono_type (ftype);
10032 ins->type = STACK_PTR;
10034 } else if (op == CEE_STSFLD) {
10037 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10038 store->flags |= ins_flag;
10040 gboolean is_const = FALSE;
10041 MonoVTable *vtable = NULL;
10042 gpointer addr = NULL;
10044 if (!context_used) {
10045 vtable = mono_class_vtable (cfg->domain, klass);
10046 CHECK_TYPELOAD (klass);
10048 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10049 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10050 int ro_type = ftype->type;
10052 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10053 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10054 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10057 GSHAREDVT_FAILURE (op);
10059 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10062 case MONO_TYPE_BOOLEAN:
10064 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10068 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10071 case MONO_TYPE_CHAR:
10073 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10077 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10082 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10086 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10091 case MONO_TYPE_PTR:
10092 case MONO_TYPE_FNPTR:
10093 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10094 type_to_eval_stack_type ((cfg), field->type, *sp);
10097 case MONO_TYPE_STRING:
10098 case MONO_TYPE_OBJECT:
10099 case MONO_TYPE_CLASS:
10100 case MONO_TYPE_SZARRAY:
10101 case MONO_TYPE_ARRAY:
10102 if (!mono_gc_is_moving ()) {
10103 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10104 type_to_eval_stack_type ((cfg), field->type, *sp);
10112 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10117 case MONO_TYPE_VALUETYPE:
10127 CHECK_STACK_OVF (1);
10129 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10130 load->flags |= ins_flag;
10143 token = read32 (ip + 1);
10144 klass = mini_get_class (method, token, generic_context);
10145 CHECK_TYPELOAD (klass);
10146 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10147 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10148 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10149 generic_class_is_reference_type (cfg, klass)) {
10150 /* insert call to write barrier */
10151 emit_write_barrier (cfg, sp [0], sp [1], -1);
10163 const char *data_ptr;
10165 guint32 field_token;
10171 token = read32 (ip + 1);
10173 klass = mini_get_class (method, token, generic_context);
10174 CHECK_TYPELOAD (klass);
10176 context_used = mini_class_check_context_used (cfg, klass);
10178 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10179 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
10180 ins->sreg1 = sp [0]->dreg;
10181 ins->type = STACK_I4;
10182 ins->dreg = alloc_ireg (cfg);
10183 MONO_ADD_INS (cfg->cbb, ins);
10184 *sp = mono_decompose_opcode (cfg, ins);
10187 if (context_used) {
10188 MonoInst *args [3];
10189 MonoClass *array_class = mono_array_class_get (klass, 1);
10190 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10192 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10195 args [0] = emit_get_rgctx_klass (cfg, context_used,
10196 array_class, MONO_RGCTX_INFO_VTABLE);
10201 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10203 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10205 if (cfg->opt & MONO_OPT_SHARED) {
10206 /* Decompose now to avoid problems with references to the domainvar */
10207 MonoInst *iargs [3];
10209 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10210 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10211 iargs [2] = sp [0];
10213 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10215 /* Decompose later since it is needed by abcrem */
10216 MonoClass *array_type = mono_array_class_get (klass, 1);
10217 mono_class_vtable (cfg->domain, array_type);
10218 CHECK_TYPELOAD (array_type);
10220 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10221 ins->dreg = alloc_ireg_ref (cfg);
10222 ins->sreg1 = sp [0]->dreg;
10223 ins->inst_newa_class = klass;
10224 ins->type = STACK_OBJ;
10225 ins->klass = array_type;
10226 MONO_ADD_INS (cfg->cbb, ins);
10227 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10228 cfg->cbb->has_array_access = TRUE;
10230 /* Needed so mono_emit_load_get_addr () gets called */
10231 mono_get_got_var (cfg);
10241 * we inline/optimize the initialization sequence if possible.
10242 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10243 * for small sizes open code the memcpy
10244 * ensure the rva field is big enough
10246 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10247 MonoMethod *memcpy_method = get_memcpy_method ();
10248 MonoInst *iargs [3];
10249 int add_reg = alloc_ireg_mp (cfg);
10251 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10252 if (cfg->compile_aot) {
10253 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10255 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10257 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10258 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10267 if (sp [0]->type != STACK_OBJ)
10270 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10271 ins->dreg = alloc_preg (cfg);
10272 ins->sreg1 = sp [0]->dreg;
10273 ins->type = STACK_I4;
10274 /* This flag will be inherited by the decomposition */
10275 ins->flags |= MONO_INST_FAULT;
10276 MONO_ADD_INS (cfg->cbb, ins);
10277 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10278 cfg->cbb->has_array_access = TRUE;
10286 if (sp [0]->type != STACK_OBJ)
10289 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10291 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10292 CHECK_TYPELOAD (klass);
10293 /* we need to make sure that this array is exactly the type it needs
10294 * to be for correctness. the wrappers are lax with their usage
10295 * so we need to ignore them here
10297 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10298 MonoClass *array_class = mono_array_class_get (klass, 1);
10299 mini_emit_check_array_type (cfg, sp [0], array_class);
10300 CHECK_TYPELOAD (array_class);
10304 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10309 case CEE_LDELEM_I1:
10310 case CEE_LDELEM_U1:
10311 case CEE_LDELEM_I2:
10312 case CEE_LDELEM_U2:
10313 case CEE_LDELEM_I4:
10314 case CEE_LDELEM_U4:
10315 case CEE_LDELEM_I8:
10317 case CEE_LDELEM_R4:
10318 case CEE_LDELEM_R8:
10319 case CEE_LDELEM_REF: {
10325 if (*ip == CEE_LDELEM) {
10327 token = read32 (ip + 1);
10328 klass = mini_get_class (method, token, generic_context);
10329 CHECK_TYPELOAD (klass);
10330 mono_class_init (klass);
10333 klass = array_access_to_klass (*ip);
10335 if (sp [0]->type != STACK_OBJ)
10338 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10340 if (mini_is_gsharedvt_klass (cfg, klass)) {
10341 // FIXME-VT: OP_ICONST optimization
10342 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10344 ins->opcode = OP_LOADV_MEMBASE;
10345 } else if (sp [1]->opcode == OP_ICONST) {
10346 int array_reg = sp [0]->dreg;
10347 int index_reg = sp [1]->dreg;
10348 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10350 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10351 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10353 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10354 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10357 if (*ip == CEE_LDELEM)
10364 case CEE_STELEM_I1:
10365 case CEE_STELEM_I2:
10366 case CEE_STELEM_I4:
10367 case CEE_STELEM_I8:
10368 case CEE_STELEM_R4:
10369 case CEE_STELEM_R8:
10370 case CEE_STELEM_REF:
10375 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10377 if (*ip == CEE_STELEM) {
10379 token = read32 (ip + 1);
10380 klass = mini_get_class (method, token, generic_context);
10381 CHECK_TYPELOAD (klass);
10382 mono_class_init (klass);
10385 klass = array_access_to_klass (*ip);
10387 if (sp [0]->type != STACK_OBJ)
10390 emit_array_store (cfg, klass, sp, TRUE);
10392 if (*ip == CEE_STELEM)
10399 case CEE_CKFINITE: {
10403 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10404 ins->sreg1 = sp [0]->dreg;
10405 ins->dreg = alloc_freg (cfg);
10406 ins->type = STACK_R8;
10407 MONO_ADD_INS (bblock, ins);
10409 *sp++ = mono_decompose_opcode (cfg, ins);
10414 case CEE_REFANYVAL: {
10415 MonoInst *src_var, *src;
10417 int klass_reg = alloc_preg (cfg);
10418 int dreg = alloc_preg (cfg);
10420 GSHAREDVT_FAILURE (*ip);
10423 MONO_INST_NEW (cfg, ins, *ip);
10426 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10427 CHECK_TYPELOAD (klass);
10428 mono_class_init (klass);
10430 context_used = mini_class_check_context_used (cfg, klass);
10433 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10435 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10436 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10439 if (context_used) {
10440 MonoInst *klass_ins;
10442 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10443 klass, MONO_RGCTX_INFO_KLASS);
10446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10447 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10449 mini_emit_class_check (cfg, klass_reg, klass);
10451 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10452 ins->type = STACK_MP;
10457 case CEE_MKREFANY: {
10458 MonoInst *loc, *addr;
10460 GSHAREDVT_FAILURE (*ip);
10463 MONO_INST_NEW (cfg, ins, *ip);
10466 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10467 CHECK_TYPELOAD (klass);
10468 mono_class_init (klass);
10470 context_used = mini_class_check_context_used (cfg, klass);
10472 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10473 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10475 if (context_used) {
10476 MonoInst *const_ins;
10477 int type_reg = alloc_preg (cfg);
10479 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10480 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10481 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10482 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10483 } else if (cfg->compile_aot) {
10484 int const_reg = alloc_preg (cfg);
10485 int type_reg = alloc_preg (cfg);
10487 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10488 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10490 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10492 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10493 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10495 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10497 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10498 ins->type = STACK_VTYPE;
10499 ins->klass = mono_defaults.typed_reference_class;
10504 case CEE_LDTOKEN: {
10506 MonoClass *handle_class;
10508 CHECK_STACK_OVF (1);
10511 n = read32 (ip + 1);
10513 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10514 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10515 handle = mono_method_get_wrapper_data (method, n);
10516 handle_class = mono_method_get_wrapper_data (method, n + 1);
10517 if (handle_class == mono_defaults.typehandle_class)
10518 handle = &((MonoClass*)handle)->byval_arg;
10521 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10525 mono_class_init (handle_class);
10526 if (cfg->generic_sharing_context) {
10527 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10528 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10529 /* This case handles ldtoken
10530 of an open type, like for
10533 } else if (handle_class == mono_defaults.typehandle_class) {
10534 /* If we get a MONO_TYPE_CLASS
10535 then we need to provide the
10537 instantiation of it. */
10538 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10541 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10542 } else if (handle_class == mono_defaults.fieldhandle_class)
10543 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10544 else if (handle_class == mono_defaults.methodhandle_class)
10545 context_used = mini_method_check_context_used (cfg, handle);
10547 g_assert_not_reached ();
10550 if ((cfg->opt & MONO_OPT_SHARED) &&
10551 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10552 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10553 MonoInst *addr, *vtvar, *iargs [3];
10554 int method_context_used;
10556 method_context_used = mini_method_check_context_used (cfg, method);
10558 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10560 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10561 EMIT_NEW_ICONST (cfg, iargs [1], n);
10562 if (method_context_used) {
10563 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10564 method, MONO_RGCTX_INFO_METHOD);
10565 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10567 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10568 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10570 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10572 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10574 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10576 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10577 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10578 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10579 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10580 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10581 MonoClass *tclass = mono_class_from_mono_type (handle);
10583 mono_class_init (tclass);
10584 if (context_used) {
10585 ins = emit_get_rgctx_klass (cfg, context_used,
10586 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10587 } else if (cfg->compile_aot) {
10588 if (method->wrapper_type) {
10589 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10590 /* Special case for static synchronized wrappers */
10591 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10593 /* FIXME: n is not a normal token */
10594 cfg->disable_aot = TRUE;
10595 EMIT_NEW_PCONST (cfg, ins, NULL);
10598 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10601 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10603 ins->type = STACK_OBJ;
10604 ins->klass = cmethod->klass;
10607 MonoInst *addr, *vtvar;
10609 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10611 if (context_used) {
10612 if (handle_class == mono_defaults.typehandle_class) {
10613 ins = emit_get_rgctx_klass (cfg, context_used,
10614 mono_class_from_mono_type (handle),
10615 MONO_RGCTX_INFO_TYPE);
10616 } else if (handle_class == mono_defaults.methodhandle_class) {
10617 ins = emit_get_rgctx_method (cfg, context_used,
10618 handle, MONO_RGCTX_INFO_METHOD);
10619 } else if (handle_class == mono_defaults.fieldhandle_class) {
10620 ins = emit_get_rgctx_field (cfg, context_used,
10621 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10623 g_assert_not_reached ();
10625 } else if (cfg->compile_aot) {
10626 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10628 EMIT_NEW_PCONST (cfg, ins, handle);
10630 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10632 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10642 MONO_INST_NEW (cfg, ins, OP_THROW);
10644 ins->sreg1 = sp [0]->dreg;
10646 bblock->out_of_line = TRUE;
10647 MONO_ADD_INS (bblock, ins);
10648 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10649 MONO_ADD_INS (bblock, ins);
10652 link_bblock (cfg, bblock, end_bblock);
10653 start_new_bblock = 1;
10655 case CEE_ENDFINALLY:
10656 /* mono_save_seq_point_info () depends on this */
10657 if (sp != stack_start)
10658 emit_seq_point (cfg, method, ip, FALSE);
10659 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10660 MONO_ADD_INS (bblock, ins);
10662 start_new_bblock = 1;
10665 * Control will leave the method so empty the stack, otherwise
10666 * the next basic block will start with a nonempty stack.
10668 while (sp != stack_start) {
10673 case CEE_LEAVE_S: {
10676 if (*ip == CEE_LEAVE) {
10678 target = ip + 5 + (gint32)read32(ip + 1);
10681 target = ip + 2 + (signed char)(ip [1]);
10684 /* empty the stack */
10685 while (sp != stack_start) {
10690 * If this leave statement is in a catch block, check for a
10691 * pending exception, and rethrow it if necessary.
10692 * We avoid doing this in runtime invoke wrappers, since those are called
10693 * by native code which expects the wrapper to catch all exceptions.
10695 for (i = 0; i < header->num_clauses; ++i) {
10696 MonoExceptionClause *clause = &header->clauses [i];
10699 * Use <= in the final comparison to handle clauses with multiple
10700 * leave statements, like in bug #78024.
10701 * The ordering of the exception clauses guarantees that we find the
10702 * innermost clause.
10704 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10706 MonoBasicBlock *dont_throw;
10711 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10714 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10716 NEW_BBLOCK (cfg, dont_throw);
10719 * Currently, we always rethrow the abort exception, despite the
10720 * fact that this is not correct. See thread6.cs for an example.
10721 * But propagating the abort exception is more important than
10722 * getting the semantics right.
10724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10726 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10728 MONO_START_BB (cfg, dont_throw);
10733 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10735 MonoExceptionClause *clause;
10737 for (tmp = handlers; tmp; tmp = tmp->next) {
10738 clause = tmp->data;
10739 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10741 link_bblock (cfg, bblock, tblock);
10742 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10743 ins->inst_target_bb = tblock;
10744 ins->inst_eh_block = clause;
10745 MONO_ADD_INS (bblock, ins);
10746 bblock->has_call_handler = 1;
10747 if (COMPILE_LLVM (cfg)) {
10748 MonoBasicBlock *target_bb;
10751 * Link the finally bblock with the target, since it will
10752 * conceptually branch there.
10753 * FIXME: Have to link the bblock containing the endfinally.
10755 GET_BBLOCK (cfg, target_bb, target);
10756 link_bblock (cfg, tblock, target_bb);
10759 g_list_free (handlers);
10762 MONO_INST_NEW (cfg, ins, OP_BR);
10763 MONO_ADD_INS (bblock, ins);
10764 GET_BBLOCK (cfg, tblock, target);
10765 link_bblock (cfg, bblock, tblock);
10766 ins->inst_target_bb = tblock;
10767 start_new_bblock = 1;
10769 if (*ip == CEE_LEAVE)
10778 * Mono specific opcodes
10780 case MONO_CUSTOM_PREFIX: {
10782 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10786 case CEE_MONO_ICALL: {
10788 MonoJitICallInfo *info;
10790 token = read32 (ip + 2);
10791 func = mono_method_get_wrapper_data (method, token);
10792 info = mono_find_jit_icall_by_addr (func);
10794 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10797 CHECK_STACK (info->sig->param_count);
10798 sp -= info->sig->param_count;
10800 ins = mono_emit_jit_icall (cfg, info->func, sp);
10801 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10805 inline_costs += 10 * num_calls++;
10809 case CEE_MONO_LDPTR: {
10812 CHECK_STACK_OVF (1);
10814 token = read32 (ip + 2);
10816 ptr = mono_method_get_wrapper_data (method, token);
10817 /* FIXME: Generalize this */
10818 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10819 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10824 EMIT_NEW_PCONST (cfg, ins, ptr);
10827 inline_costs += 10 * num_calls++;
10828 /* Can't embed random pointers into AOT code */
10829 cfg->disable_aot = 1;
10832 case CEE_MONO_JIT_ICALL_ADDR: {
10833 MonoJitICallInfo *callinfo;
10836 CHECK_STACK_OVF (1);
10838 token = read32 (ip + 2);
10840 ptr = mono_method_get_wrapper_data (method, token);
10841 callinfo = mono_find_jit_icall_by_addr (ptr);
10842 g_assert (callinfo);
10843 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10846 inline_costs += 10 * num_calls++;
10849 case CEE_MONO_ICALL_ADDR: {
10850 MonoMethod *cmethod;
10853 CHECK_STACK_OVF (1);
10855 token = read32 (ip + 2);
10857 cmethod = mono_method_get_wrapper_data (method, token);
10859 if (cfg->compile_aot) {
10860 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10862 ptr = mono_lookup_internal_call (cmethod);
10864 EMIT_NEW_PCONST (cfg, ins, ptr);
10870 case CEE_MONO_VTADDR: {
10871 MonoInst *src_var, *src;
10877 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10878 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10883 case CEE_MONO_NEWOBJ: {
10884 MonoInst *iargs [2];
10886 CHECK_STACK_OVF (1);
10888 token = read32 (ip + 2);
10889 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10890 mono_class_init (klass);
10891 NEW_DOMAINCONST (cfg, iargs [0]);
10892 MONO_ADD_INS (cfg->cbb, iargs [0]);
10893 NEW_CLASSCONST (cfg, iargs [1], klass);
10894 MONO_ADD_INS (cfg->cbb, iargs [1]);
10895 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10897 inline_costs += 10 * num_calls++;
10900 case CEE_MONO_OBJADDR:
10903 MONO_INST_NEW (cfg, ins, OP_MOVE);
10904 ins->dreg = alloc_ireg_mp (cfg);
10905 ins->sreg1 = sp [0]->dreg;
10906 ins->type = STACK_MP;
10907 MONO_ADD_INS (cfg->cbb, ins);
10911 case CEE_MONO_LDNATIVEOBJ:
10913 * Similar to LDOBJ, but instead load the unmanaged
10914 * representation of the vtype to the stack.
10919 token = read32 (ip + 2);
10920 klass = mono_method_get_wrapper_data (method, token);
10921 g_assert (klass->valuetype);
10922 mono_class_init (klass);
10925 MonoInst *src, *dest, *temp;
10928 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10929 temp->backend.is_pinvoke = 1;
10930 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10931 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10933 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10934 dest->type = STACK_VTYPE;
10935 dest->klass = klass;
10941 case CEE_MONO_RETOBJ: {
10943 * Same as RET, but return the native representation of a vtype
10946 g_assert (cfg->ret);
10947 g_assert (mono_method_signature (method)->pinvoke);
10952 token = read32 (ip + 2);
10953 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10955 if (!cfg->vret_addr) {
10956 g_assert (cfg->ret_var_is_local);
10958 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10960 EMIT_NEW_RETLOADA (cfg, ins);
10962 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10964 if (sp != stack_start)
10967 MONO_INST_NEW (cfg, ins, OP_BR);
10968 ins->inst_target_bb = end_bblock;
10969 MONO_ADD_INS (bblock, ins);
10970 link_bblock (cfg, bblock, end_bblock);
10971 start_new_bblock = 1;
10975 case CEE_MONO_CISINST:
10976 case CEE_MONO_CCASTCLASS: {
10981 token = read32 (ip + 2);
10982 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10983 if (ip [1] == CEE_MONO_CISINST)
10984 ins = handle_cisinst (cfg, klass, sp [0]);
10986 ins = handle_ccastclass (cfg, klass, sp [0]);
10992 case CEE_MONO_SAVE_LMF:
10993 case CEE_MONO_RESTORE_LMF:
10994 #ifdef MONO_ARCH_HAVE_LMF_OPS
10995 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10996 MONO_ADD_INS (bblock, ins);
10997 cfg->need_lmf_area = TRUE;
11001 case CEE_MONO_CLASSCONST:
11002 CHECK_STACK_OVF (1);
11004 token = read32 (ip + 2);
11005 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11008 inline_costs += 10 * num_calls++;
11010 case CEE_MONO_NOT_TAKEN:
11011 bblock->out_of_line = TRUE;
11015 CHECK_STACK_OVF (1);
11017 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11018 ins->dreg = alloc_preg (cfg);
11019 ins->inst_offset = (gint32)read32 (ip + 2);
11020 ins->type = STACK_PTR;
11021 MONO_ADD_INS (bblock, ins);
11025 case CEE_MONO_DYN_CALL: {
11026 MonoCallInst *call;
11028 /* It would be easier to call a trampoline, but that would put an
11029 * extra frame on the stack, confusing exception handling. So
11030 * implement it inline using an opcode for now.
11033 if (!cfg->dyn_call_var) {
11034 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11035 /* prevent it from being register allocated */
11036 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11039 /* Has to use a call inst since the local regalloc expects it */
11040 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11041 ins = (MonoInst*)call;
11043 ins->sreg1 = sp [0]->dreg;
11044 ins->sreg2 = sp [1]->dreg;
11045 MONO_ADD_INS (bblock, ins);
11047 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11048 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11052 inline_costs += 10 * num_calls++;
11056 case CEE_MONO_MEMORY_BARRIER: {
11058 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11062 case CEE_MONO_JIT_ATTACH: {
11063 MonoInst *args [16];
11064 MonoInst *ad_ins, *lmf_ins;
11065 MonoBasicBlock *next_bb = NULL;
11067 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11069 EMIT_NEW_PCONST (cfg, ins, NULL);
11070 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11076 ad_ins = mono_get_domain_intrinsic (cfg);
11077 lmf_ins = mono_get_lmf_intrinsic (cfg);
11080 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11081 NEW_BBLOCK (cfg, next_bb);
11083 MONO_ADD_INS (cfg->cbb, ad_ins);
11084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11085 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11087 MONO_ADD_INS (cfg->cbb, lmf_ins);
11088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11092 if (cfg->compile_aot) {
11093 /* AOT code is only used in the root domain */
11094 EMIT_NEW_PCONST (cfg, args [0], NULL);
11096 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11098 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11099 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11102 MONO_START_BB (cfg, next_bb);
11108 case CEE_MONO_JIT_DETACH: {
11109 MonoInst *args [16];
11111 /* Restore the original domain */
11112 dreg = alloc_ireg (cfg);
11113 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11114 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11119 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11125 case CEE_PREFIX1: {
11128 case CEE_ARGLIST: {
11129 /* somewhat similar to LDTOKEN */
11130 MonoInst *addr, *vtvar;
11131 CHECK_STACK_OVF (1);
11132 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11134 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11135 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11137 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11138 ins->type = STACK_VTYPE;
11139 ins->klass = mono_defaults.argumenthandle_class;
11152 * The following transforms:
11153 * CEE_CEQ into OP_CEQ
11154 * CEE_CGT into OP_CGT
11155 * CEE_CGT_UN into OP_CGT_UN
11156 * CEE_CLT into OP_CLT
11157 * CEE_CLT_UN into OP_CLT_UN
11159 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11161 MONO_INST_NEW (cfg, ins, cmp->opcode);
11163 cmp->sreg1 = sp [0]->dreg;
11164 cmp->sreg2 = sp [1]->dreg;
11165 type_from_op (cmp, sp [0], sp [1]);
11167 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11168 cmp->opcode = OP_LCOMPARE;
11169 else if (sp [0]->type == STACK_R8)
11170 cmp->opcode = OP_FCOMPARE;
11172 cmp->opcode = OP_ICOMPARE;
11173 MONO_ADD_INS (bblock, cmp);
11174 ins->type = STACK_I4;
11175 ins->dreg = alloc_dreg (cfg, ins->type);
11176 type_from_op (ins, sp [0], sp [1]);
11178 if (cmp->opcode == OP_FCOMPARE) {
11180 * The backends expect the fceq opcodes to do the
11183 cmp->opcode = OP_NOP;
11184 ins->sreg1 = cmp->sreg1;
11185 ins->sreg2 = cmp->sreg2;
11187 MONO_ADD_INS (bblock, ins);
11193 MonoInst *argconst;
11194 MonoMethod *cil_method;
11196 CHECK_STACK_OVF (1);
11198 n = read32 (ip + 2);
11199 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11200 if (!cmethod || mono_loader_get_last_error ())
11202 mono_class_init (cmethod->klass);
11204 mono_save_token_info (cfg, image, n, cmethod);
11206 context_used = mini_method_check_context_used (cfg, cmethod);
11208 cil_method = cmethod;
11209 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11210 METHOD_ACCESS_FAILURE;
11212 if (mono_security_cas_enabled ()) {
11213 if (check_linkdemand (cfg, method, cmethod))
11214 INLINE_FAILURE ("linkdemand");
11215 CHECK_CFG_EXCEPTION;
11216 } else if (mono_security_core_clr_enabled ()) {
11217 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11221 * Optimize the common case of ldftn+delegate creation
11223 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11224 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11225 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11226 MonoInst *target_ins;
11227 MonoMethod *invoke;
11228 int invoke_context_used;
11230 invoke = mono_get_delegate_invoke (ctor_method->klass);
11231 if (!invoke || !mono_method_signature (invoke))
11234 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11236 target_ins = sp [-1];
11238 if (mono_security_core_clr_enabled ())
11239 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11241 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11242 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11243 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11244 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11245 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11249 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11250 /* FIXME: SGEN support */
11251 if (invoke_context_used == 0) {
11253 if (cfg->verbose_level > 3)
11254 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11256 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11257 CHECK_CFG_EXCEPTION;
11266 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11267 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11271 inline_costs += 10 * num_calls++;
11274 case CEE_LDVIRTFTN: {
11275 MonoInst *args [2];
11279 n = read32 (ip + 2);
11280 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11281 if (!cmethod || mono_loader_get_last_error ())
11283 mono_class_init (cmethod->klass);
11285 context_used = mini_method_check_context_used (cfg, cmethod);
11287 if (mono_security_cas_enabled ()) {
11288 if (check_linkdemand (cfg, method, cmethod))
11289 INLINE_FAILURE ("linkdemand");
11290 CHECK_CFG_EXCEPTION;
11291 } else if (mono_security_core_clr_enabled ()) {
11292 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11298 args [1] = emit_get_rgctx_method (cfg, context_used,
11299 cmethod, MONO_RGCTX_INFO_METHOD);
11302 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11304 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11307 inline_costs += 10 * num_calls++;
11311 CHECK_STACK_OVF (1);
11313 n = read16 (ip + 2);
11315 EMIT_NEW_ARGLOAD (cfg, ins, n);
11320 CHECK_STACK_OVF (1);
11322 n = read16 (ip + 2);
11324 NEW_ARGLOADA (cfg, ins, n);
11325 MONO_ADD_INS (cfg->cbb, ins);
11333 n = read16 (ip + 2);
11335 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11337 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11341 CHECK_STACK_OVF (1);
11343 n = read16 (ip + 2);
11345 EMIT_NEW_LOCLOAD (cfg, ins, n);
11350 unsigned char *tmp_ip;
11351 CHECK_STACK_OVF (1);
11353 n = read16 (ip + 2);
11356 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11362 EMIT_NEW_LOCLOADA (cfg, ins, n);
11371 n = read16 (ip + 2);
11373 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11375 emit_stloc_ir (cfg, sp, header, n);
11382 if (sp != stack_start)
11384 if (cfg->method != method)
11386 * Inlining this into a loop in a parent could lead to
11387 * stack overflows which is different behavior than the
11388 * non-inlined case, thus disable inlining in this case.
11390 goto inline_failure;
11392 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11393 ins->dreg = alloc_preg (cfg);
11394 ins->sreg1 = sp [0]->dreg;
11395 ins->type = STACK_PTR;
11396 MONO_ADD_INS (cfg->cbb, ins);
11398 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11400 ins->flags |= MONO_INST_INIT;
11405 case CEE_ENDFILTER: {
11406 MonoExceptionClause *clause, *nearest;
11407 int cc, nearest_num;
11411 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11413 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11414 ins->sreg1 = (*sp)->dreg;
11415 MONO_ADD_INS (bblock, ins);
11416 start_new_bblock = 1;
11421 for (cc = 0; cc < header->num_clauses; ++cc) {
11422 clause = &header->clauses [cc];
11423 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11424 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11425 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11430 g_assert (nearest);
11431 if ((ip - header->code) != nearest->handler_offset)
11436 case CEE_UNALIGNED_:
11437 ins_flag |= MONO_INST_UNALIGNED;
11438 /* FIXME: record alignment? we can assume 1 for now */
11442 case CEE_VOLATILE_:
11443 ins_flag |= MONO_INST_VOLATILE;
11447 ins_flag |= MONO_INST_TAILCALL;
11448 cfg->flags |= MONO_CFG_HAS_TAIL;
11449 /* Can't inline tail calls at this time */
11450 inline_costs += 100000;
11457 token = read32 (ip + 2);
11458 klass = mini_get_class (method, token, generic_context);
11459 CHECK_TYPELOAD (klass);
11460 if (generic_class_is_reference_type (cfg, klass))
11461 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11463 mini_emit_initobj (cfg, *sp, NULL, klass);
11467 case CEE_CONSTRAINED_:
11469 token = read32 (ip + 2);
11470 constrained_call = mini_get_class (method, token, generic_context);
11471 CHECK_TYPELOAD (constrained_call);
11475 case CEE_INITBLK: {
11476 MonoInst *iargs [3];
11480 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11481 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11482 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11483 /* emit_memset only works when val == 0 */
11484 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11486 iargs [0] = sp [0];
11487 iargs [1] = sp [1];
11488 iargs [2] = sp [2];
11489 if (ip [1] == CEE_CPBLK) {
11490 MonoMethod *memcpy_method = get_memcpy_method ();
11491 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11493 MonoMethod *memset_method = get_memset_method ();
11494 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11504 ins_flag |= MONO_INST_NOTYPECHECK;
11506 ins_flag |= MONO_INST_NORANGECHECK;
11507 /* we ignore the no-nullcheck for now since we
11508 * really do it explicitly only when doing callvirt->call
11512 case CEE_RETHROW: {
11514 int handler_offset = -1;
11516 for (i = 0; i < header->num_clauses; ++i) {
11517 MonoExceptionClause *clause = &header->clauses [i];
11518 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11519 handler_offset = clause->handler_offset;
11524 bblock->flags |= BB_EXCEPTION_UNSAFE;
11526 g_assert (handler_offset != -1);
11528 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11529 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11530 ins->sreg1 = load->dreg;
11531 MONO_ADD_INS (bblock, ins);
11533 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11534 MONO_ADD_INS (bblock, ins);
11537 link_bblock (cfg, bblock, end_bblock);
11538 start_new_bblock = 1;
11546 GSHAREDVT_FAILURE (*ip);
11548 CHECK_STACK_OVF (1);
11550 token = read32 (ip + 2);
11551 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11552 MonoType *type = mono_type_create_from_typespec (image, token);
11553 val = mono_type_size (type, &ialign);
11555 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11556 CHECK_TYPELOAD (klass);
11557 mono_class_init (klass);
11558 val = mono_type_size (&klass->byval_arg, &ialign);
11560 EMIT_NEW_ICONST (cfg, ins, val);
11565 case CEE_REFANYTYPE: {
11566 MonoInst *src_var, *src;
11568 GSHAREDVT_FAILURE (*ip);
11574 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11576 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11577 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11578 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11583 case CEE_READONLY_:
11596 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11606 g_warning ("opcode 0x%02x not handled", *ip);
11610 if (start_new_bblock != 1)
11613 bblock->cil_length = ip - bblock->cil_code;
11614 if (bblock->next_bb) {
11615 /* This could already be set because of inlining, #693905 */
11616 MonoBasicBlock *bb = bblock;
11618 while (bb->next_bb)
11620 bb->next_bb = end_bblock;
11622 bblock->next_bb = end_bblock;
11625 if (cfg->method == method && cfg->domainvar) {
11627 MonoInst *get_domain;
11629 cfg->cbb = init_localsbb;
11631 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11632 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11635 get_domain->dreg = alloc_preg (cfg);
11636 MONO_ADD_INS (cfg->cbb, get_domain);
11638 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11639 MONO_ADD_INS (cfg->cbb, store);
11642 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11643 if (cfg->compile_aot)
11644 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11645 mono_get_got_var (cfg);
11648 if (cfg->method == method && cfg->got_var)
11649 mono_emit_load_got_addr (cfg);
11654 cfg->cbb = init_localsbb;
11656 for (i = 0; i < header->num_locals; ++i) {
11657 MonoType *ptype = header->locals [i];
11658 int t = ptype->type;
11659 dreg = cfg->locals [i]->dreg;
11661 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11662 t = mono_class_enum_basetype (ptype->data.klass)->type;
11663 if (ptype->byref) {
11664 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11665 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11666 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11667 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11668 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11669 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11670 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11671 ins->type = STACK_R8;
11672 ins->inst_p0 = (void*)&r8_0;
11673 ins->dreg = alloc_dreg (cfg, STACK_R8);
11674 MONO_ADD_INS (init_localsbb, ins);
11675 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11676 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11677 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11678 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11679 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11680 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11682 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11687 if (cfg->init_ref_vars && cfg->method == method) {
11688 /* Emit initialization for ref vars */
11689 // FIXME: Avoid duplication initialization for IL locals.
11690 for (i = 0; i < cfg->num_varinfo; ++i) {
11691 MonoInst *ins = cfg->varinfo [i];
11693 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11694 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11699 MonoBasicBlock *bb;
11702 * Make seq points at backward branch targets interruptable.
11704 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11705 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11706 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11709 /* Add a sequence point for method entry/exit events */
11711 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11712 MONO_ADD_INS (init_localsbb, ins);
11713 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11714 MONO_ADD_INS (cfg->bb_exit, ins);
11718 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11719 * the code they refer to was dead (#11880).
11721 if (sym_seq_points) {
11722 for (i = 0; i < header->code_size; ++i) {
11723 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11726 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11727 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11734 if (cfg->method == method) {
11735 MonoBasicBlock *bb;
11736 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11737 bb->region = mono_find_block_region (cfg, bb->real_offset);
11739 mono_create_spvar_for_region (cfg, bb->region);
11740 if (cfg->verbose_level > 2)
11741 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11745 g_slist_free (class_inits);
11746 dont_inline = g_list_remove (dont_inline, method);
11748 if (inline_costs < 0) {
11751 /* Method is too large */
11752 mname = mono_method_full_name (method, TRUE);
11753 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11754 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11756 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11757 mono_basic_block_free (original_bb);
11761 if ((cfg->verbose_level > 2) && (cfg->method == method))
11762 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11764 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11765 mono_basic_block_free (original_bb);
11766 return inline_costs;
11769 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11776 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11780 set_exception_type_from_invalid_il (cfg, method, ip);
11784 g_slist_free (class_inits);
11785 mono_basic_block_free (original_bb);
11786 dont_inline = g_list_remove (dont_inline, method);
11787 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11792 store_membase_reg_to_store_membase_imm (int opcode)
11795 case OP_STORE_MEMBASE_REG:
11796 return OP_STORE_MEMBASE_IMM;
11797 case OP_STOREI1_MEMBASE_REG:
11798 return OP_STOREI1_MEMBASE_IMM;
11799 case OP_STOREI2_MEMBASE_REG:
11800 return OP_STOREI2_MEMBASE_IMM;
11801 case OP_STOREI4_MEMBASE_REG:
11802 return OP_STOREI4_MEMBASE_IMM;
11803 case OP_STOREI8_MEMBASE_REG:
11804 return OP_STOREI8_MEMBASE_IMM;
11806 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant which takes an
 * immediate second operand (e.g. OP_IADD -> OP_IADD_IMM), or -1 when no
 * immediate form exists.
 * NOTE(review): this excerpt is sparsely sampled -- the switch scaffolding and
 * most of the 'case OP_*:' labels between the return statements are not
 * visible here; do not assume the pairings beyond what the names imply.
 */
11813 mono_op_to_op_imm (int opcode)
/* 32-bit integer arithmetic / bitwise ops */
11817 return OP_IADD_IMM;
11819 return OP_ISUB_IMM;
11821 return OP_IDIV_IMM;
11823 return OP_IDIV_UN_IMM;
11825 return OP_IREM_IMM;
11827 return OP_IREM_UN_IMM;
11829 return OP_IMUL_IMM;
11831 return OP_IAND_IMM;
11835 return OP_IXOR_IMM;
/* 32-bit shifts */
11837 return OP_ISHL_IMM;
11839 return OP_ISHR_IMM;
11841 return OP_ISHR_UN_IMM;
/* 64-bit variants */
11844 return OP_LADD_IMM;
11846 return OP_LSUB_IMM;
11848 return OP_LAND_IMM;
11852 return OP_LXOR_IMM;
11854 return OP_LSHL_IMM;
11856 return OP_LSHR_IMM;
11858 return OP_LSHR_UN_IMM;
/* compares */
11861 return OP_COMPARE_IMM;
11863 return OP_ICOMPARE_IMM;
11865 return OP_LCOMPARE_IMM;
/* stores: fold the stored register into an immediate operand */
11867 case OP_STORE_MEMBASE_REG:
11868 return OP_STORE_MEMBASE_IMM;
11869 case OP_STOREI1_MEMBASE_REG:
11870 return OP_STOREI1_MEMBASE_IMM;
11871 case OP_STOREI2_MEMBASE_REG:
11872 return OP_STOREI2_MEMBASE_IMM;
11873 case OP_STOREI4_MEMBASE_REG:
11874 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes */
11876 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11878 return OP_X86_PUSH_IMM;
11879 case OP_X86_COMPARE_MEMBASE_REG:
11880 return OP_X86_COMPARE_MEMBASE_IMM;
11882 #if defined(TARGET_AMD64)
11883 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11884 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect calls through a register degrade to plain calls */
11886 case OP_VOIDCALL_REG:
11887 return OP_VOIDCALL;
11895 return OP_LOCALLOC_IMM;
11902 ldind_to_load_membase (int opcode)
11906 return OP_LOADI1_MEMBASE;
11908 return OP_LOADU1_MEMBASE;
11910 return OP_LOADI2_MEMBASE;
11912 return OP_LOADU2_MEMBASE;
11914 return OP_LOADI4_MEMBASE;
11916 return OP_LOADU4_MEMBASE;
11918 return OP_LOAD_MEMBASE;
11919 case CEE_LDIND_REF:
11920 return OP_LOAD_MEMBASE;
11922 return OP_LOADI8_MEMBASE;
11924 return OP_LOADR4_MEMBASE;
11926 return OP_LOADR8_MEMBASE;
11928 g_assert_not_reached ();
11935 stind_to_store_membase (int opcode)
11939 return OP_STOREI1_MEMBASE_REG;
11941 return OP_STOREI2_MEMBASE_REG;
11943 return OP_STOREI4_MEMBASE_REG;
11945 case CEE_STIND_REF:
11946 return OP_STORE_MEMBASE_REG;
11948 return OP_STOREI8_MEMBASE_REG;
11950 return OP_STORER4_MEMBASE_REG;
11952 return OP_STORER8_MEMBASE_REG;
11954 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM variant,
 * used when the base register + offset can be folded to a constant address.
 * Only x86/amd64 have these opcodes; the 64-bit load is only available when
 * registers are 8 bytes wide.  Returns -1 when no MEM form exists.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Try to fuse a store into an ALU instruction: given the store opcode used
 * for a stack variable and the ALU opcode whose dreg is that variable, return
 * an x86/amd64 read-modify-write *_MEMBASE opcode which operates directly on
 * the variable's stack slot, or -1 if no fused form applies.
 * NOTE(review): sparsely sampled excerpt -- the 'case OP_*:' labels, early
 * 'return -1;' lines and switch scaffolding are not visible here.
 */
11987 op_to_op_dest_membase (int store_opcode, int opcode)
11989 #if defined(TARGET_X86)
/* x86: fusion is only valid for 4-byte / pointer-sized stores */
11990 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-operand forms */
11995 return OP_X86_ADD_MEMBASE_REG;
11997 return OP_X86_SUB_MEMBASE_REG;
11999 return OP_X86_AND_MEMBASE_REG;
12001 return OP_X86_OR_MEMBASE_REG;
12003 return OP_X86_XOR_MEMBASE_REG;
/* immediate-operand forms */
12006 return OP_X86_ADD_MEMBASE_IMM;
12009 return OP_X86_SUB_MEMBASE_IMM;
12012 return OP_X86_AND_MEMBASE_IMM;
12015 return OP_X86_OR_MEMBASE_IMM;
12018 return OP_X86_XOR_MEMBASE_IMM;
12024 #if defined(TARGET_AMD64)
/* amd64: 8-byte stores are also fusable */
12025 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86_* membase opcodes */
12030 return OP_X86_ADD_MEMBASE_REG;
12032 return OP_X86_SUB_MEMBASE_REG;
12034 return OP_X86_AND_MEMBASE_REG;
12036 return OP_X86_OR_MEMBASE_REG;
12038 return OP_X86_XOR_MEMBASE_REG;
12040 return OP_X86_ADD_MEMBASE_IMM;
12042 return OP_X86_SUB_MEMBASE_IMM;
12044 return OP_X86_AND_MEMBASE_IMM;
12046 return OP_X86_OR_MEMBASE_IMM;
12048 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ops use the AMD64_* membase opcodes */
12050 return OP_AMD64_ADD_MEMBASE_REG;
12052 return OP_AMD64_SUB_MEMBASE_REG;
12054 return OP_AMD64_AND_MEMBASE_REG;
12056 return OP_AMD64_OR_MEMBASE_REG;
12058 return OP_AMD64_XOR_MEMBASE_REG;
12061 return OP_AMD64_ADD_MEMBASE_IMM;
12064 return OP_AMD64_SUB_MEMBASE_IMM;
12067 return OP_AMD64_AND_MEMBASE_IMM;
12070 return OP_AMD64_OR_MEMBASE_IMM;
12073 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Try to fuse a compare-result store: when an integer set-on-condition
 * result is immediately stored as a single byte, return an x86/amd64
 * SET<cc>_MEMBASE opcode which writes the flag byte straight to memory.
 * Returns -1 (not visible in this excerpt) when no fusion applies.
 * NOTE(review): the 'case OP_*:' labels for the two branches below are not
 * visible in this sparsely-sampled excerpt.
 */
12083 op_to_op_store_membase (int store_opcode, int opcode)
12085 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* only a 1-byte store can take the SETcc result directly */
12088 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12089 return OP_X86_SETEQ_MEMBASE;
12091 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12092 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Try to fuse a variable load into the first source operand of an
 * instruction: given the load opcode of the variable and the consuming
 * opcode, return an x86/amd64 *_MEMBASE opcode reading the operand directly
 * from the stack slot, or -1 if no fused form applies.
 * NOTE(review): sparsely sampled excerpt -- switch scaffolding, several case
 * labels and the 'return -1;' fall-throughs are not visible here.
 */
12100 op_to_op_src1_membase (int load_opcode, int opcode)
12103 /* FIXME: This has sign extension issues */
/* special case: byte-sized compare against an immediate */
12105 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12106 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: otherwise only 4-byte / pointer-sized loads are fusable */
12109 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12114 return OP_X86_PUSH_MEMBASE;
12115 case OP_COMPARE_IMM:
12116 case OP_ICOMPARE_IMM:
12117 return OP_X86_COMPARE_MEMBASE_IMM;
12120 return OP_X86_COMPARE_MEMBASE_REG;
12124 #ifdef TARGET_AMD64
12125 /* FIXME: This has sign extension issues */
12127 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12128 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE is 8 */
12133 #ifdef __mono_ilp32__
12134 if (load_opcode == OP_LOADI8_MEMBASE)
12136 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12138 return OP_X86_PUSH_MEMBASE;
/* the block below is commented out in the original: */
12140 /* FIXME: This only works for 32 bit immediates
12141 case OP_COMPARE_IMM:
12142 case OP_LCOMPARE_IMM:
12143 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12144 return OP_AMD64_COMPARE_MEMBASE_IMM;
12146 case OP_ICOMPARE_IMM:
12147 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12148 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12152 #ifdef __mono_ilp32__
12153 if (load_opcode == OP_LOAD_MEMBASE)
12154 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12155 if (load_opcode == OP_LOADI8_MEMBASE)
12157 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12159 return OP_AMD64_COMPARE_MEMBASE_REG;
12162 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12163 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Try to fuse a variable load into the second source operand of an
 * instruction: given the load opcode of the variable and the consuming
 * opcode, return an x86/amd64 *_REG_MEMBASE opcode reading the second
 * operand directly from the stack slot, or -1 if no fused form applies.
 * NOTE(review): sparsely sampled excerpt -- switch scaffolding, case labels
 * and 'return -1;' fall-throughs are not visible here.
 */
12172 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 4-byte / pointer-sized loads are fusable */
12175 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12181 return OP_X86_COMPARE_REG_MEMBASE;
12183 return OP_X86_ADD_REG_MEMBASE;
12185 return OP_X86_SUB_REG_MEMBASE;
12187 return OP_X86_AND_REG_MEMBASE;
12189 return OP_X86_OR_REG_MEMBASE;
12191 return OP_X86_XOR_REG_MEMBASE;
12195 #ifdef TARGET_AMD64
/* under ILP32 (x32), OP_LOAD_MEMBASE counts as a 32-bit load */
12196 #ifdef __mono_ilp32__
12197 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12199 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit operand: X86_* membase forms */
12203 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12205 return OP_X86_ADD_REG_MEMBASE;
12207 return OP_X86_SUB_REG_MEMBASE;
12209 return OP_X86_AND_REG_MEMBASE;
12211 return OP_X86_OR_REG_MEMBASE;
12213 return OP_X86_XOR_REG_MEMBASE;
12215 #ifdef __mono_ilp32__
12216 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12218 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit operand: AMD64_* membase forms */
12223 return OP_AMD64_COMPARE_REG_MEMBASE;
12225 return OP_AMD64_ADD_REG_MEMBASE;
12227 return OP_AMD64_SUB_REG_MEMBASE;
12229 return OP_AMD64_AND_REG_MEMBASE;
12231 return OP_AMD64_OR_REG_MEMBASE;
12233 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (return -1, in
 * lines not visible here) for opcodes which are emulated in software on
 * this architecture, since the emulation helpers have no immediate form.
 * NOTE(review): the 'case OP_*:' labels guarded by these #ifs are not
 * visible in this sparsely-sampled excerpt.
 */
12242 mono_op_to_op_imm_noemul (int opcode)
/* long shifts are emulated on 32-bit targets without native long-shift ops */
12245 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* division/remainder may be emulated */
12251 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* multiplication may be emulated */
12258 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else: defer to the generic register->immediate mapping */
12263 return mono_op_to_op_imm (opcode);
12268 * mono_handle_global_vregs:
12270 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass over all basic blocks: any vreg referenced from more than one
 * bblock is promoted to a 'global' variable (a MonoInst is created for it),
 * while variables used in only one bblock are demoted back to local vregs.
 * Finally the varinfo/vars tables are compacted to drop dead entries.
 * NOTE(review): sparsely sampled excerpt -- braces, some declarations and
 * several statements between the visible lines are missing here.
 */
12274 mono_handle_global_vregs (MonoCompile *cfg)
/* vreg_to_bb [vreg] == 0: unseen; == N+1: seen only in block N; == -1: seen in multiple blocks */
12276 gint32 *vreg_to_bb;
12277 MonoBasicBlock *bb;
/* NOTE(review): allocates sizeof (gint32*) (pointer size) per vreg and the
 * '+ 1' is outside the multiplication -- presumably intended
 * 'sizeof (gint32) * (cfg->next_vreg + 1)'; over-allocation is harmless but
 * confirm against upstream. */
12280 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12282 #ifdef MONO_ARCH_SIMD_INTRINSICS
12283 if (cfg->uses_simd_intrinsics)
12284 mono_simd_simplify_indirection (cfg);
12287 /* Find local vregs used in more than one bb */
12288 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12289 MonoInst *ins = bb->code;
12290 int block_num = bb->block_num;
12292 if (cfg->verbose_level > 2)
12293 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12296 for (; ins; ins = ins->next) {
12297 const char *spec = INS_INFO (ins->opcode);
12298 int regtype = 0, regindex;
12301 if (G_UNLIKELY (cfg->verbose_level > 2))
12302 mono_print_ins (ins);
/* this pass runs after CIL lowering; only machine-level opcodes are expected */
12304 g_assert (ins->opcode >= MONO_CEE_LAST);
/* iterate over dreg (0) and the up-to-3 sregs (1..3) of the instruction */
12306 for (regindex = 0; regindex < 4; regindex ++) {
12309 if (regindex == 0) {
12310 regtype = spec [MONO_INST_DEST];
12311 if (regtype == ' ')
12314 } else if (regindex == 1) {
12315 regtype = spec [MONO_INST_SRC1];
12316 if (regtype == ' ')
12319 } else if (regindex == 2) {
12320 regtype = spec [MONO_INST_SRC2];
12321 if (regtype == ' ')
12324 } else if (regindex == 3) {
12325 regtype = spec [MONO_INST_SRC3];
12326 if (regtype == ' ')
12331 #if SIZEOF_REGISTER == 4
12332 /* In the LLVM case, the long opcodes are not decomposed */
12333 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12335 * Since some instructions reference the original long vreg,
12336 * and some reference the two component vregs, it is quite hard
12337 * to determine when it needs to be global. So be conservative.
12339 if (!get_vreg_to_inst (cfg, vreg)) {
12340 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12342 if (cfg->verbose_level > 2)
12343 printf ("LONG VREG R%d made global.\n", vreg);
12347 * Make the component vregs volatile since the optimizations can
12348 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of the long vreg */
12350 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12351 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12355 g_assert (vreg != -1);
12357 prev_bb = vreg_to_bb [vreg];
12358 if (prev_bb == 0) {
12359 /* 0 is a valid block num */
12360 vreg_to_bb [vreg] = block_num + 1;
12361 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global; skip them */
12362 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12365 if (!get_vreg_to_inst (cfg, vreg)) {
12366 if (G_UNLIKELY (cfg->verbose_level > 2))
12367 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable type from the regtype/ref-ness of the vreg */
12371 if (vreg_is_ref (cfg, vreg))
12372 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12374 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12377 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12380 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12383 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12386 g_assert_not_reached ();
12390 /* Flag as having been used in more than one bb */
12391 vreg_to_bb [vreg] = -1;
12397 /* If a variable is used in only one bblock, convert it into a local vreg */
12398 for (i = 0; i < cfg->num_varinfo; i++) {
12399 MonoInst *var = cfg->varinfo [i];
12400 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12402 switch (var->type) {
12408 #if SIZEOF_REGISTER == 8
12411 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
12412 /* Enabling this screws up the fp stack on x86 */
12415 /* Arguments are implicitly global */
12416 /* Putting R4 vars into registers doesn't work currently */
12417 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12418 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12420 * Make that the variable's liveness interval doesn't contain a call, since
12421 * that would cause the lvreg to be spilled, making the whole optimization
12424 /* This is too slow for JIT compilation */
12426 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12428 int def_index, call_index, ins_index;
12429 gboolean spilled = FALSE;
12434 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12435 const char *spec = INS_INFO (ins->opcode);
12437 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12438 def_index = ins_index;
/* NOTE(review): both clauses of this '||' test SRC1/sreg1 -- the second
 * clause presumably should test MONO_INST_SRC2/ins->sreg2; as written the
 * duplicate is redundant and sreg2 uses are never checked. Confirm against
 * upstream before changing. */
12440 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12441 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12442 if (call_index > def_index) {
12448 if (MONO_IS_CALL (ins))
12449 call_index = ins_index;
12459 if (G_UNLIKELY (cfg->verbose_level > 2))
12460 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and drop the vreg->var mapping */
12461 var->flags |= MONO_INST_IS_DEAD;
12462 cfg->vreg_to_inst [var->dreg] = NULL;
12469 * Compress the varinfo and vars tables so the liveness computation is faster and
12470 * takes up less space.
12473 for (i = 0; i < cfg->num_varinfo; ++i) {
12474 MonoInst *var = cfg->varinfo [i];
12475 if (pos < i && cfg->locals_start == i)
12476 cfg->locals_start = pos;
12477 if (!(var->flags & MONO_INST_IS_DEAD)) {
12479 cfg->varinfo [pos] = cfg->varinfo [i];
12480 cfg->varinfo [pos]->inst_c0 = pos;
12481 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12482 cfg->vars [pos].idx = pos;
12483 #if SIZEOF_REGISTER == 4
12484 if (cfg->varinfo [pos]->type == STACK_I8) {
12485 /* Modify the two component vars too */
/* dreg+1 / dreg+2 are the 32-bit halves of a long variable */
12488 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12489 var1->inst_c0 = pos;
12490 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12491 var1->inst_c0 = pos;
12498 cfg->num_varinfo = pos;
12499 if (cfg->locals_start > cfg->num_varinfo)
12500 cfg->locals_start = cfg->num_varinfo;
12504 * mono_spill_global_vars:
12506 * Generate spill code for variables which are not allocated to registers,
12507 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12508 * code is generated which could be optimized by the local optimization passes.
12511 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12513 MonoBasicBlock *bb;
12515 int orig_next_vreg;
12516 guint32 *vreg_to_lvreg;
12518 guint32 i, lvregs_len;
12519 gboolean dest_has_lvreg = FALSE;
12520 guint32 stacktypes [128];
12521 MonoInst **live_range_start, **live_range_end;
12522 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12523 int *gsharedvt_vreg_to_idx = NULL;
12525 *need_local_opts = FALSE;
12527 memset (spec2, 0, sizeof (spec2));
12529 /* FIXME: Move this function to mini.c */
12530 stacktypes ['i'] = STACK_PTR;
12531 stacktypes ['l'] = STACK_I8;
12532 stacktypes ['f'] = STACK_R8;
12533 #ifdef MONO_ARCH_SIMD_INTRINSICS
12534 stacktypes ['x'] = STACK_VTYPE;
12537 #if SIZEOF_REGISTER == 4
12538 /* Create MonoInsts for longs */
12539 for (i = 0; i < cfg->num_varinfo; i++) {
12540 MonoInst *ins = cfg->varinfo [i];
12542 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12543 switch (ins->type) {
12548 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12551 g_assert (ins->opcode == OP_REGOFFSET);
12553 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12555 tree->opcode = OP_REGOFFSET;
12556 tree->inst_basereg = ins->inst_basereg;
12557 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12559 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12561 tree->opcode = OP_REGOFFSET;
12562 tree->inst_basereg = ins->inst_basereg;
12563 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12573 if (cfg->compute_gc_maps) {
12574 /* registers need liveness info even for !non refs */
12575 for (i = 0; i < cfg->num_varinfo; i++) {
12576 MonoInst *ins = cfg->varinfo [i];
12578 if (ins->opcode == OP_REGVAR)
12579 ins->flags |= MONO_INST_GC_TRACK;
12583 if (cfg->gsharedvt) {
12584 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12586 for (i = 0; i < cfg->num_varinfo; ++i) {
12587 MonoInst *ins = cfg->varinfo [i];
12590 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12591 if (i >= cfg->locals_start) {
12593 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12594 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12595 ins->opcode = OP_GSHAREDVT_LOCAL;
12596 ins->inst_imm = idx;
12599 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12600 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12606 /* FIXME: widening and truncation */
12609 * As an optimization, when a variable allocated to the stack is first loaded into
12610 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12611 * the variable again.
12613 orig_next_vreg = cfg->next_vreg;
12614 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12615 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12619 * These arrays contain the first and last instructions accessing a given
12621 * Since we emit bblocks in the same order we process them here, and we
12622 * don't split live ranges, these will precisely describe the live range of
12623 * the variable, i.e. the instruction range where a valid value can be found
12624 * in the variables location.
12625 * The live range is computed using the liveness info computed by the liveness pass.
12626 * We can't use vmv->range, since that is an abstract live range, and we need
12627 * one which is instruction precise.
12628 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12630 /* FIXME: Only do this if debugging info is requested */
12631 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12632 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12633 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12634 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12636 /* Add spill loads/stores */
12637 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12640 if (cfg->verbose_level > 2)
12641 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12643 /* Clear vreg_to_lvreg array */
12644 for (i = 0; i < lvregs_len; i++)
12645 vreg_to_lvreg [lvregs [i]] = 0;
12649 MONO_BB_FOR_EACH_INS (bb, ins) {
12650 const char *spec = INS_INFO (ins->opcode);
12651 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12652 gboolean store, no_lvreg;
12653 int sregs [MONO_MAX_SRC_REGS];
12655 if (G_UNLIKELY (cfg->verbose_level > 2))
12656 mono_print_ins (ins);
12658 if (ins->opcode == OP_NOP)
12662 * We handle LDADDR here as well, since it can only be decomposed
12663 * when variable addresses are known.
12665 if (ins->opcode == OP_LDADDR) {
12666 MonoInst *var = ins->inst_p0;
12668 if (var->opcode == OP_VTARG_ADDR) {
12669 /* Happens on SPARC/S390 where vtypes are passed by reference */
12670 MonoInst *vtaddr = var->inst_left;
12671 if (vtaddr->opcode == OP_REGVAR) {
12672 ins->opcode = OP_MOVE;
12673 ins->sreg1 = vtaddr->dreg;
12675 else if (var->inst_left->opcode == OP_REGOFFSET) {
12676 ins->opcode = OP_LOAD_MEMBASE;
12677 ins->inst_basereg = vtaddr->inst_basereg;
12678 ins->inst_offset = vtaddr->inst_offset;
12681 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12682 /* gsharedvt arg passed by ref */
12683 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12685 ins->opcode = OP_LOAD_MEMBASE;
12686 ins->inst_basereg = var->inst_basereg;
12687 ins->inst_offset = var->inst_offset;
12688 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12689 MonoInst *load, *load2, *load3;
12690 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12691 int reg1, reg2, reg3;
12692 MonoInst *info_var = cfg->gsharedvt_info_var;
12693 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12697 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12700 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12702 g_assert (info_var);
12703 g_assert (locals_var);
12705 /* Mark the instruction used to compute the locals var as used */
12706 cfg->gsharedvt_locals_var_ins = NULL;
12708 /* Load the offset */
12709 if (info_var->opcode == OP_REGOFFSET) {
12710 reg1 = alloc_ireg (cfg);
12711 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12712 } else if (info_var->opcode == OP_REGVAR) {
12714 reg1 = info_var->dreg;
12716 g_assert_not_reached ();
12718 reg2 = alloc_ireg (cfg);
12719 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12720 /* Load the locals area address */
12721 reg3 = alloc_ireg (cfg);
12722 if (locals_var->opcode == OP_REGOFFSET) {
12723 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12724 } else if (locals_var->opcode == OP_REGVAR) {
12725 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12727 g_assert_not_reached ();
12729 /* Compute the address */
12730 ins->opcode = OP_PADD;
12734 mono_bblock_insert_before_ins (bb, ins, load3);
12735 mono_bblock_insert_before_ins (bb, load3, load2);
12737 mono_bblock_insert_before_ins (bb, load2, load);
12739 g_assert (var->opcode == OP_REGOFFSET);
12741 ins->opcode = OP_ADD_IMM;
12742 ins->sreg1 = var->inst_basereg;
12743 ins->inst_imm = var->inst_offset;
12746 *need_local_opts = TRUE;
12747 spec = INS_INFO (ins->opcode);
12750 if (ins->opcode < MONO_CEE_LAST) {
12751 mono_print_ins (ins);
12752 g_assert_not_reached ();
12756 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12760 if (MONO_IS_STORE_MEMBASE (ins)) {
12761 tmp_reg = ins->dreg;
12762 ins->dreg = ins->sreg2;
12763 ins->sreg2 = tmp_reg;
12766 spec2 [MONO_INST_DEST] = ' ';
12767 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12768 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12769 spec2 [MONO_INST_SRC3] = ' ';
12771 } else if (MONO_IS_STORE_MEMINDEX (ins))
12772 g_assert_not_reached ();
12777 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12778 printf ("\t %.3s %d", spec, ins->dreg);
12779 num_sregs = mono_inst_get_src_registers (ins, sregs);
12780 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12781 printf (" %d", sregs [srcindex]);
12788 regtype = spec [MONO_INST_DEST];
12789 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12792 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12793 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12794 MonoInst *store_ins;
12796 MonoInst *def_ins = ins;
12797 int dreg = ins->dreg; /* The original vreg */
12799 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12801 if (var->opcode == OP_REGVAR) {
12802 ins->dreg = var->dreg;
12803 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12805 * Instead of emitting a load+store, use a _membase opcode.
12807 g_assert (var->opcode == OP_REGOFFSET);
12808 if (ins->opcode == OP_MOVE) {
12812 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12813 ins->inst_basereg = var->inst_basereg;
12814 ins->inst_offset = var->inst_offset;
12817 spec = INS_INFO (ins->opcode);
12821 g_assert (var->opcode == OP_REGOFFSET);
12823 prev_dreg = ins->dreg;
12825 /* Invalidate any previous lvreg for this vreg */
12826 vreg_to_lvreg [ins->dreg] = 0;
12830 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12832 store_opcode = OP_STOREI8_MEMBASE_REG;
12835 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12837 #if SIZEOF_REGISTER != 8
12838 if (regtype == 'l') {
12839 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12840 mono_bblock_insert_after_ins (bb, ins, store_ins);
12841 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12842 mono_bblock_insert_after_ins (bb, ins, store_ins);
12843 def_ins = store_ins;
12848 g_assert (store_opcode != OP_STOREV_MEMBASE);
12850 /* Try to fuse the store into the instruction itself */
12851 /* FIXME: Add more instructions */
12852 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12853 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12854 ins->inst_imm = ins->inst_c0;
12855 ins->inst_destbasereg = var->inst_basereg;
12856 ins->inst_offset = var->inst_offset;
12857 spec = INS_INFO (ins->opcode);
12858 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12859 ins->opcode = store_opcode;
12860 ins->inst_destbasereg = var->inst_basereg;
12861 ins->inst_offset = var->inst_offset;
12865 tmp_reg = ins->dreg;
12866 ins->dreg = ins->sreg2;
12867 ins->sreg2 = tmp_reg;
12870 spec2 [MONO_INST_DEST] = ' ';
12871 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12872 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12873 spec2 [MONO_INST_SRC3] = ' ';
12875 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12876 // FIXME: The backends expect the base reg to be in inst_basereg
12877 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12879 ins->inst_basereg = var->inst_basereg;
12880 ins->inst_offset = var->inst_offset;
12881 spec = INS_INFO (ins->opcode);
12883 /* printf ("INS: "); mono_print_ins (ins); */
12884 /* Create a store instruction */
12885 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12887 /* Insert it after the instruction */
12888 mono_bblock_insert_after_ins (bb, ins, store_ins);
12890 def_ins = store_ins;
12893 * We can't assign ins->dreg to var->dreg here, since the
12894 * sregs could use it. So set a flag, and do it after
12897 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12898 dest_has_lvreg = TRUE;
12903 if (def_ins && !live_range_start [dreg]) {
12904 live_range_start [dreg] = def_ins;
12905 live_range_start_bb [dreg] = bb;
12908 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12911 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12912 tmp->inst_c1 = dreg;
12913 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12920 num_sregs = mono_inst_get_src_registers (ins, sregs);
12921 for (srcindex = 0; srcindex < 3; ++srcindex) {
12922 regtype = spec [MONO_INST_SRC1 + srcindex];
12923 sreg = sregs [srcindex];
12925 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12926 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12927 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12928 MonoInst *use_ins = ins;
12929 MonoInst *load_ins;
12930 guint32 load_opcode;
12932 if (var->opcode == OP_REGVAR) {
12933 sregs [srcindex] = var->dreg;
12934 //mono_inst_set_src_registers (ins, sregs);
12935 live_range_end [sreg] = use_ins;
12936 live_range_end_bb [sreg] = bb;
12938 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12941 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12942 /* var->dreg is a hreg */
12943 tmp->inst_c1 = sreg;
12944 mono_bblock_insert_after_ins (bb, ins, tmp);
12950 g_assert (var->opcode == OP_REGOFFSET);
12952 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12954 g_assert (load_opcode != OP_LOADV_MEMBASE);
12956 if (vreg_to_lvreg [sreg]) {
12957 g_assert (vreg_to_lvreg [sreg] != -1);
12959 /* The variable is already loaded to an lvreg */
12960 if (G_UNLIKELY (cfg->verbose_level > 2))
12961 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12962 sregs [srcindex] = vreg_to_lvreg [sreg];
12963 //mono_inst_set_src_registers (ins, sregs);
12967 /* Try to fuse the load into the instruction */
12968 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12969 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12970 sregs [0] = var->inst_basereg;
12971 //mono_inst_set_src_registers (ins, sregs);
12972 ins->inst_offset = var->inst_offset;
12973 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12974 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12975 sregs [1] = var->inst_basereg;
12976 //mono_inst_set_src_registers (ins, sregs);
12977 ins->inst_offset = var->inst_offset;
12979 if (MONO_IS_REAL_MOVE (ins)) {
12980 ins->opcode = OP_NOP;
12983 //printf ("%d ", srcindex); mono_print_ins (ins);
12985 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12987 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12988 if (var->dreg == prev_dreg) {
12990 * sreg refers to the value loaded by the load
12991 * emitted below, but we need to use ins->dreg
12992 * since it refers to the store emitted earlier.
12996 g_assert (sreg != -1);
12997 vreg_to_lvreg [var->dreg] = sreg;
12998 g_assert (lvregs_len < 1024);
12999 lvregs [lvregs_len ++] = var->dreg;
13003 sregs [srcindex] = sreg;
13004 //mono_inst_set_src_registers (ins, sregs);
13006 #if SIZEOF_REGISTER != 8
13007 if (regtype == 'l') {
13008 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13009 mono_bblock_insert_before_ins (bb, ins, load_ins);
13010 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13011 mono_bblock_insert_before_ins (bb, ins, load_ins);
13012 use_ins = load_ins;
13017 #if SIZEOF_REGISTER == 4
13018 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13020 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13021 mono_bblock_insert_before_ins (bb, ins, load_ins);
13022 use_ins = load_ins;
13026 if (var->dreg < orig_next_vreg) {
13027 live_range_end [var->dreg] = use_ins;
13028 live_range_end_bb [var->dreg] = bb;
13031 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13034 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13035 tmp->inst_c1 = var->dreg;
13036 mono_bblock_insert_after_ins (bb, ins, tmp);
13040 mono_inst_set_src_registers (ins, sregs);
13042 if (dest_has_lvreg) {
13043 g_assert (ins->dreg != -1);
13044 vreg_to_lvreg [prev_dreg] = ins->dreg;
13045 g_assert (lvregs_len < 1024);
13046 lvregs [lvregs_len ++] = prev_dreg;
13047 dest_has_lvreg = FALSE;
13051 tmp_reg = ins->dreg;
13052 ins->dreg = ins->sreg2;
13053 ins->sreg2 = tmp_reg;
13056 if (MONO_IS_CALL (ins)) {
13057 /* Clear vreg_to_lvreg array */
13058 for (i = 0; i < lvregs_len; i++)
13059 vreg_to_lvreg [lvregs [i]] = 0;
13061 } else if (ins->opcode == OP_NOP) {
13063 MONO_INST_NULLIFY_SREGS (ins);
13066 if (cfg->verbose_level > 2)
13067 mono_print_ins_index (1, ins);
13070 /* Extend the live range based on the liveness info */
13071 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13072 for (i = 0; i < cfg->num_varinfo; i ++) {
13073 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13075 if (vreg_is_volatile (cfg, vi->vreg))
13076 /* The liveness info is incomplete */
13079 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13080 /* Live from at least the first ins of this bb */
13081 live_range_start [vi->vreg] = bb->code;
13082 live_range_start_bb [vi->vreg] = bb;
13085 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13086 /* Live at least until the last ins of this bb */
13087 live_range_end [vi->vreg] = bb->last_ins;
13088 live_range_end_bb [vi->vreg] = bb;
13094 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13096 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13097 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13099 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13100 for (i = 0; i < cfg->num_varinfo; ++i) {
13101 int vreg = MONO_VARINFO (cfg, i)->vreg;
13104 if (live_range_start [vreg]) {
13105 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13107 ins->inst_c1 = vreg;
13108 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13110 if (live_range_end [vreg]) {
13111 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13113 ins->inst_c1 = vreg;
13114 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13115 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13117 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13123 if (cfg->gsharedvt_locals_var_ins) {
13124 /* Nullify if unused */
13125 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13126 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13129 g_free (live_range_start);
13130 g_free (live_range_end);
13131 g_free (live_range_start_bb);
13132 g_free (live_range_end_bb);
13137 * - use 'iadd' instead of 'int_add'
13138 * - handling ovf opcodes: decompose in method_to_ir.
13139 * - unify iregs/fregs
13140 * -> partly done, the missing parts are:
13141 * - a more complete unification would involve unifying the hregs as well, so
13142 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13143 * would no longer map to the machine hregs, so the code generators would need to
13144 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13145 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13146 * fp/non-fp branches speeds it up by about 15%.
13147 * - use sext/zext opcodes instead of shifts
13149 * - get rid of TEMPLOADs if possible and use vregs instead
13150 * - clean up usage of OP_P/OP_ opcodes
13151 * - cleanup usage of DUMMY_USE
13152 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13154 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13155 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13156 * - make sure handle_stack_args () is called before the branch is emitted
13157 * - when the new IR is done, get rid of all unused stuff
13158 * - COMPARE/BEQ as separate instructions or unify them ?
13159 * - keeping them separate allows specialized compare instructions like
13160 * compare_imm, compare_membase
13161 * - most back ends unify fp compare+branch, fp compare+ceq
13162 * - integrate mono_save_args into inline_method
 13163 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13164 * - handle long shift opts on 32 bit platforms somehow: they require
13165 * 3 sregs (2 for arg1 and 1 for arg2)
13166 * - make byref a 'normal' type.
13167 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13168 * variable if needed.
13169 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13170 * like inline_method.
13171 * - remove inlining restrictions
13172 * - fix LNEG and enable cfold of INEG
13173 * - generalize x86 optimizations like ldelema as a peephole optimization
13174 * - add store_mem_imm for amd64
13175 * - optimize the loading of the interruption flag in the managed->native wrappers
13176 * - avoid special handling of OP_NOP in passes
13177 * - move code inserting instructions into one function/macro.
13178 * - try a coalescing phase after liveness analysis
13179 * - add float -> vreg conversion + local optimizations on !x86
13180 * - figure out how to handle decomposed branches during optimizations, ie.
13181 * compare+branch, op_jump_table+op_br etc.
13182 * - promote RuntimeXHandles to vregs
13183 * - vtype cleanups:
13184 * - add a NEW_VARLOADA_VREG macro
13185 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13186 * accessing vtype fields.
13187 * - get rid of I8CONST on 64 bit platforms
13188 * - dealing with the increase in code size due to branches created during opcode
13190 * - use extended basic blocks
13191 * - all parts of the JIT
13192 * - handle_global_vregs () && local regalloc
13193 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13194 * - sources of increase in code size:
13197 * - isinst and castclass
13198 * - lvregs not allocated to global registers even if used multiple times
13199 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13201 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13202 * - add all micro optimizations from the old JIT
13203 * - put tree optimizations into the deadce pass
13204 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13205 * specific function.
13206 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13207 * fcompare + branchCC.
13208 * - create a helper function for allocating a stack slot, taking into account
13209 * MONO_CFG_HAS_SPILLUP.
13211 * - merge the ia64 switch changes.
13212 * - optimize mono_regstate2_alloc_int/float.
13213 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13214 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13215 * parts of the tree could be separated by other instructions, killing the tree
13216 * arguments, or stores killing loads etc. Also, should we fold loads into other
13217 * instructions if the result of the load is used multiple times ?
13218 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13219 * - LAST MERGE: 108395.
13220 * - when returning vtypes in registers, generate IR and append it to the end of the
13221 * last bb instead of doing it in the epilog.
13222 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13230 - When to decompose opcodes:
13231 - earlier: this makes some optimizations hard to implement, since the low level IR
 13232 no longer contains the necessary information. But it is easier to do.
13233 - later: harder to implement, enables more optimizations.
13234 - Branches inside bblocks:
13235 - created when decomposing complex opcodes.
13236 - branches to another bblock: harmless, but not tracked by the branch
13237 optimizations, so need to branch to a label at the start of the bblock.
13238 - branches to inside the same bblock: very problematic, trips up the local
 13239 reg allocator. Can be fixed by splitting the current bblock, but that is a
13240 complex operation, since some local vregs can become global vregs etc.
13241 - Local/global vregs:
13242 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13243 local register allocator.
13244 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13245 structure, created by mono_create_var (). Assigned to hregs or the stack by
13246 the global register allocator.
13247 - When to do optimizations like alu->alu_imm:
13248 - earlier -> saves work later on since the IR will be smaller/simpler
13249 - later -> can work on more instructions
13250 - Handling of valuetypes:
13251 - When a vtype is pushed on the stack, a new temporary is created, an
13252 instruction computing its address (LDADDR) is emitted and pushed on
13253 the stack. Need to optimize cases when the vtype is used immediately as in
13254 argument passing, stloc etc.
13255 - Instead of the to_end stuff in the old JIT, simply call the function handling
13256 the values on the stack before emitting the last instruction of the bb.
13259 #endif /* DISABLE_JIT */