2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow proceeds to after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single join point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the join point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bb: pick or allocate the out_stack variables. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor, if one already has it. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor has an in_stack yet: allocate fresh variables. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpose, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every (non-handler) successor. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
/* stack depth mismatch at a join point: flag for the verifier */
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Spill the stack items into the shared temporaries. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* store to this successor's distinct in_stack variables too */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
/* AOT: the interface id is not known at compile time; load it via a
 * patchable constant (pre-adjusted so it indexes before the vtable). */
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, use a negative offset
 * from the vtable (the array lives immediately before it). */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit code which sets INTF_BIT_REG to a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap located at
 * BASE_REG + OFFSET.
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to a JIT icall. */
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: load the relevant byte and mask the bit directly. */
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* AOT: interface_id unknown at compile time, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime. */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both byte offset and mask are compile-time constants. */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* thin wrapper: check the bitmap embedded in MonoClass */
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* thin wrapper: check the bitmap embedded in MonoVTable */
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whether the interface id of @klass is smaller
1527 * than the value given by max_iid_reg.
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
/* AOT: load the interface id through a patchable constant */
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* on failure either branch to false_target or throw, depending on caller */
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable.max_interface_id, then delegate the comparison */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass.max_interface_id, then delegate the comparison */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style check: test whether the class in KLASS_REG derives
 * from KLASS by probing the supertypes table at KLASS's idepth. Branches
 * to TRUE_TARGET on match, FALSE_TARGET when the idepth is too small.
 * KLASS_INS optionally supplies the target class as a runtime value.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* deep hierarchies: must verify the candidate's supertypes table is
 * deep enough before indexing it */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* load supertypes [idepth - 1] and compare it against KLASS */
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with the target class known at compile time. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit a cast-to-interface check against the vtable in VTABLE_REG:
 * verify the interface id is in range, then test the interface bitmap.
 * Branches to TRUE_TARGET/FALSE_TARGET, or throws InvalidCastException
 * when no targets are given.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
/* range-check the interface id, then test the class's interface bitmap */
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check: compare KLASS_REG against KLASS
 * (or the runtime value in KLASS_INST) and throw InvalidCastException
 * on mismatch.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
/* AOT cannot embed raw MonoClass pointers; use a patchable constant */
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time class only. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (caller chooses equality/inequality semantics via the opcode).
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* forward declaration: needed below for the recursive array-element check */
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Handles array types
 * (rank + element-class checks, recursing for nested arrays) and
 * plain classes (supertypes-table check). OBJ_REG may be -1 to skip
 * the object-level SZARRAY bounds check.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* array path: first the ranks must match ... */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ... then the element classes, dispatched on the kind of cast_class */
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: walk the supertypes table, like the isinst check
 * above but throwing instead of branching on failure */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time class only. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline code which sets SIZE bytes at DESTREG + OFFSET to VAL
 * (only VAL == 0 is supported here), using the widest stores that
 * ALIGN permits.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
/* small aligned sizes: a single immediate store suffices */
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize VAL in a register, then emit a store loop
 * unrolled by decreasing store width */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until aligned. This could be optimized
 * further if necessary. */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline code which copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, unrolled as load/store pairs of the widest width
 * that ALIGN permits.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* unaligned prefix: byte copies until aligned. This could be optimized
 * further if necessary. */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
/* 8-byte copies on 64-bit targets that tolerate unaligned access */
1833 if (SIZEOF_REGISTER == 8) {
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a return TYPE to the matching call opcode family
 * (plain / _REG for calli / VIRT variants), e.g. void -> OP_VOIDCALL*,
 * i8 -> OP_LCALL*, r4/r8 -> OP_FCALL*, valuetypes -> OP_VCALL*.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: plain CALL */
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* resolve generic-sharing type variables to their basic type first */
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
/* enums are handled as their underlying integral type */
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
/* re-dispatch on the generic type definition */
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 case MONO_TYPE_MVAR:
1922 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1924 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1930 * target_type_is_incompatible:
1931 * @cfg: MonoCompile context
1933 * Check that the item @arg on the evaluation stack can be stored
1934 * in the target type (can be a local, or field, etc).
1935 * The cfg arg can be used to check if we need verification or just
1938 * Returns: non-0 value if arg can't be stored on a target.
1941 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1943 MonoType *simple_type;
1946 if (target->byref) {
1947 /* FIXME: check that the pointed to types match */
1948 if (arg->type == STACK_MP)
1949 return arg->klass != mono_class_from_mono_type (target);
1950 if (arg->type == STACK_PTR)
/* strip enum wrappers etc., then compare stack types per CIL rules */
1955 simple_type = mono_type_get_underlying_type (target);
1956 switch (simple_type->type) {
1957 case MONO_TYPE_VOID:
1961 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_CHAR:
1967 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1971 /* STACK_MP is needed when setting pinned locals */
1972 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1977 case MONO_TYPE_FNPTR:
1979 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1980 * in native int. (#688008).
1982 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 if (arg->type != STACK_I8)
2001 if (arg->type != STACK_R8)
2004 case MONO_TYPE_VALUETYPE:
/* valuetypes must match exactly by class, not just stack type */
2005 if (arg->type != STACK_VTYPE)
2007 klass = mono_class_from_mono_type (simple_type);
2008 if (klass != arg->klass)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (arg->type != STACK_VTYPE)
2014 klass = mono_class_from_mono_type (simple_type);
2015 if (klass != arg->klass)
2018 case MONO_TYPE_GENERICINST:
2019 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2020 if (arg->type != STACK_VTYPE)
2022 klass = mono_class_from_mono_type (simple_type);
2023 if (klass != arg->klass)
2027 if (arg->type != STACK_OBJ)
2029 /* FIXME: check type compatibility */
2033 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2034 g_assert (cfg->generic_sharing_context);
2035 if (mini_type_var_is_vt (cfg, simple_type)) {
2036 if (arg->type != STACK_VTYPE)
2039 if (arg->type != STACK_OBJ)
2044 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2050 * Prepare arguments for passing to a function call.
2051 * Return a non-zero value if the arguments can't be passed to the given
2053 * The type checks are not yet complete and some conversions may need
2054 * casts on 32 or 64 bit architectures.
2056 * FIXME: implement this using target_type_is_incompatible ()
2059 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2061 MonoType *simple_type;
/* check the implicit 'this' argument, when present */
2065 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* then each declared parameter against its stack type */
2069 for (i = 0; i < sig->param_count; ++i) {
2070 if (sig->params [i]->byref) {
2071 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2075 simple_type = sig->params [i];
2076 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2078 switch (simple_type->type) {
2079 case MONO_TYPE_VOID:
2084 case MONO_TYPE_BOOLEAN:
2087 case MONO_TYPE_CHAR:
2090 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2096 case MONO_TYPE_FNPTR:
2097 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2100 case MONO_TYPE_CLASS:
2101 case MONO_TYPE_STRING:
2102 case MONO_TYPE_OBJECT:
2103 case MONO_TYPE_SZARRAY:
2104 case MONO_TYPE_ARRAY:
2105 if (args [i]->type != STACK_OBJ)
2110 if (args [i]->type != STACK_I8)
2115 if (args [i]->type != STACK_R8)
2118 case MONO_TYPE_VALUETYPE:
/* enums are checked as their underlying integral type */
2119 if (simple_type->data.klass->enumtype) {
2120 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_TYPEDBYREF:
2127 if (args [i]->type != STACK_VTYPE)
2130 case MONO_TYPE_GENERICINST:
2131 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 case MONO_TYPE_MVAR:
2136 if (args [i]->type != STACK_VTYPE)
2140 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart. */
2148 callvirt_to_call (int opcode)
2153 case OP_VOIDCALLVIRT:
/* unknown opcode: programming error */
2162 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its MEMBASE (call-through-memory) counterpart. */
2169 callvirt_to_call_membase (int opcode)
2173 return OP_CALL_MEMBASE;
2174 case OP_VOIDCALLVIRT:
2175 return OP_VOIDCALL_MEMBASE;
2177 return OP_FCALL_MEMBASE;
2179 return OP_LCALL_MEMBASE;
2181 return OP_VCALL_MEMBASE;
/* unknown opcode: programming error */
2183 g_assert_not_reached ();
2189 #ifdef MONO_ARCH_HAVE_IMT
2190 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (the method identity used for interface
 * method dispatch) into a register and attach it to CALL, via the
 * architecture's IMT register when available.
 */
2192 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2196 if (COMPILE_LLVM (cfg)) {
/* LLVM path: pass the method identity through call->imt_arg_reg */
2197 method_reg = alloc_preg (cfg);
2200 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2201 } else if (cfg->compile_aot) {
2202 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2205 MONO_INST_NEW (cfg, ins, OP_PCONST);
2206 ins->inst_p0 = method;
2207 ins->dreg = method_reg;
2208 MONO_ADD_INS (cfg->cbb, ins);
2212 call->imt_arg_reg = method_reg;
2214 #ifdef MONO_ARCH_IMT_REG
2215 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2217 /* Need this to keep the IMT arg alive */
2218 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: either load the dedicated IMT register ... */
2223 #ifdef MONO_ARCH_IMT_REG
2224 method_reg = alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2228 } else if (cfg->compile_aot) {
2229 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2232 MONO_INST_NEW (cfg, ins, OP_PCONST);
2233 ins->inst_p0 = method;
2234 ins->dreg = method_reg;
2235 MONO_ADD_INS (cfg->cbb, ins);
2238 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* ... or fall back to an arch-specific emission hook */
2240 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * pointing to TARGET.
 */
2245 static MonoJumpInfo *
2246 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2248 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2252 ji->data.target = target;
/* Return KLASS's generic-sharing context usage; 0 when sharing is off. */
2258 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2260 if (cfg->generic_sharing_context)
2261 return mono_class_check_context_used (klass);
/* Return METHOD's generic-sharing context usage; 0 when sharing is off. */
2267 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2269 if (cfg->generic_sharing_context)
2270 return mono_method_check_context_used (method);
2276 * check_method_sharing:
2278 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2281 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2283 gboolean pass_vtable = FALSE;
2284 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods of generic classes may need the vtable */
2286 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2287 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2288 gboolean sharable = FALSE;
2290 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2293 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2294 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2295 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2297 sharable = sharing_enabled && context_sharable;
2301 * Pass vtable iff target method might
2302 * be shared, which means that sharing
2303 * is enabled for its class and its
2304 * context is sharable (and it's not a
2307 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (with a method_inst) take an mrgctx instead */
2311 if (mini_method_get_context (cmethod) &&
2312 mini_method_get_context (cmethod)->method_inst) {
2313 g_assert (!pass_vtable);
2315 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2318 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2319 MonoGenericContext *context = mini_method_get_context (cmethod);
2320 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2322 if (sharing_enabled && context_sharable)
2324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* report results through the optional out parameters */
2329 if (out_pass_vtable)
2330 *out_pass_vtable = pass_vtable;
2331 if (out_pass_mrgctx)
2332 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS, choosing the call opcode from the
 * return type and the calli/virtual/tail flags, setting up valuetype
 * return handling, and running the arch-specific outgoing-argument
 * lowering. The instruction is NOT added to a bblock here.
 */
2335 inline static MonoCallInst *
2336 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2337 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2340 #ifdef MONO_ARCH_SOFT_FLOAT
2345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2347 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2350 call->signature = sig;
2351 call->rgctx_reg = rgctx;
2353 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* valuetype returns: either reuse cfg->vret_addr ... */
2356 if (mini_type_is_vtype (cfg, sig->ret)) {
2357 call->vret_var = cfg->vret_addr;
2358 //g_assert_not_reached ();
/* ... or allocate a temp and pass its address via OP_OUTARG_VTRETADDR */
2360 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2361 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2364 temp->backend.is_pinvoke = sig->pinvoke;
2367 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2368 * address of return value to increase optimization opportunities.
2369 * Before vtype decomposition, the dreg of the call ins itself represents the
2370 * fact the call modifies the return value. After decomposition, the call will
2371 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2372 * will be transformed into an LDADDR.
2374 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2375 loada->dreg = alloc_preg (cfg);
2376 loada->inst_p0 = temp;
2377 /* We reference the call too since call->dreg could change during optimization */
2378 loada->inst_p1 = call;
2379 MONO_ADD_INS (cfg->cbb, loada);
2381 call->inst.dreg = temp->dreg;
2383 call->vret_var = loada;
2384 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2385 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2387 #ifdef MONO_ARCH_SOFT_FLOAT
2388 if (COMPILE_SOFT_FLOAT (cfg)) {
2390 * If the call has a float argument, we would need to do an r8->r4 conversion using
2391 * an icall, but that cannot be done during the call sequence since it would clobber
2392 * the call registers + the stack. So we do it before emitting the call.
2394 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2396 MonoInst *in = call->args [i];
2398 if (i >= sig->hasthis)
2399 t = sig->params [i - sig->hasthis];
2401 t = &mono_defaults.int_class->byval_arg;
2402 t = mono_type_get_underlying_type (t);
2404 if (!t->byref && t->type == MONO_TYPE_R4) {
2405 MonoInst *iargs [1];
2409 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2411 /* The result will be in an int vreg */
2412 call->args [i] = conv;
2418 call->need_unbox_trampoline = unbox_trampoline;
/* lower outgoing arguments for the selected backend */
2421 if (COMPILE_LLVM (cfg))
2422 mono_llvm_emit_call (cfg, call);
2424 mono_arch_emit_call (cfg, call);
2426 mono_arch_emit_call (cfg, call);
/* track maximum outgoing-parameter area across all calls */
2429 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2430 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument in RGCTX_REG to CALL, via the dedicated
 * MONO_ARCH_RGCTX_REG when the architecture defines one.
 */
2436 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2438 #ifdef MONO_ARCH_RGCTX_REG
2439 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2440 cfg->uses_rgctx_reg = TRUE;
2441 call->rgctx_reg = TRUE;
2443 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, wiring up the
 * optional IMT and rgctx arguments, and add it to the current bblock.
 */
2450 inline static MonoInst*
2451 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* copy the rgctx value into a fresh reg before arg lowering */
2457 rgctx_reg = mono_alloc_preg (cfg);
2458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2461 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* indirect call: the target address is sreg1 */
2463 call->inst.sreg1 = addr->dreg;
2466 emit_imt_argument (cfg, call, NULL, imt_arg);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2471 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2473 return (MonoInst*)call;
2477 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2480 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2482 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with arguments ARGS; THIS non-NULL selects virtual
 * dispatch, TAIL requests a tail call. Handles: string ctors (rewriting the
 * signature to return string), transparent-proxy/remoting wrappers, the
 * delegate Invoke fast path through delegate->invoke_impl, devirtualization
 * of non-virtual and final methods, and vtable/IMT-based virtual dispatch.
 * Returns the emitted call instruction.
 * NOTE(review): this excerpt omits some intermediate source lines (gaps in
 * the embedded line numbers), so parts of the body are not visible here.
 */
2485 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2486 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2488 #ifndef DISABLE_REMOTING
2489 gboolean might_be_remote = FALSE;
2491 gboolean virtual = this != NULL;
2492 gboolean enable_for_aot = TRUE;
2496 gboolean need_unbox_trampoline;
2499 sig = mono_method_signature (method);
2502 rgctx_reg = mono_alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2506 if (method->string_ctor) {
2507 /* Create the real signature */
2508 /* FIXME: Cache these */
2509 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2510 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2515 context_used = mini_method_check_context_used (cfg, method);
2517 #ifndef DISABLE_REMOTING
2518 might_be_remote = this && sig->hasthis &&
2519 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2520 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2522 if (might_be_remote && context_used) {
2525 g_assert (cfg->generic_sharing_context);
/* Under generic sharing the remoting wrapper address comes from the rgctx. */
2527 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2529 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2533 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2537 #ifndef DISABLE_REMOTING
2538 if (might_be_remote)
2539 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2542 call->method = method;
2543 call->inst.flags |= MONO_INST_HAS_METHOD;
2544 call->inst.inst_left = this;
2545 call->tail_call = tail;
2548 int vtable_reg, slot_reg, this_reg;
2551 this_reg = this->dreg;
2553 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2554 MonoInst *dummy_use;
2556 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2558 /* Make a call to delegate->invoke_impl */
2559 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2560 call->inst.inst_basereg = this_reg;
2561 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2562 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2564 /* We must emit a dummy use here because the delegate trampoline will
2565 replace the 'this' argument with the delegate target making this activation
2566 no longer a root for the delegate.
2567 This is an issue for delegates that target collectible code such as dynamic
2568 methods of GC'able assemblies.
2570 For a test case look into #667921.
2572 FIXME: a dummy use is not the best way to do it as the local register allocator
2573 will put it on a caller save register and spil it around the call.
2574 Ideally, we would either put it on a callee save register or only do the store part.
2576 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2578 return (MonoInst*)call;
/* Devirtualize: call non-virtual / final methods directly after a null check. */
2581 if ((!cfg->compile_aot || enable_for_aot) &&
2582 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2583 (MONO_METHOD_IS_FINAL (method) &&
2584 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2585 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2587 * the method is not virtual, we just need to ensure this is not null
2588 * and then we can call the method directly.
2590 #ifndef DISABLE_REMOTING
2591 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2593 * The check above ensures method is not gshared, this is needed since
2594 * gshared methods can't have wrappers.
2596 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2600 if (!method->string_ctor)
2601 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2603 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2604 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2606 * the method is virtual, but we can statically dispatch since either
2607 * it's class or the method itself are sealed.
2608 * But first we need to ensure it's not a null reference.
2610 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2612 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and call through a slot. */
2614 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2616 vtable_reg = alloc_preg (cfg);
2617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2618 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2620 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots sit below the vtable. */
2622 guint32 imt_slot = mono_method_get_imt_slot (method);
2623 emit_imt_argument (cfg, call, call->method, imt_arg);
2624 slot_reg = vtable_reg;
2625 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2628 if (slot_reg == -1) {
2629 slot_reg = alloc_preg (cfg);
2630 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2631 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2634 slot_reg = vtable_reg;
2635 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2636 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2637 #ifdef MONO_ARCH_HAVE_IMT
2639 g_assert (mono_method_signature (method)->generic_param_count);
2640 emit_imt_argument (cfg, call, call->method, imt_arg);
2645 call->inst.sreg1 = slot_reg;
2646 call->inst.inst_offset = offset;
2647 call->virtual = TRUE;
2651 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2654 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2656 return (MonoInst*)call;
/* Convenience wrapper: a non-tail call to METHOD with its own signature and no IMT/rgctx argument. */
2660 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2662 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct (non-virtual, non-tail) call to the native function FUNC
 * with signature SIG, and return the emitted call instruction.
 */
2666 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2673 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2676 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2678 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered at address FUNC, routing it
 * through the icall's wrapper and using the icall's registered signature.
 */
2682 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2684 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2688 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2692 * mono_emit_abs_call:
2694 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2696 inline static MonoInst*
2697 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2698 MonoMethodSignature *sig, MonoInst **args)
2700 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2704 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" to their patch infos. */
2707 if (cfg->abs_patches == NULL)
2708 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2709 g_hash_table_insert (cfg->abs_patches, ji, ji);
2710 ins = mono_emit_native_call (cfg, ji, sig, args);
2711 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   If a pinvoke (or LLVM-compiled) call returns a sub-register-sized
 * integer, emit an explicit widening conversion of the result, since the
 * callee might not initialize the upper bits of the result register.
 */
2716 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2718 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2719 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2723 * Native code might return non register sized integers
2724 * without initializing the upper bits.
2726 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2727 case OP_LOADI1_MEMBASE:
2728 widen_op = OP_ICONV_TO_I1;
2730 case OP_LOADU1_MEMBASE:
2731 widen_op = OP_ICONV_TO_U1;
2733 case OP_LOADI2_MEMBASE:
2734 widen_op = OP_ICONV_TO_I2;
2736 case OP_LOADU2_MEMBASE:
2737 widen_op = OP_ICONV_TO_U2;
2743 if (widen_op != -1) {
2744 int dreg = alloc_preg (cfg);
2747 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2748 widen->type = ins->type;
/* Return (caching it in a static) the managed String.memcpy(3-arg) helper from corlib. */
2758 get_memcpy_method (void)
2760 static MonoMethod *memcpy_method = NULL;
2761 if (!memcpy_method) {
2762 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2764 g_error ("Old corlib found. Install a new one");
2766 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set a bit in *WB_BITMAP for every pointer-sized slot of KLASS (starting
 * at byte OFFSET) that holds a managed reference. Static fields are skipped;
 * embedded value types with references are scanned recursively. Valuetype
 * field offsets are rebased past the MonoObject header.
 */
2770 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2772 MonoClassField *field;
2773 gpointer iter = NULL;
2775 while ((field = mono_class_get_fields (klass, &iter))) {
2778 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2780 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2781 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2782 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2783 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2785 MonoClass *field_class = mono_class_from_mono_type (field->type);
2786 if (field_class->has_references)
2787 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE (or, when VALUE is NULL, the
 * value in vreg VALUE_REG) through PTR. Prefers the arch-specific
 * OP_CARD_TABLE_WBARRIER opcode, then an inline card-table mark (shift,
 * mask, store 1 into the card byte), and finally a call to the generic
 * managed write-barrier method. A dummy use keeps the stored value alive
 * across the barrier. No-op when write barriers are disabled.
 */
2793 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2795 int card_table_shift_bits;
2796 gpointer card_table_mask;
2798 MonoInst *dummy_use;
2799 int nursery_shift_bits;
2800 size_t nursery_size;
2801 gboolean has_card_table_wb = FALSE;
2803 if (!cfg->gen_write_barriers)
2806 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2808 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2810 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2811 has_card_table_wb = TRUE;
2814 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2817 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2818 wbarrier->sreg1 = ptr->dreg;
2820 wbarrier->sreg2 = value->dreg;
2822 wbarrier->sreg2 = value_reg;
2823 MONO_ADD_INS (cfg->cbb, wbarrier);
2824 } else if (card_table) {
2825 int offset_reg = alloc_preg (cfg);
2826 int card_reg = alloc_preg (cfg);
2829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2830 if (card_table_mask)
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2833 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2834 * IMM's larger than 32bits.
2836 if (cfg->compile_aot) {
2837 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2839 MONO_INST_NEW (cfg, ins, OP_PCONST);
2840 ins->inst_p0 = card_table;
2841 ins->dreg = card_reg;
2842 MONO_ADD_INS (cfg->cbb, ins);
2845 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2848 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2849 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive (as a GC root) until after the barrier. */
2853 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2855 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2856 dummy_use->sreg1 = value_reg;
2857 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a copy of SIZE bytes from iargs [1] to iargs [0] for the
 * reference-containing valuetype KLASS, emitting write barriers for the
 * slots flagged by create_write_barrier_bitmap (). Large copies are routed
 * to the mono_gc_wbarrier_value_copy_bitmap icall; small ones are unrolled
 * (pointer-sized stores, then 4/2/1-byte tails which cannot be references).
 * Bails out when alignment is below pointer size or the bitmap would
 * overflow 32 slots. NOTE(review): the caller (mini_emit_stobj) uses the
 * result as a handled/not-handled predicate; the return statements are not
 * visible in this excerpt — confirm against the full source.
 */
2862 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2864 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2865 unsigned need_wb = 0;
2870 /*types with references can't have alignment smaller than sizeof(void*) */
2871 if (align < SIZEOF_VOID_P)
2874 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2875 if (size > 32 * SIZEOF_VOID_P)
2878 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2880 /* We don't unroll more than 5 stores to avoid code bloat. */
2881 if (size > 5 * SIZEOF_VOID_P) {
2882 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2883 size += (SIZEOF_VOID_P - 1);
2884 size &= ~(SIZEOF_VOID_P - 1);
2886 EMIT_NEW_ICONST (cfg, iargs [2], size);
2887 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2888 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2892 destreg = iargs [0]->dreg;
2893 srcreg = iargs [1]->dreg;
2896 dest_ptr_reg = alloc_preg (cfg);
2897 tmp_reg = alloc_preg (cfg);
2900 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2902 while (size >= SIZEOF_VOID_P) {
2903 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2907 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2909 offset += SIZEOF_VOID_P;
2910 size -= SIZEOF_VOID_P;
2913 /*tmp += sizeof (void*)*/
2914 if (size >= SIZEOF_VOID_P) {
2915 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2916 MONO_ADD_INS (cfg->cbb, iargs [0]);
2920 /* Those cannot be references since size < sizeof (void*) */
2922 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2923 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 */
2946 * Emit code to copy a valuetype of type @klass whose address is stored in
2947 * @src->dreg to memory whose address is stored at @dest->dreg.
/* When @native is TRUE, the native layout size is used and no write barriers
 * are emitted. Under gsharedvt, size and memcpy routine come from rgctx info. */
2950 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2952 MonoInst *iargs [4];
2953 int context_used, n;
2955 MonoMethod *memcpy_method;
2956 MonoInst *size_ins = NULL;
2957 MonoInst *memcpy_ins = NULL;
2961 * This check breaks with spilled vars... need to handle it during verification anyway.
2962 * g_assert (klass && klass == src->klass && klass == dest->klass);
2965 if (mini_is_gsharedvt_klass (cfg, klass)) {
2967 context_used = mini_class_check_context_used (cfg, klass);
2968 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2969 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
2973 n = mono_class_native_size (klass, &align);
2975 n = mono_class_value_size (klass, &align);
2977 /* if native is true there should be no references in the struct */
2978 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2979 /* Avoid barriers when storing to the stack */
2980 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2981 (dest->opcode == OP_LDADDR))) {
2987 context_used = mini_class_check_context_used (cfg, klass);
2989 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2990 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2992 } else if (context_used) {
2993 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2995 if (cfg->compile_aot) {
2996 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2998 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2999 mono_class_compute_gc_descriptor (klass);
3004 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3006 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline memcpy for small sizes, managed memcpy otherwise. */
3011 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3012 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3013 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3018 iargs [2] = size_ins;
3020 EMIT_NEW_ICONST (cfg, iargs [2], n);
3022 memcpy_method = get_memcpy_method ();
3024 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3026 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return (caching it in a static) the managed String.memset(3-arg) helper from corlib. */
3031 get_memset_method (void)
3033 static MonoMethod *memset_method = NULL;
3034 if (!memset_method) {
3035 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3037 g_error ("Old corlib found. Install a new one");
3039 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype KLASS at address DEST->dreg.
 * Under gsharedvt the size and a bzero routine come from the rgctx info;
 * otherwise small sizes use an inline memset and larger ones call the
 * managed String.memset helper.
 */
3043 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3045 MonoInst *iargs [3];
3046 int n, context_used;
3048 MonoMethod *memset_method;
3049 MonoInst *size_ins = NULL;
3050 MonoInst *bzero_ins = NULL;
3051 static MonoMethod *bzero_method;
3053 /* FIXME: Optimize this for the case when dest is an LDADDR */
3055 mono_class_init (klass);
3056 if (mini_is_gsharedvt_klass (cfg, klass)) {
3057 context_used = mini_class_check_context_used (cfg, klass);
3058 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3059 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3061 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3062 g_assert (bzero_method);
3064 iargs [1] = size_ins;
3065 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3069 n = mono_class_value_size (klass, &align);
3071 if (n <= sizeof (gpointer) * 5) {
3072 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3075 memset_method = get_memset_method ();
3077 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3078 EMIT_NEW_ICONST (cfg, iargs [2], n);
3079 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD: the method
 * rgctx (mrgctx) when the method's own context is used, the class vtable
 * (possibly loaded out of the mrgctx) for static/valuetype methods, or the
 * vtable read from the 'this' argument otherwise. Requires generic sharing.
 */
3084 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3086 MonoInst *this = NULL;
3088 g_assert (cfg->generic_sharing_context);
3090 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3091 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3092 !method->klass->valuetype)
3093 EMIT_NEW_ARGLOAD (cfg, this, 0);
3095 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3096 MonoInst *mrgctx_loc, *mrgctx_var;
3099 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3101 mrgctx_loc = mono_get_vtable_var (cfg);
3102 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3105 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3106 MonoInst *vtable_loc, *vtable_var;
3110 vtable_loc = mono_get_vtable_var (cfg);
3111 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3113 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3114 MonoInst *mrgctx_var = vtable_var;
3117 vtable_reg = alloc_preg (cfg);
3118 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3119 vtable_var->type = STACK_PTR;
3127 vtable_reg = alloc_preg (cfg);
3128 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor for METHOD with an
 * embedded MonoJumpInfo of PATCH_TYPE/PATCH_DATA and info type INFO_TYPE;
 * IN_MRGCTX records whether the lookup goes through the method rgctx.
 */
3133 static MonoJumpInfoRgctxEntry *
3134 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3136 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3137 res->method = method;
3138 res->in_mrgctx = in_mrgctx;
3139 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3140 res->data->type = patch_type;
3141 res->data->data.target = patch_data;
3142 res->info_type = info_type;
/* Emit a call to the lazy rgctx fetch trampoline that resolves ENTRY from RGCTX. */
3147 static inline MonoInst*
3148 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3150 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR loading the RGCTX_TYPE property of KLASS via an rgctx fetch. */
3154 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3155 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3157 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3158 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3160 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE property of signature SIG via an rgctx fetch. */
3164 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3165 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3167 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3168 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3170 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading the RGCTX_TYPE property of a gsharedvt call described
 * by (SIG, CMETHOD) via an rgctx fetch.
 */
3174 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3175 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3177 MonoJumpInfoGSharedVtCall *call_info;
3178 MonoJumpInfoRgctxEntry *entry;
3181 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3182 call_info->sig = sig;
3183 call_info->method = cmethod;
3185 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3186 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3188 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the gsharedvt info blob (INFO) of CMETHOD via an rgctx fetch. */
3193 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3194 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3196 MonoJumpInfoRgctxEntry *entry;
3199 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3200 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3202 return emit_rgctx_fetch (cfg, rgctx, entry);
3206 * emit_get_rgctx_method:
3208 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3209 * normal constants, else emit a load from the rgctx.
3212 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3213 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3215 if (!context_used) {
/* Non-shared case: only METHOD and METHOD_RGCTX can be emitted as constants. */
3218 switch (rgctx_type) {
3219 case MONO_RGCTX_INFO_METHOD:
3220 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3222 case MONO_RGCTX_INFO_METHOD_RGCTX:
3223 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3226 g_assert_not_reached ();
3229 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3230 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3232 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE property of FIELD via an rgctx fetch. */
3237 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3238 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3240 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3241 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3243 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the template entry matching (DATA, RGCTX_TYPE) in
 * the current method's gsharedvt info, appending a new entry when no equal
 * one exists (LOCAL_OFFSET entries are intentionally never deduplicated).
 */
3247 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3249 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3250 MonoRuntimeGenericContextInfoTemplate *template;
3255 for (i = 0; i < info->entries->len; ++i) {
3256 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3258 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3262 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3263 template->info_type = rgctx_type;
3264 template->data = data;
3266 idx = info->entries->len;
3268 g_ptr_array_add (info->entries, template);
3274 * emit_get_gsharedvt_info:
3276 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3279 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3284 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3285 /* Load info->entries [idx] */
3286 dreg = alloc_preg (cfg);
3287 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for KLASS keyed by its byval type. */
3293 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3295 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing its
 * vtable (from the rgctx under sharing, as a constant otherwise); the
 * vtable is bound to MONO_ARCH_VTABLE_REG when that register exists.
 */
3299 * On return the caller must check @klass for load errors.
3302 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3304 MonoInst *vtable_arg;
3308 context_used = mini_class_check_context_used (cfg, klass);
3311 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3312 klass, MONO_RGCTX_INFO_VTABLE);
3314 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3318 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3321 if (COMPILE_LLVM (cfg))
3322 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3324 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3325 #ifdef MONO_ARCH_VTABLE_REG
3326 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3327 cfg->uses_vtable_reg = TRUE;
/* Emit a debugger sequence point at IP when sequence points are enabled and
 * we are compiling METHOD itself (not an inlined callee). */
3334 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3338 if (cfg->gen_seq_points && cfg->method == method) {
3339 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3340 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (read from the
 * object's vtable in OBJ_REG) and target class KLASS into the JIT TLS data,
 * so a failing cast can report both sides.
 */
3345 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3347 if (mini_get_debug_options ()->better_cast_details) {
3348 int to_klass_reg = alloc_preg (cfg);
3349 int vtable_reg = alloc_preg (cfg);
3350 int klass_reg = alloc_preg (cfg);
3351 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3354 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3358 MONO_ADD_INS (cfg->cbb, tls_get);
3359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3363 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the cast-details TLS slot written by save_cast_details () after a
 * successful cast. */
3369 reset_cast_details (MonoCompile *cfg)
3371 /* Reset the variables holding the cast details */
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3375 MONO_ADD_INS (cfg->cbb, tls_get);
3376 /* It is enough to reset the from field */
3377 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly of type ARRAY_CLASS by
 * comparing its vtable (or class under MONO_OPT_SHARED) against the
 * expected one, throwing ArrayTypeMismatchException on mismatch. Handles
 * AOT and generic-sharing variants of the comparison.
 */
3382 * On return the caller must check @array_class for load errors
3385 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3387 int vtable_reg = alloc_preg (cfg);
3390 context_used = mini_class_check_context_used (cfg, array_class);
3392 save_cast_details (cfg, array_class, obj->dreg);
3394 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3396 if (cfg->opt & MONO_OPT_SHARED) {
3397 int class_reg = alloc_preg (cfg);
3398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3399 if (cfg->compile_aot) {
3400 int klass_reg = alloc_preg (cfg);
3401 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3402 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3404 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3406 } else if (context_used) {
3407 MonoInst *vtable_ins;
3409 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3410 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3412 if (cfg->compile_aot) {
3416 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3418 vt_reg = alloc_preg (cfg);
3419 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3420 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3423 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3429 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3431 reset_cast_details (cfg);
3435 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3436 * generic code is generated.
3439 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3441 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared case: call Nullable<T>.Unbox indirectly through an rgctx-provided address. */
3444 MonoInst *rgctx, *addr;
3446 /* FIXME: What if the class is shared? We might not
3447 have to get the address of the method from the
3449 addr = emit_get_rgctx_method (cfg, context_used, method,
3450 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3452 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3454 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, passing the vtable when required by sharing checks. */
3456 gboolean pass_vtable, pass_mrgctx;
3457 MonoInst *rgctx_arg = NULL;
3459 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3460 g_assert (!pass_mrgctx);
3463 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3466 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3469 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp [0] to valuetype KLASS: check the object is not an
 * array (rank 0), verify its element class matches KLASS (via rgctx under
 * sharing), and return an instruction producing the address just past the
 * MonoObject header.
 */
3474 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3478 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3479 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3480 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3481 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3483 obj_reg = sp [0]->dreg;
3484 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3487 /* FIXME: generics */
3488 g_assert (klass->rank == 0);
3491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3492 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3498 MonoInst *element_class;
3500 /* This assertion is from the unboxcast insn */
3501 g_assert (klass->rank == 0);
3503 element_class = emit_get_rgctx_klass (cfg, context_used,
3504 klass->element_class, MONO_RGCTX_INFO_KLASS);
3506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3507 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3509 save_cast_details (cfg, klass->element_class, obj_reg);
3510 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3511 reset_cast_details (cfg);
/* The unboxed payload starts right after the MonoObject header. */
3514 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3515 MONO_ADD_INS (cfg->cbb, add);
3516 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit a gsharedvt unbox of OBJ to KLASS whose concrete layout is only
 * known at runtime. Branches on the class's box-type info: for value types
 * the payload address past the MonoObject header is used; for reference
 * types the ref is spilled to a temporary and that temporary's address is
 * used; for Nullable<T> the rgctx-provided unbox routine is called. All
 * paths merge into ADDR_REG, and the result is loaded from it. *OUT_CBB
 * receives the basic block where control continues.
 * NOTE(review): this excerpt omits some intermediate source lines (gaps in
 * the embedded line numbers), so parts of the body are not visible here.
 */
3523 handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3525 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3526 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3530 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3536 args [1] = klass_inst;
3539 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
3541 NEW_BBLOCK (cfg, is_ref_bb);
3542 NEW_BBLOCK (cfg, is_nullable_bb);
3543 NEW_BBLOCK (cfg, end_bb);
3544 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3551 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3552 addr_reg = alloc_dreg (cfg, STACK_MP);
3556 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3557 MONO_ADD_INS (cfg->cbb, addr);
3559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3562 MONO_START_BB (cfg, is_ref_bb);
3564 /* Save the ref to a temporary */
3565 dreg = alloc_ireg (cfg);
3566 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3567 addr->dreg = addr_reg;
3568 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3572 MONO_START_BB (cfg, is_nullable_bb);
3575 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3576 MonoInst *unbox_call;
3577 MonoMethodSignature *unbox_sig;
3580 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3582 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3583 unbox_sig->ret = &klass->byval_arg;
3584 unbox_sig->param_count = 1;
3585 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3586 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3588 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3589 addr->dreg = addr_reg;
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3595 MONO_START_BB (cfg, end_bb);
3598 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3600 *out_cbb = cfg->cbb;
3606 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS (FOR_BOX distinguishes allocations
 * done for boxing).  Chooses between several allocation strategies: a managed
 * allocator method, the generic mono_object_new icall (shared/AppDomain-aware
 * code), a vtable-specific icall, or a compact corlib helper for out-of-line
 * AOT code.  Returns the instruction producing the object reference.
 * NOTE(review): excerpt elides some lines (e.g. the context_used branch header
 * and closing braces) — comments describe only the visible code.
 */
3609 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3611 MonoInst *iargs [2];
3617 MonoInst *iargs [2];
3619 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* With MONO_OPT_SHARED the allocator needs the MonoClass (plus domain);
 * otherwise it takes the vtable, fetched through the RGCTX. */
3621 if (cfg->opt & MONO_OPT_SHARED)
3622 rgctx_info = MONO_RGCTX_INFO_KLASS;
3624 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3625 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3627 if (cfg->opt & MONO_OPT_SHARED) {
3628 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3630 alloc_ftn = mono_object_new;
3633 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's managed allocator when available (not valid with shared opt). */
3636 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3637 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3639 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-context_used path below. */
3642 if (cfg->opt & MONO_OPT_SHARED) {
3643 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3644 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3646 alloc_ftn = mono_object_new;
3647 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3648 /* This happens often in argument checking code, eg. throw new FooException... */
3649 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3650 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3651 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3653 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3654 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load failure on the cfg. */
3658 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3659 cfg->exception_ptr = klass;
3663 #ifndef MONO_CROSS_COMPILE
3664 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3667 if (managed_alloc) {
3668 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3669 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3671 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator takes the instance size in pointer-sized words. */
3673 guint32 lw = vtable->klass->instance_size;
3674 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3675 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3676 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3679 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3683 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3687 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL as an instance of KLASS.  Nullable types are boxed by
 * calling Nullable<T>.Box; gsharedvt classes dispatch at runtime on the class
 * box type (ref / nullable / plain vtype); everything else allocates an object
 * and stores the value after the MonoObject header.  *OUT_CBB receives the
 * basic block the caller should continue in.
 * NOTE(review): excerpt elides some lines (returns, closing braces) — comments
 * describe only the visible code.
 */
3690 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3692 MonoInst *alloc, *ins;
3694 *out_cbb = cfg->cbb;
/* Nullable<T>: boxing is done by the managed Nullable<T>.Box(T) method. */
3696 if (mono_class_is_nullable (klass)) {
3697 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3700 /* FIXME: What if the class is shared? We might not
3701 have to get the method address from the RGCTX. */
3702 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3703 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3704 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3706 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3708 gboolean pass_vtable, pass_mrgctx;
3709 MonoInst *rgctx_arg = NULL;
3711 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3712 g_assert (!pass_mrgctx);
3715 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3718 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3721 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: representation known only at runtime; branch on the box type. */
3725 if (mini_is_gsharedvt_klass (cfg, klass)) {
3726 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3727 MonoInst *res, *is_ref, *src_var, *addr;
3730 dreg = alloc_ireg (cfg);
3732 NEW_BBLOCK (cfg, is_ref_bb);
3733 NEW_BBLOCK (cfg, is_nullable_bb);
3734 NEW_BBLOCK (cfg, end_bb);
/* 1 = reference type, 2 = nullable; fall through for plain vtypes. */
3735 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3737 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Plain vtype: allocate and copy the value after the object header. */
3743 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3747 ins->opcode = OP_STOREV_MEMBASE;
3749 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3750 res->type = STACK_OBJ;
3752 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference type: "boxing" is just loading the reference out of the vtype slot. */
3755 MONO_START_BB (cfg, is_ref_bb);
3756 addr_reg = alloc_ireg (cfg);
3758 /* val is a vtype, so has to load the value manually */
3759 src_var = get_vreg_to_inst (cfg, val->dreg);
3761 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3762 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable: indirect call to the runtime-resolved Nullable<T>.Box. */
3767 MONO_START_BB (cfg, is_nullable_bb);
3770 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3771 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3773 MonoMethodSignature *box_sig;
3776 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3777 * construct that method at JIT time, so have to do things by hand.
3779 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3780 box_sig->ret = &mono_defaults.object_class->byval_arg;
3781 box_sig->param_count = 1;
3782 box_sig->params [0] = &klass->byval_arg;
3783 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3784 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3785 res->type = STACK_OBJ;
3789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3791 MONO_START_BB (cfg, end_bb);
3793 *out_cbb = cfg->cbb;
/* Default (non-nullable, non-gsharedvt) path: allocate, then store the value. */
3797 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3801 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has a (co/contra)variant generic parameter that is
 * instantiated with a reference type.  Such classes need the slower
 * cache-based cast helpers, since variance makes the simple class-hierarchy
 * check insufficient.
 * NOTE(review): excerpt elides some lines (e.g. the early "return FALSE" and
 * loop body continuations).
 */
3808 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3811 MonoGenericContainer *container;
3812 MonoGenericInst *ginst;
/* Pick the generic container + instantiation either from the inflated class
 * or, for an open class in shared code, from the container itself. */
3814 if (klass->generic_class) {
3815 container = klass->generic_class->container_class->generic_container;
3816 ginst = klass->generic_class->context.class_inst;
3817 } else if (klass->generic_container && context_used) {
3818 container = klass->generic_container;
3819 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types. */
3824 for (i = 0; i < container->type_argc; ++i) {
3826 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3828 type = ginst->type_argv [i];
3829 if (mini_type_is_reference (cfg, type))
/* is_complex_isinst: should classify classes whose isinst/castclass cannot be
 * done with a simple inline hierarchy check (interfaces, arrays, nullables,
 * MarshalByRef, sealed classes, type variables).  The leading "TRUE ||" makes
 * the macro unconditionally true, forcing the cache-based helper path for all
 * classes — see the FIXME below. */
3835 // FIXME: This doesn't work yet (class libs tests fail?)
3836 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3839 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: verify that SRC is an instance of KLASS,
 * throwing InvalidCastException otherwise.  NULL passes the cast.  "Complex"
 * classes (see is_complex_isinst, currently all classes) go through the
 * cache-based castclass wrapper; otherwise an inline vtable/class check is
 * emitted.
 * NOTE(review): excerpt elides some lines (args [0] setup, returns, closing
 * braces) — comments describe only the visible code.
 */
3842 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3844 MonoBasicBlock *is_null_bb;
3845 int obj_reg = src->dreg;
3846 int vtable_reg = alloc_preg (cfg);
3847 MonoInst *klass_inst = NULL;
/* Variant generics / complex classes: call the castclass-with-cache wrapper. */
3852 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3853 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3854 MonoInst *cache_ins;
3856 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3861 /* klass - it's the second element of the cache entry*/
3862 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3865 args [2] = cache_ins;
3867 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3870 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: NULL always passes the cast. */
3873 NEW_BBLOCK (cfg, is_null_bb);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3878 save_cast_details (cfg, klass, obj_reg);
3880 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3882 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3884 int klass_reg = alloc_preg (cfg);
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class, non-AOT: a single class pointer comparison suffices. */
3888 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3889 /* the remoting code is broken, access the class for now */
3890 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
3891 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3893 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3894 cfg->exception_ptr = klass;
3897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3899 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3902 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full hierarchy check on the object's class. */
3904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3905 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3909 MONO_START_BB (cfg, is_null_bb);
3911 reset_cast_details (cfg);
3917 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the isinst opcode: produce SRC if it is an instance of KLASS,
 * NULL otherwise (res_reg).  Complex classes go through the cache-based
 * isinst wrapper; otherwise inline checks are emitted, with special cases for
 * interfaces, arrays (rank + element class checks, enum/object element
 * special-casing), nullables, and sealed classes.
 * NOTE(review): excerpt elides some lines (args [0] setup, returns, closing
 * braces) — comments describe only the visible code.
 */
3920 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3923 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3924 int obj_reg = src->dreg;
3925 int vtable_reg = alloc_preg (cfg);
3926 int res_reg = alloc_ireg_ref (cfg);
3927 MonoInst *klass_inst = NULL;
/* Variant generics / complex classes: call the isinst-with-cache wrapper. */
3932 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3933 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3934 MonoInst *cache_ins;
3936 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3941 /* klass - it's the second element of the cache entry*/
3942 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3945 args [2] = cache_ins;
3947 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3950 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3953 NEW_BBLOCK (cfg, is_null_bb);
3954 NEW_BBLOCK (cfg, false_bb);
3955 NEW_BBLOCK (cfg, end_bb);
3957 /* Do the assignment at the beginning, so the other assignment can be if converted */
3958 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3959 ins->type = STACK_OBJ;
/* NULL input yields NULL output (via is_null_bb, which keeps res_reg == obj). */
3962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3963 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3967 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3968 g_assert (!context_used);
3969 /* the is_null_bb target simply copies the input register to the output */
3970 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3972 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3975 int rank_reg = alloc_preg (cfg);
3976 int eclass_reg = alloc_preg (cfg);
3978 g_assert (!context_used);
3979 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3981 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] / Enum[] / enum-element arrays need bespoke element checks. */
3984 if (klass->cast_class == mono_defaults.object_class) {
3985 int parent_reg = alloc_preg (cfg);
3986 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3987 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3988 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3989 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3990 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3991 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3992 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3994 } else if (klass->cast_class == mono_defaults.enum_class) {
3995 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3996 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3997 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3998 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4000 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4001 /* Check that the object is a vector too */
4002 int bounds_reg = alloc_preg (cfg);
4003 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4008 /* the is_null_bb target simply copies the input register to the output */
4009 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4011 } else if (mono_class_is_nullable (klass)) {
4012 g_assert (!context_used);
4013 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4014 /* the is_null_bb target simply copies the input register to the output */
4015 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class, non-AOT: a single vtable comparison decides the result. */
4017 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4018 g_assert (!context_used);
4019 /* the remoting code is broken, access the class for now */
4020 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
4021 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4023 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4024 cfg->exception_ptr = klass;
4027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4029 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: full inline hierarchy check. */
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4036 /* the is_null_bb target simply copies the input register to the output */
4037 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
4042 MONO_START_BB (cfg, false_bb);
4044 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4045 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4047 MONO_START_BB (cfg, is_null_bb);
4049 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode (used by remoting-aware cast
 * wrappers).  See the comment below for the 0/1/2 result encoding.  With
 * remoting enabled, transparent proxies get extra checks via their
 * remote_class/custom_type_info; with DISABLE_REMOTING the proxy paths
 * g_error out.
 * NOTE(review): excerpt elides some lines (#else/#endif branches, returns) —
 * comments describe only the visible code.
 */
4055 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4057 /* This opcode takes as input an object reference and a class, and returns:
4058 0) if the object is an instance of the class,
4059 1) if the object is not an instance of the class,
4060 2) if the object is a proxy whose type cannot be determined */
4063 #ifndef DISABLE_REMOTING
4064 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4066 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4068 int obj_reg = src->dreg;
4069 int dreg = alloc_ireg (cfg);
4071 #ifndef DISABLE_REMOTING
4072 int klass_reg = alloc_preg (cfg);
4075 NEW_BBLOCK (cfg, true_bb);
4076 NEW_BBLOCK (cfg, false_bb);
4077 NEW_BBLOCK (cfg, end_bb);
4078 #ifndef DISABLE_REMOTING
4079 NEW_BBLOCK (cfg, false2_bb);
4080 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
4083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4086 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4087 #ifndef DISABLE_REMOTING
4088 NEW_BBLOCK (cfg, interface_fail_bb);
4091 tmp_reg = alloc_preg (cfg);
4092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4093 #ifndef DISABLE_REMOTING
/* Interface check failed: see whether the object is a transparent proxy with
 * unknown custom type info (result 2). */
4094 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4095 MONO_START_BB (cfg, interface_fail_bb);
4096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4098 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4100 tmp_reg = alloc_preg (cfg);
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4102 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4105 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class path. */
4108 #ifndef DISABLE_REMOTING
4109 tmp_reg = alloc_preg (cfg);
4110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4111 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Transparent proxy: check against the proxy's remote class instead. */
4113 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4114 tmp_reg = alloc_preg (cfg);
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4118 tmp_reg = alloc_preg (cfg);
4119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4123 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4126 MONO_START_BB (cfg, no_proxy_bb);
4128 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4130 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the three result values and converge. */
4134 MONO_START_BB (cfg, false_bb);
4136 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4139 #ifndef DISABLE_REMOTING
4140 MONO_START_BB (cfg, false2_bb);
4142 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4146 MONO_START_BB (cfg, true_bb);
4148 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4150 MONO_START_BB (cfg, end_bb);
/* Result instruction carrying dreg as an I4. */
4153 MONO_INST_NEW (cfg, ins, OP_ICONST);
4155 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode (remoting-aware castclass).
 * See the comment below for the 0/1/throw result encoding.  Transparent
 * proxies with no custom type info yield 1 ("cannot determine"); other
 * failures raise InvalidCastException.  With DISABLE_REMOTING the proxy
 * paths g_error out.
 * NOTE(review): excerpt elides some lines (#else/#endif branches, returns) —
 * comments describe only the visible code.
 */
4161 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4163 /* This opcode takes as input an object reference and a class, and returns:
4164 0) if the object is an instance of the class,
4165 1) if the object is a proxy whose type cannot be determined
4166 an InvalidCastException exception is thrown otherwise*/
4169 #ifndef DISABLE_REMOTING
4170 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4172 MonoBasicBlock *ok_result_bb;
4174 int obj_reg = src->dreg;
4175 int dreg = alloc_ireg (cfg);
4176 int tmp_reg = alloc_preg (cfg);
4178 #ifndef DISABLE_REMOTING
4179 int klass_reg = alloc_preg (cfg);
4180 NEW_BBLOCK (cfg, end_bb);
4183 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL always casts successfully (result 0). */
4185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4186 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4188 save_cast_details (cfg, klass, obj_reg);
4190 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4191 #ifndef DISABLE_REMOTING
4192 NEW_BBLOCK (cfg, interface_fail_bb);
4194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* On interface-cast failure, only a transparent proxy may survive: a proxy
 * without custom type info throws; with it, result is 1. */
4195 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4196 MONO_START_BB (cfg, interface_fail_bb);
4197 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4199 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4201 tmp_reg = alloc_preg (cfg);
4202 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4204 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4206 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: plain interface cast, throws on failure. */
4209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4210 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4211 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface class path. */
4214 #ifndef DISABLE_REMOTING
4215 NEW_BBLOCK (cfg, no_proxy_bb);
4217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Transparent proxy: check against the proxy's remote class instead. */
4219 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4221 tmp_reg = alloc_preg (cfg);
4222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4225 tmp_reg = alloc_preg (cfg);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4230 NEW_BBLOCK (cfg, fail_1_bb);
4232 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4234 MONO_START_BB (cfg, fail_1_bb);
4236 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4239 MONO_START_BB (cfg, no_proxy_bb);
4241 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4243 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* Success path (result 0) and convergence. */
4247 MONO_START_BB (cfg, ok_result_bb);
4249 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4251 #ifndef DISABLE_REMOTING
4252 MONO_START_BB (cfg, end_bb);
/* Result instruction carrying dreg as an I4. */
4256 MONO_INST_NEW (cfg, ins, OP_ICONST);
4258 ins->type = STACK_I4;
4264 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS and inlines the work of
 * mono_delegate_ctor: sets the target, method, optional per-domain method_code
 * slot, and invoke_impl trampoline fields.  Write barriers are emitted for the
 * reference stores when the GC requires them.  Returns the instruction
 * producing the delegate object.
 * NOTE(review): excerpt elides some lines (declarations, returns, closing
 * braces) — comments describe only the visible code.
 */
4266 static G_GNUC_UNUSED MonoInst*
4267 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4271 gpointer *trampoline;
4272 MonoInst *obj, *method_ins, *tramp_ins;
4276 obj = handle_alloc (cfg, klass, FALSE, 0);
4280 /* Inline the contents of mono_delegate_ctor */
4282 /* Set target field */
4283 /* Optimize away setting of NULL target */
4284 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4285 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4286 if (cfg->gen_write_barriers) {
4287 dreg = alloc_preg (cfg);
4288 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4289 emit_write_barrier (cfg, ptr, target, 0);
4293 /* Set method field */
4294 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4296 if (cfg->gen_write_barriers) {
4297 dreg = alloc_preg (cfg);
4298 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4299 emit_write_barrier (cfg, ptr, method_ins, 0);
4302 * To avoid looking up the compiled code belonging to the target method
4303 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4304 * store it, and we fill it after the method has been compiled.
4306 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4307 MonoInst *code_slot_ins;
4310 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and cache the per-domain method -> code-slot mapping
 * under the domain lock. */
4312 domain = mono_domain_get ();
4313 mono_domain_lock (domain);
4314 if (!domain_jit_info (domain)->method_code_hash)
4315 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4316 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4318 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4319 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4321 mono_domain_unlock (domain);
4323 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4328 /* Set invoke_impl field */
4329 if (cfg->compile_aot) {
4330 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4332 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4333 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4337 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper to create
 * a multi-dimensional array; SP holds the dimension arguments.  Marks the cfg
 * as having varargs and disables LLVM, since the icall uses a vararg calling
 * convention.
 */
4343 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4345 MonoJitICallInfo *info;
4347 /* Need to register the icall so it gets an icall wrapper */
4348 info = mono_get_array_new_va_icall (rank);
4350 cfg->flags |= MONO_CFG_HAS_VARARGS;
4352 /* mono_array_new_va () needs a vararg calling convention */
4353 cfg->disable_llvm = TRUE;
4355 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4356 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable that has not been materialized yet,
 * insert an OP_LOAD_GOTADDR at the very start of the entry basic block, and
 * add a dummy use in the exit block so liveness analysis keeps the variable
 * alive for the whole method.
 */
4360 mono_emit_load_got_addr (MonoCompile *cfg)
4362 MonoInst *getaddr, *dummy_use;
4364 if (!cfg->got_var || cfg->got_var_allocated)
4367 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4368 getaddr->cil_code = cfg->header->code;
4369 getaddr->dreg = cfg->got_var->dreg;
4371 /* Add it to the start of the first bblock */
4372 if (cfg->bb_entry->code) {
4373 getaddr->next = cfg->bb_entry->code;
4374 cfg->bb_entry->code = getaddr;
4377 MONO_ADD_INS (cfg->bb_entry, getaddr);
4379 cfg->got_var_allocated = TRUE;
4382 * Add a dummy use to keep the got_var alive, since real uses might
4383 * only be generated by the back ends.
4384 * Add it to end_bblock, so the variable's lifetime covers the whole
4386 * It would be better to make the usage of the got var explicit in all
4387 * cases when the backend needs it (i.e. calls, throw etc.), so this
4388 * wouldn't be needed.
4390 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4391 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inlining size limit, initialized lazily in mono_method_check_inlining
 * from the MONO_INLINELIMIT env var (default INLINE_LENGTH_LIMIT). */
4394 static int inline_limit;
4395 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects: shared generic code, inline depth > 10, noinline/synchronized/
 * MarshalByRef methods, bodies at or over the size limit (unless marked
 * AggressiveInlining), classes whose cctor cannot be run/verified up front,
 * methods with declarative security, and (on soft-float targets) methods
 * with R4 returns or parameters.
 * NOTE(review): excerpt elides some lines (returns, closing braces) —
 * comments describe only the visible code.
 */
4398 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4400 MonoMethodHeaderSummary header;
4402 #ifdef MONO_ARCH_SOFT_FLOAT
4403 MonoMethodSignature *sig = mono_method_signature (method);
4407 if (cfg->generic_sharing_context)
4410 if (cfg->inline_depth > 10)
4413 #ifdef MONO_ARCH_HAVE_LMF_OPS
4414 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4415 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4416 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4421 if (!mono_method_get_header_summary (method, &header))
4424 /*runtime, icall and pinvoke are checked by summary call*/
4425 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4426 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4427 (mono_class_is_marshalbyref (method->klass)) ||
4431 /* also consider num_locals? */
4432 /* Do the size check early to avoid creating vtables */
4433 if (!inline_limit_inited) {
4434 if (getenv ("MONO_INLINELIMIT"))
4435 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
4437 inline_limit = INLINE_LENGTH_LIMIT;
4438 inline_limit_inited = TRUE;
4440 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4444 * if we can initialize the class of the method right away, we do,
4445 * otherwise we don't allow inlining if the class needs initialization,
4446 * since it would mean inserting a call to mono_runtime_class_init()
4447 * inside the inlined code
4449 if (!(cfg->opt & MONO_OPT_SHARED)) {
4450 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4451 if (cfg->run_cctors && method->klass->has_cctor) {
4452 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4453 if (!method->klass->runtime_info)
4454 /* No vtable created yet */
4456 vtable = mono_class_vtable (cfg->domain, method->klass);
4459 /* This makes it so that inlining cannot trigger */
4460 /* .cctors: too many apps depend on them */
4461 /* running with a specific order... */
4462 if (! vtable->initialized)
4464 mono_runtime_class_init (vtable);
4466 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4467 if (!method->klass->runtime_info)
4468 /* No vtable created yet */
4470 vtable = mono_class_vtable (cfg->domain, method->klass);
4473 if (!vtable->initialized)
4478 * If we're compiling for shared code
4479 * the cctor will need to be run at aot method load time, for example,
4480 * or at the end of the compilation of the inlining method.
4482 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4487 * CAS - do not inline methods with declarative security
4488 * Note: this has to be before any possible return TRUE;
4490 if (mono_security_method_has_declsec (method))
4493 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need emulation helpers; don't inline such methods. */
4495 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4497 for (i = 0; i < sig->param_count; ++i)
4498 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires running the class
 * constructor of VTABLE's class first.  Already-initialized vtables (non-AOT),
 * beforefieldinit classes, classes with no pending cctor, and instance methods
 * of the class itself (cctor already ran before the call) do not need it.
 * NOTE(review): the individual return statements are elided in this excerpt.
 */
4506 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4508 if (vtable->initialized && !cfg->compile_aot)
4511 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4514 if (!mono_class_needs_cctor_run (vtable->klass, method))
4517 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4518 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of one-dimensional array
 * ARR with element type KLASS, with an optional bounds check (BCHECK).
 * On x86/amd64, power-of-two element sizes use a single LEA; otherwise the
 * offset is computed with a multiply (by a runtime element size for gsharedvt
 * element types) plus the MonoArray vector offset.
 * NOTE(review): excerpt elides some lines (returns, #else branches, closing
 * braces) — comments describe only the visible code.
 */
4525 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4529 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4532 if (mini_is_gsharedvt_klass (cfg, klass)) {
4535 mono_class_init (klass);
4536 size = mono_class_array_element_size (klass);
4539 mult_reg = alloc_preg (cfg);
4540 array_reg = arr->dreg;
4541 index_reg = index->dreg;
4543 #if SIZEOF_REGISTER == 8
4544 /* The array reg is 64 bits but the index reg is only 32 */
4545 if (COMPILE_LLVM (cfg)) {
4547 index2_reg = index_reg;
4549 index2_reg = alloc_preg (cfg);
4550 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow a 64-bit index down to 32 bits. */
4553 if (index->type == STACK_I8) {
4554 index2_reg = alloc_preg (cfg);
4555 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4557 index2_reg = index_reg;
4562 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4564 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: element size is a power of two, address fits in one LEA. */
4565 if (size == 1 || size == 2 || size == 4 || size == 8) {
4566 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4568 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4569 ins->klass = mono_class_get_element_class (klass);
4570 ins->type = STACK_MP;
4576 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt element: fetch the element size from the RGCTX at runtime. */
4579 MonoInst *rgctx_ins;
4582 g_assert (cfg->generic_sharing_context);
4583 context_used = mini_class_check_context_used (cfg, klass);
4584 g_assert (context_used);
4585 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4586 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index * size + offsetof (MonoArray, vector) */
4590 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4591 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4592 ins->klass = mono_class_get_element_class (klass);
4593 ins->type = STACK_MP;
4594 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a two-dimensional array:
 * for each dimension it loads lower_bound/length from the MonoArrayBounds
 * table, rebases the index, and emits an unsigned-compare bounds check
 * throwing IndexOutOfRangeException.  The final address is
 * ((realidx1 * length2) + realidx2) * element_size + arr + offsetof
 * (MonoArray, vector).  Only compiled when the target has real mul/div
 * (guarded by MONO_ARCH_EMULATE_MUL_DIV below).
 * NOTE(review): excerpt is missing lines (return type line, some braces,
 * the i8-index handling noted in the FIXME) — verify against the full file.
 */
4599 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4601 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4603 int bounds_reg = alloc_preg (cfg);
4604 int add_reg = alloc_ireg_mp (cfg);
4605 int mult_reg = alloc_preg (cfg);
4606 int mult2_reg = alloc_preg (cfg);
4607 int low1_reg = alloc_preg (cfg);
4608 int low2_reg = alloc_preg (cfg);
4609 int high1_reg = alloc_preg (cfg);
4610 int high2_reg = alloc_preg (cfg);
4611 int realidx1_reg = alloc_preg (cfg);
4612 int realidx2_reg = alloc_preg (cfg);
4613 int sum_reg = alloc_preg (cfg);
4614 int index1, index2, tmpreg;
4618 mono_class_init (klass);
4619 size = mono_class_array_element_size (klass);
4621 index1 = index_ins1->dreg;
4622 index2 = index_ins2->dreg;
4624 #if SIZEOF_REGISTER == 8
4625 /* The array reg is 64 bits but the index reg is only 32 */
4626 if (COMPILE_LLVM (cfg)) {
4629 tmpreg = alloc_preg (cfg);
4630 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4632 tmpreg = alloc_preg (cfg);
4633 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4637 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4641 /* range checking */
4642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4643 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: rebase by lower_bound, unsigned-compare against length. */
4645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4646 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4647 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4648 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4649 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4650 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4651 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same, using the second MonoArrayBounds slot. */
4653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4654 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4655 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4657 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4659 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = ((realidx1 * length2) + realidx2) * size + arr + vector offset. */
4661 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4662 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4664 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4665 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4667 ins->type = STACK_MP;
4669 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for CMETHOD (an array Address/Get/
 * Set accessor): rank derived from the signature (minus one arg for setters),
 * the inlined 1-D path for rank 1, the 2-D path for rank 2 when intrinsics
 * are enabled and the arch supports mul/div, otherwise a call to the
 * marshal-generated Address wrapper.
 * NOTE(review): excerpt is missing lines (rank==1 guard, return of ADDR) —
 * verify against the full file.
 */
4676 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4680 MonoMethod *addr_method;
4683 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4686 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4688 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4689 /* emit_ldelema_2 depends on OP_LMUL */
4690 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4691 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic fallback: call the runtime-generated Address method. */
4695 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4696 addr_method = mono_marshal_get_array_address (rank, element_size);
4697 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see
 * mono_set_break_policy below for how embedders override this). */
4702 static MonoBreakPolicy
4703 always_insert_breakpoint (MonoMethod *method)
4705 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; starts at the always-break default. */
4708 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4711 * mono_set_break_policy:
4712 * policy_callback: the new callback function
4714 * Allow embedders to decide wherther to actually obey breakpoint instructions
4715 * (both break IL instructions and Debugger.Break () method calls), for example
4716 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4717 * untrusted or semi-trusted code.
4719 * @policy_callback will be called every time a break point instruction needs to
4720 * be inserted with the method argument being the method that calls Debugger.Break()
4721 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4722 * if it wants the breakpoint to not be effective in the given method.
4723 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Passing NULL restores the always-break default. */
4726 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4728 if (policy_callback)
4729 break_policy_func = policy_callback;
4731 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — name typo is kept because callers in this
 * file use it; fixing it requires a coordinated rename)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 * effective.  ON_DBG defers to whether the mono debugger is attached; an
 * out-of-range value from the callback is reported with g_warning.
 * NOTE(review): the TRUE/FALSE return lines are missing from this excerpt.
 */
4735 should_insert_brekpoint (MonoMethod *method) {
4736 switch (break_policy_func (method)) {
4737 case MONO_BREAK_POLICY_ALWAYS:
4739 case MONO_BREAK_POLICY_NEVER:
4741 case MONO_BREAK_POLICY_ON_DBG:
4742 return mono_debug_using_mono_debugger ();
4744 g_warning ("Incorrect value returned from break policy callback");
4749 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array.Get/SetGenericValueImpl icalls: compute the element
 * address (bounds check skipped — callers already did it), then copy the
 * value either into the array (IS_SET, with a write barrier for reference
 * elements) or out of it into the caller-provided location.
 */
4751 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4753 MonoInst *addr, *store, *load;
4754 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4756 /* the bounds check is already done by the callers */
4757 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set path: load from the value pointer, store into the array slot. */
4759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4760 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4761 if (mini_type_is_reference (cfg, fsig->params [2]))
4762 emit_write_barrier (cfg, addr, load, -1);
/* get path: load from the array slot, store through the value pointer. */
4764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4765 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Thin wrapper: is KLASS (as a byval type) a reference type under the
 * current (possibly generic-shared) compile? */
4772 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4774 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: sp[0] = array, sp[1] = index, sp[2] = value.
 * Reference-typed stores with safety checks (and a non-null constant value)
 * go through the virtual stelemref helper, which performs the array
 * covariance check.  Otherwise: gsharedvt elements use a generic ldelema +
 * OP_STOREV_MEMBASE; constant indices fold the offset into a direct
 * store (with bounds check); the general case computes the address and
 * stores, adding a write barrier for reference elements.
 * NOTE(review): excerpt omits lines (iargs setup, casts on non-OBJ stack
 * entries, closing braces) — verify against the full file.
 */
4778 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4780 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4781 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4782 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4783 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4784 MonoInst *iargs [3];
4787 mono_class_setup_vtable (obj_array);
4788 g_assert (helper->slot);
4790 if (sp [0]->type != STACK_OBJ)
4792 if (sp [2]->type != STACK_OBJ)
4799 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4803 if (mini_is_gsharedvt_klass (cfg, klass)) {
4806 // FIXME-VT: OP_ICONST optimization
4807 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4808 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4809 ins->opcode = OP_STOREV_MEMBASE;
4810 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold element offset at compile time. */
4811 int array_reg = sp [0]->dreg;
4812 int index_reg = sp [1]->dreg;
4813 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4816 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4817 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4819 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4820 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4821 if (generic_class_is_reference_type (cfg, klass))
4822 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: element class comes from the third
 * parameter (store) or the return type (load); both paths skip bounds and
 * safety checks (ldelema called with bcheck FALSE, store with
 * safety_checks FALSE).
 * NOTE(review): excerpt omits the is_set branch lines and return of INS.
 */
4829 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4834 eklass = mono_class_from_mono_type (fsig->params [2]);
4836 eklass = mono_class_from_mono_type (fsig->ret);
4840 return emit_array_store (cfg, eklass, args, FALSE);
4842 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4843 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction; the only
 * visible source is the SIMD intrinsics pass (when MONO_OPT_SIMD is on and
 * the arch supports it).  Returns NULL when no intrinsic applies.
 */
4849 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4851 MonoInst *ins = NULL;
4852 #ifdef MONO_ARCH_SIMD_INTRINSICS
4853 if (cfg->opt & MONO_OPT_SIMD) {
4854 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (stored in
 * backend.memory_barrier_kind) to the current basic block.
 */
4864 emit_memory_barrier (MonoCompile *cfg, int kind)
4866 MonoInst *ins = NULL;
4867 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4868 MONO_ADD_INS (cfg->cbb, ins);
4869 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics usable only when compiling with the LLVM backend: Math.Sin/
 * Cos/Sqrt/Abs(double) become single R8 unary ops, and (with MONO_OPT_CMOV)
 * Math.Min/Max on I4/U4/I8/U8 become two-operand min/max ops.  Returns the
 * emitted instruction, or (per the visible tail) falls through when nothing
 * matched.
 * NOTE(review): excerpt omits opcode assignments for several branches
 * (Sin/Cos/Sqrt/Abs opcodes, signed min/max) and brace/return lines.
 */
4875 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4877 MonoInst *ins = NULL;
4880 /* The LLVM backend supports these intrinsics */
4881 if (cmethod->klass == mono_defaults.math_class) {
4882 if (strcmp (cmethod->name, "Sin") == 0) {
4884 } else if (strcmp (cmethod->name, "Cos") == 0) {
4886 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4888 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4893 MONO_INST_NEW (cfg, ins, opcode);
4894 ins->type = STACK_R8;
4895 ins->dreg = mono_alloc_freg (cfg);
4896 ins->sreg1 = args [0]->dreg;
4897 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max need conditional moves; only emitted under MONO_OPT_CMOV. */
4901 if (cfg->opt & MONO_OPT_CMOV) {
4902 if (strcmp (cmethod->name, "Min") == 0) {
4903 if (fsig->params [0]->type == MONO_TYPE_I4)
4905 if (fsig->params [0]->type == MONO_TYPE_U4)
4906 opcode = OP_IMIN_UN;
4907 else if (fsig->params [0]->type == MONO_TYPE_I8)
4909 else if (fsig->params [0]->type == MONO_TYPE_U8)
4910 opcode = OP_LMIN_UN;
4911 } else if (strcmp (cmethod->name, "Max") == 0) {
4912 if (fsig->params [0]->type == MONO_TYPE_I4)
4914 if (fsig->params [0]->type == MONO_TYPE_U4)
4915 opcode = OP_IMAX_UN;
4916 else if (fsig->params [0]->type == MONO_TYPE_I8)
4918 else if (fsig->params [0]->type == MONO_TYPE_U8)
4919 opcode = OP_LMAX_UN;
4924 MONO_INST_NEW (cfg, ins, opcode);
4925 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4926 ins->dreg = mono_alloc_ireg (cfg);
4927 ins->sreg1 = args [0]->dreg;
4928 ins->sreg2 = args [1]->dreg;
4929 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe even under generic sharing; the visible cases
 * inline Array.UnsafeStore / Array.UnsafeLoad via emit_array_unsafe_access.
 */
4937 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4939 if (cmethod->klass == mono_defaults.array_class) {
4940 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4941 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4942 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4943 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Central intrinsics dispatcher: if CMETHOD is a recognized BCL method,
 * emit equivalent IR inline instead of a call and return the resulting
 * instruction; the visible tail delegates anything unhandled to
 * mono_arch_emit_inst_for_method.  Recognized groups in this excerpt:
 * String (get_Chars/get_Length/InternalSetChar), Object (GetType/
 * InternalGetHashCode/.ctor), Array (Get/SetGenericValueImpl, GetLength/
 * GetLowerBound(0), get_Rank/get_Length), RuntimeHelpers
 * (get_OffsetToStringData), Thread (SpinWait_nop/MemoryBarrier), Monitor
 * (Enter/Exit fast paths), Interlocked (Read/Increment/Decrement/Add/
 * Exchange/CompareExchange/MemoryBarrier, gated on per-arch atomic-op
 * support), Debugger.Break, Environment.get_IsRunningOnWindows, Math
 * (SIMD then LLVM intrinsics).
 * NOTE(review): this excerpt omits many lines (else branches, braces,
 * returns, several opcode/size assignments); comments below describe only
 * the visible code — verify details against the full file.
 */
4950 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4952 MonoInst *ins = NULL;
/* Lazily resolve System.Runtime.CompilerServices.RuntimeHelpers once. */
4954 static MonoClass *runtime_helpers_class = NULL;
4955 if (! runtime_helpers_class)
4956 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4957 "System.Runtime.CompilerServices", "RuntimeHelpers");
4959 if (cmethod->klass == mono_defaults.string_class) {
4960 if (strcmp (cmethod->name, "get_Chars") == 0) {
4961 int dreg = alloc_ireg (cfg);
4962 int index_reg = alloc_preg (cfg);
4963 int mult_reg = alloc_preg (cfg);
4964 int add_reg = alloc_preg (cfg);
4966 #if SIZEOF_REGISTER == 8
4967 /* The array reg is 64 bits but the index reg is only 32 */
4968 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4970 index_reg = args [1]->dreg;
4972 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4974 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4975 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4976 add_reg = ins->dreg;
4977 /* Avoid a warning */
4979 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4982 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4983 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4985 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4987 type_from_op (ins, NULL, NULL);
4989 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4990 int dreg = alloc_ireg (cfg);
4991 /* Decompose later to allow more optimizations */
4992 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4993 ins->type = STACK_I4;
4994 ins->flags |= MONO_INST_FAULT;
4995 cfg->cbb->has_array_access = TRUE;
4996 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4999 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5000 int mult_reg = alloc_preg (cfg);
5001 int add_reg = alloc_preg (cfg);
5003 /* The corlib functions check for oob already. */
5004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5005 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5006 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5007 return cfg->cbb->last_ins;
5010 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: two dependent loads, obj->vtable->type. */
5012 if (strcmp (cmethod->name, "GetType") == 0) {
5013 int dreg = alloc_ireg_ref (cfg);
5014 int vt_reg = alloc_preg (cfg);
5015 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5016 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5017 type_from_op (ins, NULL, NULL);
5020 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash; only valid when the GC does not move objects. */
5021 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5022 int dreg = alloc_ireg (cfg);
5023 int t1 = alloc_ireg (cfg);
5025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5026 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5027 ins->type = STACK_I4;
5031 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is a no-op. */
5032 MONO_INST_NEW (cfg, ins, OP_NOP);
5033 MONO_ADD_INS (cfg->cbb, ins);
5037 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl; first char
 * selects get vs set. */
5038 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5039 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5041 #ifndef MONO_BIG_ARRAYS
5043 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5046 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5047 int dreg = alloc_ireg (cfg);
5048 int bounds_reg = alloc_ireg_mp (cfg);
5049 MonoBasicBlock *end_bb, *szarray_bb;
5050 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5052 NEW_BBLOCK (cfg, end_bb);
5053 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL distinguishes szarray from multi-dim array. */
5055 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5056 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5058 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5059 /* Non-szarray case */
5061 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5062 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5064 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5065 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5067 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is 0. */
5070 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5071 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5073 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5074 MONO_START_BB (cfg, end_bb);
5076 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5077 ins->type = STACK_I4;
5083 if (cmethod->name [0] != 'g')
5086 if (strcmp (cmethod->name, "get_Rank") == 0) {
5087 int dreg = alloc_ireg (cfg);
5088 int vtable_reg = alloc_preg (cfg);
5089 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5090 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5091 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5092 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5093 type_from_op (ins, NULL, NULL);
5096 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5097 int dreg = alloc_ireg (cfg);
5099 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5100 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5101 type_from_op (ins, NULL, NULL);
5106 } else if (cmethod->klass == runtime_helpers_class) {
5108 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5109 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5113 } else if (cmethod->klass == mono_defaults.thread_class) {
5114 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5115 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5116 MONO_ADD_INS (cfg->cbb, ins);
5118 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5119 return emit_memory_barrier (cfg, FullBarrier);
5121 } else if (cmethod->klass == mono_defaults.monitor_class) {
5123 /* FIXME this should be integrated to the check below once we support the trampoline version */
5124 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5125 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5126 MonoMethod *fast_method = NULL;
5128 /* Avoid infinite recursion */
5129 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5132 fast_method = mono_monitor_get_fast_path (cmethod);
5136 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5140 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Trampoline-based Monitor.Enter/Exit: object passed in a fixed register
 * unless compiling with LLVM. */
5141 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5144 if (COMPILE_LLVM (cfg)) {
5146 * Pass the argument normally, the LLVM backend will handle the
5147 * calling convention problems.
5149 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5151 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5152 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5153 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5154 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5157 return (MonoInst*)call;
5158 } else if (strcmp (cmethod->name, "Exit") == 0) {
5161 if (COMPILE_LLVM (cfg)) {
5162 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5164 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5165 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5166 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5167 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5170 return (MonoInst*)call;
5172 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5174 MonoMethod *fast_method = NULL;
5176 /* Avoid infinite recursion */
5177 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5178 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5179 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5182 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5183 strcmp (cmethod->name, "Exit") == 0)
5184 fast_method = mono_monitor_get_fast_path (cmethod);
5188 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5191 } else if (cmethod->klass->image == mono_defaults.corlib &&
5192 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5193 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5196 #if SIZEOF_REGISTER == 8
5197 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5198 /* 64 bit reads are already atomic */
5199 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5200 ins->dreg = mono_alloc_preg (cfg);
5201 ins->inst_basereg = args [0]->dreg;
5202 ins->inst_offset = 0;
5203 MONO_ADD_INS (cfg->cbb, ins);
5207 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to atomic-add of +1 / -1. */
5208 if (strcmp (cmethod->name, "Increment") == 0) {
5209 MonoInst *ins_iconst;
5212 if (fsig->params [0]->type == MONO_TYPE_I4)
5213 opcode = OP_ATOMIC_ADD_NEW_I4;
5214 #if SIZEOF_REGISTER == 8
5215 else if (fsig->params [0]->type == MONO_TYPE_I8)
5216 opcode = OP_ATOMIC_ADD_NEW_I8;
5219 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5220 ins_iconst->inst_c0 = 1;
5221 ins_iconst->dreg = mono_alloc_ireg (cfg);
5222 MONO_ADD_INS (cfg->cbb, ins_iconst);
5224 MONO_INST_NEW (cfg, ins, opcode);
5225 ins->dreg = mono_alloc_ireg (cfg);
5226 ins->inst_basereg = args [0]->dreg;
5227 ins->inst_offset = 0;
5228 ins->sreg2 = ins_iconst->dreg;
5229 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5230 MONO_ADD_INS (cfg->cbb, ins);
5232 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5233 MonoInst *ins_iconst;
5236 if (fsig->params [0]->type == MONO_TYPE_I4)
5237 opcode = OP_ATOMIC_ADD_NEW_I4;
5238 #if SIZEOF_REGISTER == 8
5239 else if (fsig->params [0]->type == MONO_TYPE_I8)
5240 opcode = OP_ATOMIC_ADD_NEW_I8;
5243 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5244 ins_iconst->inst_c0 = -1;
5245 ins_iconst->dreg = mono_alloc_ireg (cfg);
5246 MONO_ADD_INS (cfg->cbb, ins_iconst);
5248 MONO_INST_NEW (cfg, ins, opcode);
5249 ins->dreg = mono_alloc_ireg (cfg);
5250 ins->inst_basereg = args [0]->dreg;
5251 ins->inst_offset = 0;
5252 ins->sreg2 = ins_iconst->dreg;
5253 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5254 MONO_ADD_INS (cfg->cbb, ins);
5256 } else if (strcmp (cmethod->name, "Add") == 0) {
5259 if (fsig->params [0]->type == MONO_TYPE_I4)
5260 opcode = OP_ATOMIC_ADD_NEW_I4;
5261 #if SIZEOF_REGISTER == 8
5262 else if (fsig->params [0]->type == MONO_TYPE_I8)
5263 opcode = OP_ATOMIC_ADD_NEW_I8;
5267 MONO_INST_NEW (cfg, ins, opcode);
5268 ins->dreg = mono_alloc_ireg (cfg);
5269 ins->inst_basereg = args [0]->dreg;
5270 ins->inst_offset = 0;
5271 ins->sreg2 = args [1]->dreg;
5272 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5273 MONO_ADD_INS (cfg->cbb, ins);
5276 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5278 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5279 if (strcmp (cmethod->name, "Exchange") == 0) {
5281 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5283 if (fsig->params [0]->type == MONO_TYPE_I4)
5284 opcode = OP_ATOMIC_EXCHANGE_I4;
5285 #if SIZEOF_REGISTER == 8
5286 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5287 (fsig->params [0]->type == MONO_TYPE_I))
5288 opcode = OP_ATOMIC_EXCHANGE_I8;
5290 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5291 opcode = OP_ATOMIC_EXCHANGE_I4;
5296 MONO_INST_NEW (cfg, ins, opcode);
5297 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5298 ins->inst_basereg = args [0]->dreg;
5299 ins->inst_offset = 0;
5300 ins->sreg2 = args [1]->dreg;
5301 MONO_ADD_INS (cfg->cbb, ins);
5303 switch (fsig->params [0]->type) {
5305 ins->type = STACK_I4;
5309 ins->type = STACK_I8;
5311 case MONO_TYPE_OBJECT:
5312 ins->type = STACK_OBJ;
5315 g_assert_not_reached ();
/* Reference exchange must notify the GC of the new pointer. */
5318 if (cfg->gen_write_barriers && is_ref)
5319 emit_write_barrier (cfg, args [0], args [1], -1);
5321 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5323 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5324 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5326 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5327 if (fsig->params [1]->type == MONO_TYPE_I4)
5329 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5330 size = sizeof (gpointer);
5331 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5334 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5335 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5336 ins->sreg1 = args [0]->dreg;
5337 ins->sreg2 = args [1]->dreg;
5338 ins->sreg3 = args [2]->dreg;
5339 ins->type = STACK_I4;
5340 MONO_ADD_INS (cfg->cbb, ins);
5341 } else if (size == 8) {
5342 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5343 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5344 ins->sreg1 = args [0]->dreg;
5345 ins->sreg2 = args [1]->dreg;
5346 ins->sreg3 = args [2]->dreg;
5347 ins->type = STACK_I8;
5348 MONO_ADD_INS (cfg->cbb, ins);
5350 /* g_assert_not_reached (); */
5352 if (cfg->gen_write_barriers && is_ref)
5353 emit_write_barrier (cfg, args [0], args [1], -1);
5355 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5357 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5358 ins = emit_memory_barrier (cfg, FullBarrier);
5362 } else if (cmethod->klass->image == mono_defaults.corlib) {
/* Debugger.Break: honored or turned into a NOP per the break policy. */
5363 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5364 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5365 if (should_insert_brekpoint (cfg->method)) {
5366 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5368 MONO_INST_NEW (cfg, ins, OP_NOP);
5369 MONO_ADD_INS (cfg->cbb, ins);
/* Compile-time constant: the target platform is fixed at JIT time. */
5373 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5374 && strcmp (cmethod->klass->name, "Environment") == 0) {
5376 EMIT_NEW_ICONST (cfg, ins, 1);
5378 EMIT_NEW_ICONST (cfg, ins, 0);
5382 } else if (cmethod->klass == mono_defaults.math_class) {
5384 * There is general branches code for Min/Max, but it does not work for
5386 * http://everything2.com/?node_id=1051618
5390 #ifdef MONO_ARCH_SIMD_INTRINSICS
5391 if (cfg->opt & MONO_OPT_SIMD) {
5392 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5398 if (COMPILE_LLVM (cfg)) {
5399 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
/* Final chance: architecture-specific intrinsics. */
5404 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5408 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to better implementations; the
 * visible case rewrites String.InternalAllocateStr to the GC's managed
 * allocator (vtable + length args) when allocation profiling and shared
 * code are off.  Returns the replacement call instruction, or (per the
 * gutted tail) presumably NULL when no redirection applies — confirm.
 */
5411 inline static MonoInst*
5412 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5413 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5415 if (method->klass == mono_defaults.string_class) {
5416 /* managed string allocation support */
5417 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5418 MonoInst *iargs [2];
5419 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5420 MonoMethod *managed_alloc = NULL;
5422 g_assert (vtable); /*Should not fail since it System.String*/
5423 #ifndef MONO_CROSS_COMPILE
5424 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5428 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5429 iargs [1] = args [0];
5430 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable per argument (including the
 * implicit this, whose type is recovered from the stack entry) and emit a
 * store of each stack value into it, so the inlined body reads arguments
 * through cfg->args like a normal compile.  See the FIXME for why plain
 * aliasing of the stack entries is not possible (ldelema interaction).
 */
5437 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5439 MonoInst *store, *temp;
5442 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5443 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5446 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5447 * would be different than the MonoInst's used to represent arguments, and
5448 * the ldelema implementation can't deal with that.
5449 * Solution: When ldelema is used on an inline argument, create a var for
5450 * it, emit ldelema on that var, and emit the saving code below in
5451 * inline_method () if needed.
5453 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5454 cfg->args [i] = temp;
5455 /* This uses cfg->args [i] which is set by the preceeding line */
5456 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5457 store->cil_code = sp [0]->cil_code;
5462 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5463 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5465 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of callees whose full name starts with
 * the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The limit is read once and cached; an unset variable caches ""
 * which disables filtering.  NOTE(review): `limit` caches a getenv pointer
 * and the TRUE fall-through return line is not visible in this excerpt.
 */
5467 check_inline_called_method_name_limit (MonoMethod *called_method)
5470 static char *limit = NULL;
5472 if (limit == NULL) {
5473 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5475 if (limit_string != NULL)
5476 limit = limit_string;
5478 limit = (char *) "";
5481 if (limit [0] != '\0') {
5482 char *called_method_name = mono_method_full_name (called_method, TRUE);
5484 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5485 g_free (called_method_name);
5487 //return (strncmp_result <= 0);
5488 return (strncmp_result == 0);
5495 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid mirroring the callee-side check above: only allow inlining
 * when the *caller's* full name starts with the prefix in
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT (read once, "" disables filtering).
 */
5497 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5500 static char *limit = NULL;
5502 if (limit == NULL) {
5503 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5504 if (limit_string != NULL) {
5505 limit = limit_string;
5507 limit = (char *) "";
5511 if (limit [0] != '\0') {
5512 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5514 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5515 g_free (caller_method_name);
5517 //return (strncmp_result <= 0);
5518 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline-return variable RVAR to a type-appropriate zero:
 * iconst/lconst/pconst 0 for integer and pointer stack types, a load of the
 * shared static 0.0 for R8, VZERO for value types; any other stack type is
 * a bug (g_assert_not_reached).
 */
5526 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Static so OP_R8CONST can point at it; never written. */
5528 static double r8_0 = 0.0;
5531 switch (rvar->type) {
5533 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5536 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5541 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5544 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5545 ins->type = STACK_R8;
5546 ins->inst_p0 = (void*)&r8_0;
5547 ins->dreg = rvar->dreg;
5548 MONO_ADD_INS (cfg->cbb, ins);
5551 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5554 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at IP in the current cfg. SP points at the call
 * arguments already on the evaluation stack; FSIG is the call signature;
 * REAL_OFFSET is the IL offset of the call site. If INLINE_ALWAYS is set the
 * cost limit is ignored and loader errors become cfg exceptions.
 *   Returns the cost reported by the recursive mono_method_to_ir () on
 * success, 0 on failure (return value not visible in this excerpt — confirm).
 *   The function saves the cfg fields that mono_method_to_ir () overwrites,
 * runs the recursive conversion into fresh start/end bblocks, restores the
 * saved state, and then either links the new bblocks into the graph (success)
 * or abandons them (abort).
 */
5559 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5560 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5562 MonoInst *ins, *rvar = NULL;
5563 MonoMethodHeader *cheader;
5564 MonoBasicBlock *ebblock, *sbblock;
/* snapshot of the cfg fields clobbered by the recursive mono_method_to_ir () */
5566 MonoMethod *prev_inlined_method;
5567 MonoInst **prev_locals, **prev_args;
5568 MonoType **prev_arg_types;
5569 guint prev_real_offset;
5570 GHashTable *prev_cbb_hash;
5571 MonoBasicBlock **prev_cil_offset_to_bb;
5572 MonoBasicBlock *prev_cbb;
5573 unsigned char* prev_cil_start;
5574 guint32 prev_cil_offset_to_bb_len;
5575 MonoMethod *prev_current_method;
5576 MonoGenericContext *prev_generic_context;
5577 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
/* inlining must not start with a pending exception on the cfg */
5579 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional debugging filters limiting which callees/callers get inlined */
5581 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5582 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5585 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5586 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5590 if (cfg->verbose_level > 2)
5591 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* count each method as inlineable only once */
5593 if (!cmethod->inline_info) {
5594 cfg->stat_inlineable_methods++;
5595 cmethod->inline_info = 1;
5598 /* allocate local variables */
5599 cheader = mono_method_get_header (cmethod);
5601 if (cheader == NULL || mono_loader_get_last_error ()) {
5602 MonoLoaderError *error = mono_loader_get_last_error ();
5605 mono_metadata_free_mh (cheader);
/* only surface the loader error when the caller demanded inlining */
5606 if (inline_always && error)
5607 mono_cfg_set_exception (cfg, error->exception_type);
5609 mono_loader_clear_error ();
5613 /*Must verify before creating locals as it can cause the JIT to assert.*/
5614 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5615 mono_metadata_free_mh (cheader);
5619 /* allocate space to store the return value */
5620 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5621 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in fresh locals for the callee's body */
5624 prev_locals = cfg->locals;
5625 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5626 for (i = 0; i < cheader->num_locals; ++i)
5627 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5629 /* allocate start and end blocks */
5630 /* This is needed so if the inline is aborted, we can clean up */
5631 NEW_BBLOCK (cfg, sbblock);
5632 sbblock->real_offset = real_offset;
5634 NEW_BBLOCK (cfg, ebblock);
5635 ebblock->block_num = cfg->num_bblocks++;
5636 ebblock->real_offset = real_offset;
5638 prev_args = cfg->args;
5639 prev_arg_types = cfg->arg_types;
5640 prev_inlined_method = cfg->inlined_method;
5641 cfg->inlined_method = cmethod;
5642 cfg->ret_var_set = FALSE;
5643 cfg->inline_depth ++;
5644 prev_real_offset = cfg->real_offset;
5645 prev_cbb_hash = cfg->cbb_hash;
5646 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5647 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5648 prev_cil_start = cfg->cil_start;
5649 prev_cbb = cfg->cbb;
5650 prev_current_method = cfg->current_method;
5651 prev_generic_context = cfg->generic_context;
5652 prev_ret_var_set = cfg->ret_var_set;
/* a callvirt on an instance method needs a null check on 'this' inside the callee */
5654 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5657 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5659 ret_var_set = cfg->ret_var_set;
/* restore the caller's conversion state */
5661 cfg->inlined_method = prev_inlined_method;
5662 cfg->real_offset = prev_real_offset;
5663 cfg->cbb_hash = prev_cbb_hash;
5664 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5665 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5666 cfg->cil_start = prev_cil_start;
5667 cfg->locals = prev_locals;
5668 cfg->args = prev_args;
5669 cfg->arg_types = prev_arg_types;
5670 cfg->current_method = prev_current_method;
5671 cfg->generic_context = prev_generic_context;
5672 cfg->ret_var_set = prev_ret_var_set;
5673 cfg->inline_depth --;
/* accept the inline if conversion succeeded and stayed under the cost cap */
5675 if ((costs >= 0 && costs < 60) || inline_always) {
5676 if (cfg->verbose_level > 2)
5677 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5679 cfg->stat_inlined_methods++;
5681 /* always add some code to avoid block split failures */
5682 MONO_INST_NEW (cfg, ins, OP_NOP);
5683 MONO_ADD_INS (prev_cbb, ins);
5685 prev_cbb->next_bb = sbblock;
5686 link_bblock (cfg, prev_cbb, sbblock);
5689 * Get rid of the begin and end bblocks if possible to aid local
5692 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5694 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5695 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5697 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5698 MonoBasicBlock *prev = ebblock->in_bb [0];
5699 mono_merge_basic_blocks (cfg, prev, ebblock);
5701 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5702 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5703 cfg->cbb = prev_cbb;
5707 * It's possible that the rvar is set in some prev bblock, but not in others.
/* initialize rvar on predecessor paths where it was never assigned */
5713 for (i = 0; i < ebblock->in_count; ++i) {
5714 bb = ebblock->in_bb [i];
5716 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5719 emit_init_rvar (cfg, rvar, fsig->ret);
5729 * If the inlined method contains only a throw, then the ret var is not
5730 * set, so set it to a dummy value.
5733 emit_init_rvar (cfg, rvar, fsig->ret);
/* push the return value back onto the caller's evaluation stack */
5735 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5738 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* --- abort path: conversion failed or was too costly --- */
5741 if (cfg->verbose_level > 2)
5742 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5743 cfg->exception_type = MONO_EXCEPTION_NONE;
5744 mono_loader_clear_error ();
5746 /* This gets rid of the newly added bblocks */
5747 cfg->cbb = prev_cbb;
5749 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5754 * Some of these comments may well be out-of-date.
5755 * Design decisions: we do a single pass over the IL code (and we do bblock
5756 * splitting/merging in the few cases when it's required: a back jump to an IL
5757 * address that was not already seen as bblock starting point).
5758 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5759 * Complex operations are decomposed in simpler ones right away. We need to let the
5760 * arch-specific code peek and poke inside this process somehow (except when the
5761 * optimizations can take advantage of the full semantic info of coarse opcodes).
5762 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5763 * MonoInst->opcode initially is the IL opcode or some simplification of that
5764 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5765 * opcode with value bigger than OP_LAST.
5766 * At this point the IR can be handed over to an interpreter, a dumb code generator
5767 * or to the optimizing code generator that will translate it to SSA form.
5769 * Profiling directed optimizations.
5770 * We may compile by default with few or no optimizations and instrument the code
5771 * or the user may indicate what methods to optimize the most either in a config file
5772 * or through repeated runs where the compiler applies offline the optimizations to
5773 * each method and then decides if it was worth it.
/*
 * Verification helper macros used inside mono_method_to_ir (): each checks a
 * stack/arg/local/opcode-size invariant and bails out through UNVERIFIED (or
 * LOAD_ERROR for type-load failures) when the IL is malformed.
 */
5776 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5777 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5778 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5779 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5780 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5781 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5782 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5783 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5785 /* offset from br.s -> br like opcodes */
5786 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5789 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5791 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5793 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a MonoBasicBlock (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch/switch, thereby discovering all basic block boundaries before the
 * main conversion pass. Also marks the bblock containing a CEE_THROW as
 * out-of-line so later passes can treat it as cold code.
 * NOTE(review): some case labels and error-handling lines are not visible in
 * this excerpt; *pos is presumably used to report the failing IP — confirm.
 */
5797 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5799 unsigned char *ip = start;
5800 unsigned char *target;
5803 MonoBasicBlock *bblock;
5804 const MonoOpcode *opcode;
5807 cli_addr = ip - start;
5808 i = mono_opcode_value ((const guint8 **)&ip, end);
5811 opcode = &mono_opcodes [i];
/* advance over the operand according to its encoding kind */
5812 switch (opcode->argument) {
5813 case MonoInlineNone:
5816 case MonoInlineString:
5817 case MonoInlineType:
5818 case MonoInlineField:
5819 case MonoInlineMethod:
5822 case MonoShortInlineR:
5829 case MonoShortInlineVar:
5830 case MonoShortInlineI:
/* short branch: 8-bit signed displacement after the opcode byte */
5833 case MonoShortInlineBrTarget:
5834 target = start + cli_addr + 2 + (signed char)ip [1];
5835 GET_BBLOCK (cfg, bblock, target);
5838 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 32-bit signed displacement */
5840 case MonoInlineBrTarget:
5841 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5842 GET_BBLOCK (cfg, bblock, target);
5845 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 32-bit targets relative to the end of the instruction */
5847 case MonoInlineSwitch: {
5848 guint32 n = read32 (ip + 1);
5851 cli_addr += 5 + 4 * n;
5852 target = start + cli_addr;
5853 GET_BBLOCK (cfg, bblock, target);
5855 for (j = 0; j < n; ++j) {
5856 target = start + cli_addr + (gint32)read32 (ip);
5857 GET_BBLOCK (cfg, bblock, target);
5867 g_assert_not_reached ();
5870 if (i == CEE_THROW) {
5871 unsigned char *bb_start = ip - 1;
5873 /* Find the start of the bblock containing the throw */
5875 while ((bb_start >= start) && !bblock) {
5876 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throw-only blocks are cold: move them out of the hot path */
5880 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the scope of M. For wrapper methods the
 * token indexes the wrapper's own data table; otherwise it is looked up in
 * M's image. The result may be an open constructed method (no check here).
 */
5890 static inline MonoMethod *
5891 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5895 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5896 method = mono_method_get_wrapper_data (m, token);
5898 method = mono_class_inflate_generic_method (method, context);
5900 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared code
 * an open constructed declaring type is rejected (handling on the lines not
 * visible in this excerpt — presumably returns NULL; confirm).
 */
5906 static inline MonoMethod *
5907 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5909 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5911 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in METHOD's scope: wrapper data for
 * wrappers, a metadata lookup in METHOD's image otherwise, then initialize
 * the class before returning it.
 */
5917 static inline MonoClass*
5918 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5922 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5923 klass = mono_method_get_wrapper_data (method, token);
5925 klass = mono_class_inflate_generic_class (klass, context);
5927 klass = mono_class_get_full (method->klass->image, token, context);
5930 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in METHOD's scope. Wrapper
 * signatures come from the wrapper data table and are inflated with CONTEXT;
 * otherwise the signature is parsed from METHOD's image metadata.
 */
5934 static inline MonoMethodSignature*
5935 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5937 MonoMethodSignature *fsig;
5939 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5942 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5944 fsig = mono_inflate_generic_signature (fsig, context, &error);
/* inflation of wrapper signatures is not expected to fail */
5946 g_assert (mono_error_ok (&error));
5949 fsig = mono_metadata_parse_signature (method->klass->image, token);
5955 * Returns TRUE if the JIT should abort inlining because "callee"
5956 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER -> CALLEE call. ECMA-signature
 * failures emit a call to the security manager's LinkDemandSecurityException
 * thrower before the actual call; other failures record a
 * SECURITY_LINKDEMAND exception on the cfg (unless one is already pending).
 */
5959 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* declarative security is only re-checked when inlining across methods */
5963 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
5967 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5968 if (result == MONO_JIT_SECURITY_OK)
5971 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5972 /* Generate code to throw a SecurityException before the actual call/link */
5973 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5976 NEW_ICONST (cfg, args [0], 4);
5977 NEW_METHODCONST (cfg, args [1], caller);
5978 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5979 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5980 /* don't hide previous results */
5981 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5982 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily caching in a static) the 1-argument
 * SecurityManager.ThrowException method used to raise security exceptions
 * from JITted code.
 */
5990 throw_exception (void)
5992 static MonoMethod *method = NULL;
5995 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5996 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call that throws the pre-built exception object EX at runtime,
 * via SecurityManager.ThrowException (see throw_exception () above).
 */
6003 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6005 MonoMethod *thrower = throw_exception ();
6008 EMIT_NEW_PCONST (cfg, args [0], ex);
6009 mono_emit_method_call (cfg, thrower, args, NULL);
6013 * Return the original method if a wrapper is specified. We can only access
6014 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the managed method it wraps so its custom
 * attributes (e.g. CoreCLR security levels) can be inspected. Non-wrappers
 * are returned unchanged.
 */
6017 get_original_method (MonoMethod *method)
6019 if (method->wrapper_type == MONO_WRAPPER_NONE)
6022 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6023 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6026 /* in other cases we need to find the original method */
6027 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for a field access: if the (unwrapped) CALLER is
 * not allowed to touch FIELD, emit code that throws the returned exception
 * at the access site.
 */
6031 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6032 MonoBasicBlock *bblock, unsigned char *ip)
6034 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6035 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6037 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for a method call: if the (unwrapped) CALLER may
 * not call CALLEE, emit code throwing the returned exception at the call
 * site.
 */
6041 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6042 MonoBasicBlock *bblock, unsigned char *ip)
6044 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6045 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6047 emit_throw_exception (cfg, ex);
6051 * Check that the IL instructions at ip are the array initialization
6052 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers.InitializeArray"
 * idiom that follows a newarr, and return a pointer to the static field data
 * (or, for AOT, the RVA as a tagged pointer) so the JIT can turn the call into
 * a direct memory copy. Returns NULL when the pattern or element type does not
 * match. *out_size and *out_field_token are set on success.
 */
6055 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6058 * newarr[System.Int32]
6060 * ldtoken field valuetype ...
6061 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (Field) — TODO confirm */
6063 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6064 guint32 token = read32 (ip + 7);
6065 guint32 field_token = read32 (ip + 2);
6066 guint32 field_index = field_token & 0xffffff;
6068 const char *data_ptr;
6070 MonoMethod *cmethod;
6071 MonoClass *dummy_class;
6072 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6078 *out_field_token = field_token;
6080 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the real corlib RuntimeHelpers.InitializeArray */
6083 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw bytes can be copied directly */
6085 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6086 case MONO_TYPE_BOOLEAN:
6090 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6091 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6092 case MONO_TYPE_CHAR:
6102 return NULL; /* stupid ARM FP swapped format */
/* initializer blob must fit in the field's declared size */
6112 if (size > mono_type_size (field->type, &dummy_align))
6115 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6116 if (!method->klass->image->dynamic) {
6117 field_index = read32 (ip + 2) & 0xffffff;
6118 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6119 data_ptr = mono_image_rva_map (method->klass->image, rva);
6120 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6121 /* for aot code we do the lookup on load */
6122 if (aot && data_ptr)
6123 return GUINT_TO_POINTER (rva);
6125 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: read the data directly from the field */
6127 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP (or noting an empty body).
 * The header is queued on headers_to_free rather than freed immediately.
 */
6135 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6137 char *method_fname = mono_method_full_name (method, TRUE);
6139 MonoMethodHeader *header = mono_method_get_header (method);
6141 if (header->code_size == 0)
6142 method_code = g_strdup ("method body is empty.");
6144 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6145 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6146 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6147 g_free (method_fname);
6148 g_free (method_code);
6149 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort compilation with a pre-built managed exception object; the object
 * is stored in cfg->exception_ptr, which is GC-registered so the exception
 * survives until it is thrown.
 */
6153 set_exception_object (MonoCompile *cfg, MonoException *exception)
6155 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6156 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6157 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N. When the value on top of the stack
 * is the constant just emitted into the current bblock, the reg-reg move is
 * elided by retargeting the constant's dreg to the local's dreg.
 */
6161 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6164 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6165 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6166 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6167 /* Optimize reg-reg moves away */
6169 * Can't optimize other opcodes, since sp[0] might point to
6170 * the last ins of a decomposed opcode.
6172 sp [0]->dreg = (cfg)->locals [n]->dreg;
6174 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6179 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the local's
 * address, directly zero the local (PCONST NULL for reference types, VZERO
 * for value types). Returns the new ip past the consumed sequence, or
 * NULL-ish fallthrough when the pattern does not apply (return paths not
 * visible in this excerpt — confirm).
 */
6182 static inline unsigned char *
6183 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* SIZE selects the short/long ldloca encoding; local index read accordingly */
6192 local = read16 (ip + 2);
6196 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6197 gboolean skip = FALSE;
6199 /* From the INITOBJ case */
6200 token = read32 (ip + 2);
6201 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6202 CHECK_TYPELOAD (klass);
6203 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6204 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6205 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6206 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS derives from System.Exception, walking the parent
 * chain up to the root.
 */
6219 is_exception_class (MonoClass *class)
6222 if (class == mono_defaults.exception_class)
6224 class = class->parent;
6230 * is_jit_optimizer_disabled:
6232 * Determine whether M's assembly has a DebuggableAttribute with the
6233 * IsJITOptimizerDisabled flag set.
/*
 * Decodes the assembly-level [Debuggable] attribute and caches the result on
 * the MonoAssembly (jit_optimizer_disabled / _inited, published with a
 * memory barrier so racing readers see a fully-initialized value).
 */
6236 is_jit_optimizer_disabled (MonoMethod *m)
6238 MonoAssembly *ass = m->klass->image->assembly;
6239 MonoCustomAttrInfo* attrs;
/* DebuggableAttribute class, resolved once and cached */
6240 static MonoClass *klass;
6242 gboolean val = FALSE;
/* fast path: answer already computed for this assembly */
6245 if (ass->jit_optimizer_disabled_inited)
6246 return ass->jit_optimizer_disabled;
6249 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* attribute class missing: optimizations stay enabled */
6252 ass->jit_optimizer_disabled = FALSE;
6253 mono_memory_barrier ();
6254 ass->jit_optimizer_disabled_inited = TRUE;
6258 attrs = mono_custom_attrs_from_assembly (ass);
6260 for (i = 0; i < attrs->num_attrs; ++i) {
6261 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6264 MonoMethodSignature *sig;
6266 if (!attr->ctor || attr->ctor->klass != klass)
6268 /* Decode the attribute. See reflection.c */
6269 len = attr->data_size;
6270 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
6271 g_assert (read16 (p) == 0x0001);
6274 // FIXME: Support named parameters
6275 sig = mono_method_signature (attr->ctor);
/* only the (bool, bool) ctor overload is understood here */
6276 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6278 /* Two boolean arguments */
6282 mono_custom_attrs_free (attrs);
6285 ass->jit_optimizer_disabled = val;
6286 mono_memory_barrier ();
6287 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD with signature FSIG
 * can actually be compiled as a tail call on this configuration. Starts from
 * an arch-specific or signature-equality baseline, then vetoes cases where
 * the callee could observe the caller's (about to be torn down) stack frame
 * or where the wrapper/LMF machinery gets in the way.
 */
6293 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6295 gboolean supported_tail_call;
6298 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6299 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* fallback: signatures must match exactly and not return a struct */
6301 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6304 for (i = 0; i < fsig->param_count; ++i) {
6305 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6306 /* These can point to the current method's stack */
6307 supported_tail_call = FALSE;
6309 if (fsig->hasthis && cmethod->klass->valuetype)
6310 /* this might point to the current method's stack */
6311 supported_tail_call = FALSE;
6312 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6313 supported_tail_call = FALSE;
/* an LMF frame must stay live for the duration of the call */
6314 if (cfg->method->save_lmf)
6315 supported_tail_call = FALSE;
6316 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6317 supported_tail_call = FALSE;
6319 /* Debugging support */
6321 if (supported_tail_call) {
6322 if (!mono_debug_count ())
6323 supported_tail_call = FALSE;
6327 return supported_tail_call;
6330 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6331 * it to the thread local value based on the tls_offset field. Every other kind of access to
6332 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is corlib's ThreadLocal`1.tlsdata, the one field
 * whose ldflda gets the special TLS redirection described above.
 */
6335 is_magic_tls_access (MonoClassField *field)
6337 if (strcmp (field->name, "tlsdata"))
6339 if (strcmp (field->parent->name, "ThreadLocal`1"))
6341 return field->parent->image == mono_defaults.corlib;
6344 /* emits the code needed to access a managed tls var (like ThreadStatic)
6345 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6346 * pointer for the current thread.
6347 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Inline expansion of the thread-static slot lookup: the top 8 bits of the
 * offset select the static_data chunk (1-based), the low 24 bits are the
 * byte offset within that chunk.
 */
6350 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6353 int static_data_reg, array_reg, dreg;
6354 int offset2_reg, idx_reg;
6355 // inlined access to the tls data
6356 // idx = (offset >> 24) - 1;
6357 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6358 static_data_reg = alloc_ireg (cfg);
6359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6360 idx_reg = alloc_ireg (cfg);
6361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by pointer size to address static_data [idx] */
6363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6364 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6365 array_reg = alloc_ireg (cfg);
6366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6367 offset2_reg = alloc_ireg (cfg);
6368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6369 dreg = alloc_ireg (cfg);
6370 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6375 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6376 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Compute (and cache in *CACHED_TLS_ADDR, a temp variable reused for the
 * rest of the method) the address of the TLS slot behind ThreadLocal`1's
 * tlsdata field: read tls_offset from THREAD_LOCAL, get the current
 * MonoInternalThread (intrinsic if available, otherwise a call), then index
 * its static data via emit_managed_static_data_access ().
 */
6379 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6381 MonoInst *load, *addr, *temp, *store, *thread_ins;
6382 MonoClassField *offset_field;
/* fast path: address already computed earlier in this method */
6384 if (*cached_tls_addr) {
6385 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6388 thread_ins = mono_get_thread_intrinsic (cfg);
6389 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6391 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6393 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no thread intrinsic on this target: fall back to an icall-style method call */
6395 MonoMethod *thread_method;
6396 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6397 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6399 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6400 addr->klass = mono_class_from_mono_type (tls_field->type);
6401 addr->type = STACK_MP;
6402 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6403 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6405 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6410 * mono_method_to_ir:
6412 * Translate the .net IL into linear IR.
6415 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6416 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6417 guint inline_offset, gboolean is_virtual_call)
6420 MonoInst *ins, **sp, **stack_start;
6421 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6422 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6423 MonoMethod *cmethod, *method_definition;
6424 MonoInst **arg_array;
6425 MonoMethodHeader *header;
6427 guint32 token, ins_flag;
6429 MonoClass *constrained_call = NULL;
6430 unsigned char *ip, *end, *target, *err_pos;
6431 static double r8_0 = 0.0;
6432 MonoMethodSignature *sig;
6433 MonoGenericContext *generic_context = NULL;
6434 MonoGenericContainer *generic_container = NULL;
6435 MonoType **param_types;
6436 int i, n, start_new_bblock, dreg;
6437 int num_calls = 0, inline_costs = 0;
6438 int breakpoint_id = 0;
6440 MonoBoolean security, pinvoke;
6441 MonoSecurityManager* secman = NULL;
6442 MonoDeclSecurityActions actions;
6443 GSList *class_inits = NULL;
6444 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6446 gboolean init_locals, seq_points, skip_dead_blocks;
6447 gboolean disable_inline, sym_seq_points = FALSE;
6448 MonoInst *cached_tls_addr = NULL;
6449 MonoDebugMethodInfo *minfo;
6450 MonoBitSet *seq_point_locs = NULL;
6451 MonoBitSet *seq_point_set_locs = NULL;
6453 disable_inline = is_jit_optimizer_disabled (method);
6455 /* serialization and xdomain stuff may need access to private fields and methods */
6456 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6457 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6458 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6459 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6460 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6461 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6463 dont_verify |= mono_security_smcs_hack_enabled ();
6465 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6466 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6467 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6468 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6469 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6471 image = method->klass->image;
6472 header = mono_method_get_header (method);
6474 MonoLoaderError *error;
6476 if ((error = mono_loader_get_last_error ())) {
6477 mono_cfg_set_exception (cfg, error->exception_type);
6479 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6480 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6482 goto exception_exit;
6484 generic_container = mono_method_get_generic_container (method);
6485 sig = mono_method_signature (method);
6486 num_args = sig->hasthis + sig->param_count;
6487 ip = (unsigned char*)header->code;
6488 cfg->cil_start = ip;
6489 end = ip + header->code_size;
6490 cfg->stat_cil_code_size += header->code_size;
6491 init_locals = header->init_locals;
6493 seq_points = cfg->gen_seq_points && cfg->method == method;
6494 #ifdef PLATFORM_ANDROID
6495 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6498 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6499 /* We could hit a seq point before attaching to the JIT (#8338) */
6503 if (cfg->gen_seq_points && cfg->method == method) {
6504 minfo = mono_debug_lookup_method (method);
6506 int i, n_il_offsets;
6510 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6511 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6512 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6513 sym_seq_points = TRUE;
6514 for (i = 0; i < n_il_offsets; ++i) {
6515 if (il_offsets [i] < header->code_size)
6516 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6522 * Methods without init_locals set could cause asserts in various passes
6527 method_definition = method;
6528 while (method_definition->is_inflated) {
6529 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6530 method_definition = imethod->declaring;
6533 /* SkipVerification is not allowed if core-clr is enabled */
6534 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6536 dont_verify_stloc = TRUE;
6539 if (mono_debug_using_mono_debugger ())
6540 cfg->keep_cil_nops = TRUE;
6542 if (sig->is_inflated)
6543 generic_context = mono_method_get_context (method);
6544 else if (generic_container)
6545 generic_context = &generic_container->context;
6546 cfg->generic_context = generic_context;
6548 if (!cfg->generic_sharing_context)
6549 g_assert (!sig->has_type_parameters);
6551 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6552 g_assert (method->is_inflated);
6553 g_assert (mono_method_get_context (method)->method_inst);
6555 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6556 g_assert (sig->generic_param_count);
6558 if (cfg->method == method) {
6559 cfg->real_offset = 0;
6561 cfg->real_offset = inline_offset;
6564 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6565 cfg->cil_offset_to_bb_len = header->code_size;
6567 cfg->current_method = method;
6569 if (cfg->verbose_level > 2)
6570 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6572 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6574 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6575 for (n = 0; n < sig->param_count; ++n)
6576 param_types [n + sig->hasthis] = sig->params [n];
6577 cfg->arg_types = param_types;
6579 dont_inline = g_list_prepend (dont_inline, method);
6580 if (cfg->method == method) {
6582 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6583 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6586 NEW_BBLOCK (cfg, start_bblock);
6587 cfg->bb_entry = start_bblock;
6588 start_bblock->cil_code = NULL;
6589 start_bblock->cil_length = 0;
6590 #if defined(__native_client_codegen__)
6591 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6592 ins->dreg = alloc_dreg (cfg, STACK_I4);
6593 MONO_ADD_INS (start_bblock, ins);
6597 NEW_BBLOCK (cfg, end_bblock);
6598 cfg->bb_exit = end_bblock;
6599 end_bblock->cil_code = NULL;
6600 end_bblock->cil_length = 0;
6601 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6602 g_assert (cfg->num_bblocks == 2);
6604 arg_array = cfg->args;
6606 if (header->num_clauses) {
6607 cfg->spvars = g_hash_table_new (NULL, NULL);
6608 cfg->exvars = g_hash_table_new (NULL, NULL);
6610 /* handle exception clauses */
6611 for (i = 0; i < header->num_clauses; ++i) {
6612 MonoBasicBlock *try_bb;
6613 MonoExceptionClause *clause = &header->clauses [i];
6614 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6615 try_bb->real_offset = clause->try_offset;
6616 try_bb->try_start = TRUE;
6617 try_bb->region = ((i + 1) << 8) | clause->flags;
6618 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6619 tblock->real_offset = clause->handler_offset;
6620 tblock->flags |= BB_EXCEPTION_HANDLER;
6622 link_bblock (cfg, try_bb, tblock);
6624 if (*(ip + clause->handler_offset) == CEE_POP)
6625 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6627 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6628 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6629 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6630 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6631 MONO_ADD_INS (tblock, ins);
6633 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6634 /* finally clauses already have a seq point */
6635 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6636 MONO_ADD_INS (tblock, ins);
6639 /* todo: is a fault block unsafe to optimize? */
6640 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6641 tblock->flags |= BB_EXCEPTION_UNSAFE;
6645 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6647 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6649 /* catch and filter blocks get the exception object on the stack */
6650 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6651 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6652 MonoInst *dummy_use;
6654 /* mostly like handle_stack_args (), but just sets the input args */
6655 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6656 tblock->in_scount = 1;
6657 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6658 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6661 * Add a dummy use for the exvar so its liveness info will be
6665 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6667 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6668 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6669 tblock->flags |= BB_EXCEPTION_HANDLER;
6670 tblock->real_offset = clause->data.filter_offset;
6671 tblock->in_scount = 1;
6672 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6673 /* The filter block shares the exvar with the handler block */
6674 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6675 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6676 MONO_ADD_INS (tblock, ins);
6680 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6681 clause->data.catch_class &&
6682 cfg->generic_sharing_context &&
6683 mono_class_check_context_used (clause->data.catch_class)) {
6685 * In shared generic code with catch
6686 * clauses containing type variables
6687 * the exception handling code has to
6688 * be able to get to the rgctx.
6689 * Therefore we have to make sure that
6690 * the vtable/mrgctx argument (for
6691 * static or generic methods) or the
6692 * "this" argument (for non-static
6693 * methods) are live.
6695 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6696 mini_method_get_context (method)->method_inst ||
6697 method->klass->valuetype) {
6698 mono_get_vtable_var (cfg);
6700 MonoInst *dummy_use;
6702 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6707 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6708 cfg->cbb = start_bblock;
6709 cfg->args = arg_array;
6710 mono_save_args (cfg, sig, inline_args);
6713 /* FIRST CODE BLOCK */
6714 NEW_BBLOCK (cfg, bblock);
6715 bblock->cil_code = ip;
6719 ADD_BBLOCK (cfg, bblock);
6721 if (cfg->method == method) {
6722 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6723 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6724 MONO_INST_NEW (cfg, ins, OP_BREAK);
6725 MONO_ADD_INS (bblock, ins);
6729 if (mono_security_cas_enabled ())
6730 secman = mono_security_manager_get_methods ();
6732 security = (secman && mono_security_method_has_declsec (method));
6733 /* at this point having security doesn't mean we have any code to generate */
6734 if (security && (cfg->method == method)) {
6735 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6736 * And we do not want to enter the next section (with allocation) if we
6737 * have nothing to generate */
6738 security = mono_declsec_get_demands (method, &actions);
6741 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6742 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6744 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6745 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6746 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6748 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6749 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6753 mono_custom_attrs_free (custom);
6756 custom = mono_custom_attrs_from_class (wrapped->klass);
6757 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6761 mono_custom_attrs_free (custom);
6764 /* not a P/Invoke after all */
6769 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6770 /* we use a separate basic block for the initialization code */
6771 NEW_BBLOCK (cfg, init_localsbb);
6772 cfg->bb_init = init_localsbb;
6773 init_localsbb->real_offset = cfg->real_offset;
6774 start_bblock->next_bb = init_localsbb;
6775 init_localsbb->next_bb = bblock;
6776 link_bblock (cfg, start_bblock, init_localsbb);
6777 link_bblock (cfg, init_localsbb, bblock);
6779 cfg->cbb = init_localsbb;
6781 start_bblock->next_bb = bblock;
6782 link_bblock (cfg, start_bblock, bblock);
6785 if (cfg->gsharedvt && cfg->method == method) {
6786 MonoGSharedVtMethodInfo *info;
6787 MonoInst *var, *locals_var;
6790 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6791 info->method = cfg->method;
6793 info->entries = g_ptr_array_new ();
6794 cfg->gsharedvt_info = info;
6796 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6797 /* prevent it from being register allocated */
6798 //var->flags |= MONO_INST_INDIRECT;
6799 cfg->gsharedvt_info_var = var;
6801 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6802 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6804 /* Allocate locals */
6805 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6806 /* prevent it from being register allocated */
6807 //locals_var->flags |= MONO_INST_INDIRECT;
6808 cfg->gsharedvt_locals_var = locals_var;
6810 dreg = alloc_ireg (cfg);
6811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6813 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6814 ins->dreg = locals_var->dreg;
6816 MONO_ADD_INS (cfg->cbb, ins);
6817 cfg->gsharedvt_locals_var_ins = ins;
6819 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6822 ins->flags |= MONO_INST_INIT;
6826 /* at this point we know, if security is TRUE, that some code needs to be generated */
6827 if (security && (cfg->method == method)) {
6830 cfg->stat_cas_demand_generation++;
6832 if (actions.demand.blob) {
6833 /* Add code for SecurityAction.Demand */
6834 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6835 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6836 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6837 mono_emit_method_call (cfg, secman->demand, args, NULL);
6839 if (actions.noncasdemand.blob) {
6840 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6841 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6842 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6843 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6844 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6845 mono_emit_method_call (cfg, secman->demand, args, NULL);
6847 if (actions.demandchoice.blob) {
6848 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6849 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6850 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6851 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6852 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6856 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6858 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6861 if (mono_security_core_clr_enabled ()) {
6862 /* check if this is native code, e.g. an icall or a p/invoke */
6863 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6864 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6866 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6867 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6869 /* if this is a native call then it can only be JITted from platform code */
6870 if ((icall || pinvk) && method->klass && method->klass->image) {
6871 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6872 MonoException *ex = icall ? mono_get_exception_security () :
6873 mono_get_exception_method_access ();
6874 emit_throw_exception (cfg, ex);
6881 if (header->code_size == 0)
6884 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6889 if (cfg->method == method)
6890 mono_debug_init_method (cfg, bblock, breakpoint_id);
6892 for (n = 0; n < header->num_locals; ++n) {
6893 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6898 /* We force the vtable variable here for all shared methods
6899 for the possibility that they might show up in a stack
6900 trace where their exact instantiation is needed. */
6901 if (cfg->generic_sharing_context && method == cfg->method) {
6902 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6903 mini_method_get_context (method)->method_inst ||
6904 method->klass->valuetype) {
6905 mono_get_vtable_var (cfg);
6907 /* FIXME: Is there a better way to do this?
6908 We need the variable live for the duration
6909 of the whole method. */
6910 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6914 /* add a check for this != NULL to inlined methods */
6915 if (is_virtual_call) {
6918 NEW_ARGLOAD (cfg, arg_ins, 0);
6919 MONO_ADD_INS (cfg->cbb, arg_ins);
6920 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6923 skip_dead_blocks = !dont_verify;
6924 if (skip_dead_blocks) {
6925 original_bb = bb = mono_basic_block_split (method, &error);
6926 if (!mono_error_ok (&error)) {
6927 mono_error_cleanup (&error);
6933 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6934 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6937 start_new_bblock = 0;
6940 if (cfg->method == method)
6941 cfg->real_offset = ip - header->code;
6943 cfg->real_offset = inline_offset;
6948 if (start_new_bblock) {
6949 bblock->cil_length = ip - bblock->cil_code;
6950 if (start_new_bblock == 2) {
6951 g_assert (ip == tblock->cil_code);
6953 GET_BBLOCK (cfg, tblock, ip);
6955 bblock->next_bb = tblock;
6958 start_new_bblock = 0;
6959 for (i = 0; i < bblock->in_scount; ++i) {
6960 if (cfg->verbose_level > 3)
6961 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6962 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6966 g_slist_free (class_inits);
6969 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6970 link_bblock (cfg, bblock, tblock);
6971 if (sp != stack_start) {
6972 handle_stack_args (cfg, stack_start, sp - stack_start);
6974 CHECK_UNVERIFIABLE (cfg);
6976 bblock->next_bb = tblock;
6979 for (i = 0; i < bblock->in_scount; ++i) {
6980 if (cfg->verbose_level > 3)
6981 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6982 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6985 g_slist_free (class_inits);
6990 if (skip_dead_blocks) {
6991 int ip_offset = ip - header->code;
6993 if (ip_offset == bb->end)
6997 int op_size = mono_opcode_size (ip, end);
6998 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7000 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7002 if (ip_offset + op_size == bb->end) {
7003 MONO_INST_NEW (cfg, ins, OP_NOP);
7004 MONO_ADD_INS (bblock, ins);
7005 start_new_bblock = 1;
7013 * Sequence points are points where the debugger can place a breakpoint.
7014 * Currently, we generate these automatically at points where the IL
7017 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7019 * Make methods interruptible at the beginning, and at the targets of
7020 * backward branches.
7021 * Also, do this at the start of every bblock in methods with clauses too,
7022 * to be able to handle instructions with imprecise control flow like
7024 * Backward branches are handled at the end of method-to-ir ().
7026 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7028 /* Avoid sequence points on empty IL like .volatile */
7029 // FIXME: Enable this
7030 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7031 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7032 MONO_ADD_INS (cfg->cbb, ins);
7035 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7038 bblock->real_offset = cfg->real_offset;
7040 if ((cfg->method == method) && cfg->coverage_info) {
7041 guint32 cil_offset = ip - header->code;
7042 cfg->coverage_info->data [cil_offset].cil_code = ip;
7044 /* TODO: Use an increment here */
7045 #if defined(TARGET_X86)
7046 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7047 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7049 MONO_ADD_INS (cfg->cbb, ins);
7051 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7052 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7056 if (cfg->verbose_level > 3)
7057 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7061 if (seq_points && !sym_seq_points && sp != stack_start) {
7063 * The C# compiler uses these nops to notify the JIT that it should
7064 * insert seq points.
7066 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7067 MONO_ADD_INS (cfg->cbb, ins);
7069 if (cfg->keep_cil_nops)
7070 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7072 MONO_INST_NEW (cfg, ins, OP_NOP);
7074 MONO_ADD_INS (bblock, ins);
7077 if (should_insert_brekpoint (cfg->method)) {
7078 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7080 MONO_INST_NEW (cfg, ins, OP_NOP);
7083 MONO_ADD_INS (bblock, ins);
7089 CHECK_STACK_OVF (1);
7090 n = (*ip)-CEE_LDARG_0;
7092 EMIT_NEW_ARGLOAD (cfg, ins, n);
7100 CHECK_STACK_OVF (1);
7101 n = (*ip)-CEE_LDLOC_0;
7103 EMIT_NEW_LOCLOAD (cfg, ins, n);
7112 n = (*ip)-CEE_STLOC_0;
7115 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7117 emit_stloc_ir (cfg, sp, header, n);
7124 CHECK_STACK_OVF (1);
7127 EMIT_NEW_ARGLOAD (cfg, ins, n);
7133 CHECK_STACK_OVF (1);
7136 NEW_ARGLOADA (cfg, ins, n);
7137 MONO_ADD_INS (cfg->cbb, ins);
7147 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7149 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7154 CHECK_STACK_OVF (1);
7157 EMIT_NEW_LOCLOAD (cfg, ins, n);
7161 case CEE_LDLOCA_S: {
7162 unsigned char *tmp_ip;
7164 CHECK_STACK_OVF (1);
7165 CHECK_LOCAL (ip [1]);
7167 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7173 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7182 CHECK_LOCAL (ip [1]);
7183 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7185 emit_stloc_ir (cfg, sp, header, ip [1]);
7190 CHECK_STACK_OVF (1);
7191 EMIT_NEW_PCONST (cfg, ins, NULL);
7192 ins->type = STACK_OBJ;
7197 CHECK_STACK_OVF (1);
7198 EMIT_NEW_ICONST (cfg, ins, -1);
7211 CHECK_STACK_OVF (1);
7212 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7218 CHECK_STACK_OVF (1);
7220 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7226 CHECK_STACK_OVF (1);
7227 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7233 CHECK_STACK_OVF (1);
7234 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7235 ins->type = STACK_I8;
7236 ins->dreg = alloc_dreg (cfg, STACK_I8);
7238 ins->inst_l = (gint64)read64 (ip);
7239 MONO_ADD_INS (bblock, ins);
7245 gboolean use_aotconst = FALSE;
7247 #ifdef TARGET_POWERPC
7248 /* FIXME: Clean this up */
7249 if (cfg->compile_aot)
7250 use_aotconst = TRUE;
7253 /* FIXME: we should really allocate this only late in the compilation process */
7254 f = mono_domain_alloc (cfg->domain, sizeof (float));
7256 CHECK_STACK_OVF (1);
7262 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7264 dreg = alloc_freg (cfg);
7265 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7266 ins->type = STACK_R8;
7268 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7269 ins->type = STACK_R8;
7270 ins->dreg = alloc_dreg (cfg, STACK_R8);
7272 MONO_ADD_INS (bblock, ins);
7282 gboolean use_aotconst = FALSE;
7284 #ifdef TARGET_POWERPC
7285 /* FIXME: Clean this up */
7286 if (cfg->compile_aot)
7287 use_aotconst = TRUE;
7290 /* FIXME: we should really allocate this only late in the compilation process */
7291 d = mono_domain_alloc (cfg->domain, sizeof (double));
7293 CHECK_STACK_OVF (1);
7299 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7301 dreg = alloc_freg (cfg);
7302 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7303 ins->type = STACK_R8;
7305 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7306 ins->type = STACK_R8;
7307 ins->dreg = alloc_dreg (cfg, STACK_R8);
7309 MONO_ADD_INS (bblock, ins);
7318 MonoInst *temp, *store;
7320 CHECK_STACK_OVF (1);
7324 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7325 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7327 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7330 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7343 if (sp [0]->type == STACK_R8)
7344 /* we need to pop the value from the x86 FP stack */
7345 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7351 INLINE_FAILURE ("jmp");
7352 GSHAREDVT_FAILURE (*ip);
7355 if (stack_start != sp)
7357 token = read32 (ip + 1);
7358 /* FIXME: check the signature matches */
7359 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7361 if (!cmethod || mono_loader_get_last_error ())
7364 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7365 GENERIC_SHARING_FAILURE (CEE_JMP);
7367 if (mono_security_cas_enabled ())
7368 CHECK_CFG_EXCEPTION;
7370 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7372 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7375 /* Handle tail calls similarly to calls */
7376 n = fsig->param_count + fsig->hasthis;
7378 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7379 call->method = cmethod;
7380 call->tail_call = TRUE;
7381 call->signature = mono_method_signature (cmethod);
7382 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7383 call->inst.inst_p0 = cmethod;
7384 for (i = 0; i < n; ++i)
7385 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7387 mono_arch_emit_call (cfg, call);
7388 MONO_ADD_INS (bblock, (MonoInst*)call);
7391 for (i = 0; i < num_args; ++i)
7392 /* Prevent arguments from being optimized away */
7393 arg_array [i]->flags |= MONO_INST_VOLATILE;
7395 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7396 ins = (MonoInst*)call;
7397 ins->inst_p0 = cmethod;
7398 MONO_ADD_INS (bblock, ins);
7402 start_new_bblock = 1;
7407 case CEE_CALLVIRT: {
7408 MonoInst *addr = NULL;
7409 MonoMethodSignature *fsig = NULL;
7411 int virtual = *ip == CEE_CALLVIRT;
7412 int calli = *ip == CEE_CALLI;
7413 gboolean pass_imt_from_rgctx = FALSE;
7414 MonoInst *imt_arg = NULL;
7415 MonoInst *keep_this_alive = NULL;
7416 gboolean pass_vtable = FALSE;
7417 gboolean pass_mrgctx = FALSE;
7418 MonoInst *vtable_arg = NULL;
7419 gboolean check_this = FALSE;
7420 gboolean supported_tail_call = FALSE;
7421 gboolean tail_call = FALSE;
7422 gboolean need_seq_point = FALSE;
7423 guint32 call_opcode = *ip;
7424 gboolean emit_widen = TRUE;
7425 gboolean push_res = TRUE;
7426 gboolean skip_ret = FALSE;
7427 gboolean delegate_invoke = FALSE;
7430 token = read32 (ip + 1);
7435 //GSHAREDVT_FAILURE (*ip);
7440 fsig = mini_get_signature (method, token, generic_context);
7441 n = fsig->param_count + fsig->hasthis;
7443 if (method->dynamic && fsig->pinvoke) {
7447 * This is a call through a function pointer using a pinvoke
7448 * signature. Have to create a wrapper and call that instead.
7449 * FIXME: This is very slow, need to create a wrapper at JIT time
7450 * instead based on the signature.
7452 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7453 EMIT_NEW_PCONST (cfg, args [1], fsig);
7455 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7458 MonoMethod *cil_method;
7460 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7461 cil_method = cmethod;
7463 if (constrained_call) {
7464 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7465 if (cfg->verbose_level > 2)
7466 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7467 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7468 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7469 cfg->generic_sharing_context)) {
7470 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7473 if (cfg->verbose_level > 2)
7474 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7476 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7478 * This is needed since get_method_constrained can't find
7479 * the method in klass representing a type var.
7480 * The type var is guaranteed to be a reference type in this
7483 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7484 g_assert (!cmethod->klass->valuetype);
7486 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7491 if (!cmethod || mono_loader_get_last_error ())
7493 if (!dont_verify && !cfg->skip_visibility) {
7494 MonoMethod *target_method = cil_method;
7495 if (method->is_inflated) {
7496 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7498 if (!mono_method_can_access_method (method_definition, target_method) &&
7499 !mono_method_can_access_method (method, cil_method))
7500 METHOD_ACCESS_FAILURE;
7503 if (mono_security_core_clr_enabled ())
7504 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7506 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7507 /* MS.NET seems to silently convert this to a callvirt */
7512 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7513 * converts to a callvirt.
7515 * tests/bug-515884.il is an example of this behavior
7517 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7518 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7519 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7523 if (!cmethod->klass->inited)
7524 if (!mono_class_init (cmethod->klass))
7525 TYPE_LOAD_ERROR (cmethod->klass);
7527 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7528 mini_class_is_system_array (cmethod->klass)) {
7529 array_rank = cmethod->klass->rank;
7530 fsig = mono_method_signature (cmethod);
7532 fsig = mono_method_signature (cmethod);
7537 if (fsig->pinvoke) {
7538 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7539 check_for_pending_exc, FALSE);
7540 fsig = mono_method_signature (wrapper);
7541 } else if (constrained_call) {
7542 fsig = mono_method_signature (cmethod);
7544 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7548 mono_save_token_info (cfg, image, token, cil_method);
7550 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7552 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7553 * foo (bar (), baz ())
7554 * works correctly. MS does this also:
7555 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7556 * The problem with this approach is that the debugger will stop after all calls returning a value,
7557 * even for simple cases, like:
7560 /* Special case a few common successor opcodes */
7561 if (!(ip + 5 < end && ip [5] == CEE_POP))
7562 need_seq_point = TRUE;
7565 n = fsig->param_count + fsig->hasthis;
7567 /* Don't support calls made using type arguments for now */
7569 if (cfg->gsharedvt) {
7570 if (mini_is_gsharedvt_signature (cfg, fsig))
7571 GSHAREDVT_FAILURE (*ip);
7575 if (mono_security_cas_enabled ()) {
7576 if (check_linkdemand (cfg, method, cmethod))
7577 INLINE_FAILURE ("linkdemand");
7578 CHECK_CFG_EXCEPTION;
7581 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7582 g_assert_not_reached ();
7585 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7588 if (!cfg->generic_sharing_context && cmethod)
7589 g_assert (!mono_method_check_context_used (cmethod));
7593 //g_assert (!virtual || fsig->hasthis);
7597 if (constrained_call) {
7598 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7600 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7602 /* Special case Object methods as they are easy to implement */
7603 if (cmethod->klass == mono_defaults.object_class) {
7604 MonoInst *args [16];
7607 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7608 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7610 if (!strcmp (cmethod->name, "ToString")) {
7611 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7612 } else if (!strcmp (cmethod->name, "Equals")) {
7614 ins = mono_emit_jit_icall (cfg, mono_object_equals_gsharedvt, args);
7615 } else if (!strcmp (cmethod->name, "GetHashCode")) {
7616 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7618 GSHAREDVT_FAILURE (*ip);
7621 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7622 /* The 'Own method' case below */
7624 GSHAREDVT_FAILURE (*ip);
7628 * We have the `constrained.' prefix opcode.
7630 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7632 * The type parameter is instantiated as a valuetype,
7633 * but that type doesn't override the method we're
7634 * calling, so we need to box `this'.
7636 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7637 ins->klass = constrained_call;
7638 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7639 CHECK_CFG_EXCEPTION;
7640 } else if (!constrained_call->valuetype) {
7641 int dreg = alloc_ireg_ref (cfg);
7644 * The type parameter is instantiated as a reference
7645 * type. We have a managed pointer on the stack, so
7646 * we need to dereference it here.
7648 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7649 ins->type = STACK_OBJ;
7652 if (cmethod->klass->valuetype) {
7655 /* Interface method */
7658 mono_class_setup_vtable (constrained_call);
7659 CHECK_TYPELOAD (constrained_call);
7660 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7662 TYPE_LOAD_ERROR (constrained_call);
7663 slot = mono_method_get_vtable_slot (cmethod);
7665 TYPE_LOAD_ERROR (cmethod->klass);
7666 cmethod = constrained_call->vtable [ioffset + slot];
7668 if (cmethod->klass == mono_defaults.enum_class) {
7669 /* Enum implements some interfaces, so treat this as the first case */
7670 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7671 ins->klass = constrained_call;
7672 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7673 CHECK_CFG_EXCEPTION;
7678 constrained_call = NULL;
7681 if (!calli && check_call_signature (cfg, fsig, sp))
7684 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7685 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7686 delegate_invoke = TRUE;
7689 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7691 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7692 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7700 * If the callee is a shared method, then its static cctor
7701 * might not get called after the call was patched.
7703 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7704 emit_generic_class_init (cfg, cmethod->klass);
7705 CHECK_TYPELOAD (cmethod->klass);
7709 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7711 if (cfg->generic_sharing_context && cmethod) {
7712 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7714 context_used = mini_method_check_context_used (cfg, cmethod);
7716 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7717 /* Generic method interface
7718 calls are resolved via a
7719 helper function and don't
7721 if (!cmethod_context || !cmethod_context->method_inst)
7722 pass_imt_from_rgctx = TRUE;
7726 * If a shared method calls another
7727 * shared method then the caller must
7728 * have a generic sharing context
7729 * because the magic trampoline
7730 * requires it. FIXME: We shouldn't
7731 * have to force the vtable/mrgctx
7732 * variable here. Instead there
7733 * should be a flag in the cfg to
7734 * request a generic sharing context.
7737 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7738 mono_get_vtable_var (cfg);
7743 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7745 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7747 CHECK_TYPELOAD (cmethod->klass);
7748 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7753 g_assert (!vtable_arg);
7755 if (!cfg->compile_aot) {
7757 * emit_get_rgctx_method () calls mono_class_vtable () so check
7758 * for type load errors before.
7760 mono_class_setup_vtable (cmethod->klass);
7761 CHECK_TYPELOAD (cmethod->klass);
7764 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7766 /* !marshalbyref is needed to properly handle generic methods + remoting */
7767 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7768 MONO_METHOD_IS_FINAL (cmethod)) &&
7769 !mono_class_is_marshalbyref (cmethod->klass)) {
7776 if (pass_imt_from_rgctx) {
7777 g_assert (!pass_vtable);
7780 imt_arg = emit_get_rgctx_method (cfg, context_used,
7781 cmethod, MONO_RGCTX_INFO_METHOD);
7785 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7787 /* Calling virtual generic methods */
7788 if (cmethod && virtual &&
7789 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7790 !(MONO_METHOD_IS_FINAL (cmethod) &&
7791 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7792 fsig->generic_param_count &&
7793 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7794 MonoInst *this_temp, *this_arg_temp, *store;
7795 MonoInst *iargs [4];
7796 gboolean use_imt = FALSE;
7798 g_assert (fsig->is_inflated);
7800 /* Prevent inlining of methods that contain indirect calls */
7801 INLINE_FAILURE ("virtual generic call");
7803 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7804 GSHAREDVT_FAILURE (*ip);
7806 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7807 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7812 g_assert (!imt_arg);
7814 g_assert (cmethod->is_inflated);
7815 imt_arg = emit_get_rgctx_method (cfg, context_used,
7816 cmethod, MONO_RGCTX_INFO_METHOD);
7817 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7819 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7820 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7821 MONO_ADD_INS (bblock, store);
7823 /* FIXME: This should be a managed pointer */
7824 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7826 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7827 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7828 cmethod, MONO_RGCTX_INFO_METHOD);
7829 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7830 addr = mono_emit_jit_icall (cfg,
7831 mono_helper_compile_generic_method, iargs);
7833 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7835 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7842 * Implement a workaround for the inherent races involved in locking:
7848 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7849 * try block, the Exit () won't be executed, see:
7850 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7851 * To work around this, we extend such try blocks to include the last x bytes
7852 * of the Monitor.Enter () call.
7854 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7855 MonoBasicBlock *tbb;
7857 GET_BBLOCK (cfg, tbb, ip + 5);
7859 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7860 * from Monitor.Enter like ArgumentNullException.
7862 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7863 /* Mark this bblock as needing to be extended */
7864 tbb->extend_try_block = TRUE;
7868 /* Conversion to a JIT intrinsic */
7869 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7872 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7879 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7880 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7881 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7882 !g_list_find (dont_inline, cmethod)) {
7884 gboolean always = FALSE;
7886 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7887 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7888 /* Prevent inlining of methods that call wrappers */
7889 INLINE_FAILURE ("wrapper call");
7890 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7894 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7896 cfg->real_offset += 5;
7899 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7900 /* *sp is already set by inline_method */
7905 inline_costs += costs;
7911 /* Tail recursion elimination */
7912 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7913 gboolean has_vtargs = FALSE;
7916 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7917 INLINE_FAILURE ("tail call");
7919 /* keep it simple */
7920 for (i = fsig->param_count - 1; i >= 0; i--) {
7921 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7926 for (i = 0; i < n; ++i)
7927 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7928 MONO_INST_NEW (cfg, ins, OP_BR);
7929 MONO_ADD_INS (bblock, ins);
7930 tblock = start_bblock->out_bb [0];
7931 link_bblock (cfg, bblock, tblock);
7932 ins->inst_target_bb = tblock;
7933 start_new_bblock = 1;
7935 /* skip the CEE_RET, too */
7936 if (ip_in_bb (cfg, bblock, ip + 5))
7943 inline_costs += 10 * num_calls++;
7946 * Making generic calls out of gsharedvt methods.
7948 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7949 MonoRgctxInfoType info_type;
7952 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7953 //GSHAREDVT_FAILURE (*ip);
7954 // disable for possible remoting calls
7955 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7956 GSHAREDVT_FAILURE (*ip);
7957 if (fsig->generic_param_count) {
7958 /* virtual generic call */
7959 g_assert (mono_use_imt);
7960 g_assert (!imt_arg);
7961 /* Same as the virtual generic case above */
7962 imt_arg = emit_get_rgctx_method (cfg, context_used,
7963 cmethod, MONO_RGCTX_INFO_METHOD);
7964 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7969 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7970 /* test_0_multi_dim_arrays () in gshared.cs */
7971 GSHAREDVT_FAILURE (*ip);
7973 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7974 keep_this_alive = sp [0];
7976 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7977 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7979 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7980 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7982 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7984 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7986 * We pass the address to the gsharedvt trampoline in the rgctx reg
7988 MonoInst *callee = addr;
7990 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
7992 GSHAREDVT_FAILURE (*ip);
7994 addr = emit_get_rgctx_sig (cfg, context_used,
7995 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
7996 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8000 /* Generic sharing */
8001 /* FIXME: only do this for generic methods if
8002 they are not shared! */
8003 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8004 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8005 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8006 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8007 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8008 INLINE_FAILURE ("gshared");
8010 g_assert (cfg->generic_sharing_context && cmethod);
8014 * We are compiling a call to a
8015 * generic method from shared code,
8016 * which means that we have to look up
8017 * the method in the rgctx and do an
8021 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8023 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8024 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8028 /* Indirect calls */
8030 if (call_opcode == CEE_CALL)
8031 g_assert (context_used);
8032 else if (call_opcode == CEE_CALLI)
8033 g_assert (!vtable_arg);
8035 /* FIXME: what the hell is this??? */
8036 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8037 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8039 /* Prevent inlining of methods with indirect calls */
8040 INLINE_FAILURE ("indirect call");
8042 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8047 * Instead of emitting an indirect call, emit a direct call
8048 * with the contents of the aotconst as the patch info.
8050 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8051 info_type = addr->inst_c1;
8052 info_data = addr->inst_p0;
8054 info_type = addr->inst_right->inst_c1;
8055 info_data = addr->inst_right->inst_left;
8058 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8059 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8064 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8072 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8073 MonoInst *val = sp [fsig->param_count];
8075 if (val->type == STACK_OBJ) {
8076 MonoInst *iargs [2];
8081 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8084 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8085 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8086 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8087 emit_write_barrier (cfg, addr, val, 0);
8088 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8089 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8091 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8092 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8093 if (!cmethod->klass->element_class->valuetype && !readonly)
8094 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8095 CHECK_TYPELOAD (cmethod->klass);
8098 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8101 g_assert_not_reached ();
8108 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8112 /* Tail prefix / tail call optimization */
8114 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8115 /* FIXME: runtime generic context pointer for jumps? */
8116 /* FIXME: handle this for generic sharing eventually */
8117 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8118 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8119 supported_tail_call = TRUE;
8120 if (supported_tail_call) {
8121 if (call_opcode != CEE_CALL)
8122 supported_tail_call = FALSE;
8125 if (supported_tail_call) {
8128 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8129 INLINE_FAILURE ("tail call");
8131 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8133 if (ARCH_USE_OP_TAIL_CALL) {
8134 /* Handle tail calls similarly to normal calls */
8137 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8138 call->tail_call = TRUE;
8139 call->method = cmethod;
8140 call->signature = mono_method_signature (cmethod);
8143 * We implement tail calls by storing the actual arguments into the
8144 * argument variables, then emitting a CEE_JMP.
8146 for (i = 0; i < n; ++i) {
8147 /* Prevent argument from being register allocated */
8148 arg_array [i]->flags |= MONO_INST_VOLATILE;
8149 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8151 ins = (MonoInst*)call;
8152 ins->inst_p0 = cmethod;
8153 ins->inst_p1 = arg_array [0];
8154 MONO_ADD_INS (bblock, ins);
8155 link_bblock (cfg, bblock, end_bblock);
8156 start_new_bblock = 1;
8158 // FIXME: Eliminate unreachable epilogs
8161 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8162 * only reachable from this call.
8164 GET_BBLOCK (cfg, tblock, ip + 5);
8165 if (tblock == bblock || tblock->in_count == 0)
8174 * Synchronized wrappers.
8175 * Its hard to determine where to replace a method with its synchronized
8176 * wrapper without causing an infinite recursion. The current solution is
8177 * to add the synchronized wrapper in the trampolines, and to
8178 * change the called method to a dummy wrapper, and resolve that wrapper
8179 * to the real method in mono_jit_compile_method ().
8181 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
8182 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8185 INLINE_FAILURE ("call");
8186 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8187 imt_arg, vtable_arg);
8190 link_bblock (cfg, bblock, end_bblock);
8191 start_new_bblock = 1;
8193 // FIXME: Eliminate unreachable epilogs
8196 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8197 * only reachable from this call.
8199 GET_BBLOCK (cfg, tblock, ip + 5);
8200 if (tblock == bblock || tblock->in_count == 0)
8207 /* End of call, INS should contain the result of the call, if any */
8209 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8212 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8217 if (keep_this_alive) {
8218 MonoInst *dummy_use;
8220 /* See mono_emit_method_call_full () */
8221 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8224 CHECK_CFG_EXCEPTION;
8228 g_assert (*ip == CEE_RET);
8232 constrained_call = NULL;
8234 emit_seq_point (cfg, method, ip, FALSE);
8238 if (cfg->method != method) {
8239 /* return from inlined method */
8241 * If in_count == 0, that means the ret is unreachable due to
8242 * being preceeded by a throw. In that case, inline_method () will
8243 * handle setting the return value
8244 * (test case: test_0_inline_throw ()).
8246 if (return_var && cfg->cbb->in_count) {
8247 MonoType *ret_type = mono_method_signature (method)->ret;
8253 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8256 //g_assert (returnvar != -1);
8257 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8258 cfg->ret_var_set = TRUE;
8262 MonoType *ret_type = mono_method_signature (method)->ret;
8264 if (seq_points && !sym_seq_points) {
8266 * Place a seq point here too even through the IL stack is not
8267 * empty, so a step over on
8270 * will work correctly.
8272 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8273 MONO_ADD_INS (cfg->cbb, ins);
8276 g_assert (!return_var);
8280 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8283 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8286 if (!cfg->vret_addr) {
8289 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8291 EMIT_NEW_RETLOADA (cfg, ret_addr);
8293 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8294 ins->klass = mono_class_from_mono_type (ret_type);
8297 #ifdef MONO_ARCH_SOFT_FLOAT
8298 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8299 MonoInst *iargs [1];
8303 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8304 mono_arch_emit_setret (cfg, method, conv);
8306 mono_arch_emit_setret (cfg, method, *sp);
8309 mono_arch_emit_setret (cfg, method, *sp);
8314 if (sp != stack_start)
8316 MONO_INST_NEW (cfg, ins, OP_BR);
8318 ins->inst_target_bb = end_bblock;
8319 MONO_ADD_INS (bblock, ins);
8320 link_bblock (cfg, bblock, end_bblock);
8321 start_new_bblock = 1;
8325 MONO_INST_NEW (cfg, ins, OP_BR);
8327 target = ip + 1 + (signed char)(*ip);
8329 GET_BBLOCK (cfg, tblock, target);
8330 link_bblock (cfg, bblock, tblock);
8331 ins->inst_target_bb = tblock;
8332 if (sp != stack_start) {
8333 handle_stack_args (cfg, stack_start, sp - stack_start);
8335 CHECK_UNVERIFIABLE (cfg);
8337 MONO_ADD_INS (bblock, ins);
8338 start_new_bblock = 1;
8339 inline_costs += BRANCH_COST;
8353 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8355 target = ip + 1 + *(signed char*)ip;
8361 inline_costs += BRANCH_COST;
8365 MONO_INST_NEW (cfg, ins, OP_BR);
8368 target = ip + 4 + (gint32)read32(ip);
8370 GET_BBLOCK (cfg, tblock, target);
8371 link_bblock (cfg, bblock, tblock);
8372 ins->inst_target_bb = tblock;
8373 if (sp != stack_start) {
8374 handle_stack_args (cfg, stack_start, sp - stack_start);
8376 CHECK_UNVERIFIABLE (cfg);
8379 MONO_ADD_INS (bblock, ins);
8381 start_new_bblock = 1;
8382 inline_costs += BRANCH_COST;
8389 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8390 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8391 guint32 opsize = is_short ? 1 : 4;
8393 CHECK_OPSIZE (opsize);
8395 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8398 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8403 GET_BBLOCK (cfg, tblock, target);
8404 link_bblock (cfg, bblock, tblock);
8405 GET_BBLOCK (cfg, tblock, ip);
8406 link_bblock (cfg, bblock, tblock);
8408 if (sp != stack_start) {
8409 handle_stack_args (cfg, stack_start, sp - stack_start);
8410 CHECK_UNVERIFIABLE (cfg);
8413 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8414 cmp->sreg1 = sp [0]->dreg;
8415 type_from_op (cmp, sp [0], NULL);
8418 #if SIZEOF_REGISTER == 4
8419 if (cmp->opcode == OP_LCOMPARE_IMM) {
8420 /* Convert it to OP_LCOMPARE */
8421 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8422 ins->type = STACK_I8;
8423 ins->dreg = alloc_dreg (cfg, STACK_I8);
8425 MONO_ADD_INS (bblock, ins);
8426 cmp->opcode = OP_LCOMPARE;
8427 cmp->sreg2 = ins->dreg;
8430 MONO_ADD_INS (bblock, cmp);
8432 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8433 type_from_op (ins, sp [0], NULL);
8434 MONO_ADD_INS (bblock, ins);
8435 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8436 GET_BBLOCK (cfg, tblock, target);
8437 ins->inst_true_bb = tblock;
8438 GET_BBLOCK (cfg, tblock, ip);
8439 ins->inst_false_bb = tblock;
8440 start_new_bblock = 2;
8443 inline_costs += BRANCH_COST;
8458 MONO_INST_NEW (cfg, ins, *ip);
8460 target = ip + 4 + (gint32)read32(ip);
8466 inline_costs += BRANCH_COST;
8470 MonoBasicBlock **targets;
8471 MonoBasicBlock *default_bblock;
8472 MonoJumpInfoBBTable *table;
8473 int offset_reg = alloc_preg (cfg);
8474 int target_reg = alloc_preg (cfg);
8475 int table_reg = alloc_preg (cfg);
8476 int sum_reg = alloc_preg (cfg);
8477 gboolean use_op_switch;
8481 n = read32 (ip + 1);
8484 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8488 CHECK_OPSIZE (n * sizeof (guint32));
8489 target = ip + n * sizeof (guint32);
8491 GET_BBLOCK (cfg, default_bblock, target);
8492 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8494 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8495 for (i = 0; i < n; ++i) {
8496 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8497 targets [i] = tblock;
8498 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8502 if (sp != stack_start) {
8504 * Link the current bb with the targets as well, so handle_stack_args
8505 * will set their in_stack correctly.
8507 link_bblock (cfg, bblock, default_bblock);
8508 for (i = 0; i < n; ++i)
8509 link_bblock (cfg, bblock, targets [i]);
8511 handle_stack_args (cfg, stack_start, sp - stack_start);
8513 CHECK_UNVERIFIABLE (cfg);
8516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8520 for (i = 0; i < n; ++i)
8521 link_bblock (cfg, bblock, targets [i]);
8523 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8524 table->table = targets;
8525 table->table_size = n;
8527 use_op_switch = FALSE;
8529 /* ARM implements SWITCH statements differently */
8530 /* FIXME: Make it use the generic implementation */
8531 if (!cfg->compile_aot)
8532 use_op_switch = TRUE;
8535 if (COMPILE_LLVM (cfg))
8536 use_op_switch = TRUE;
8538 cfg->cbb->has_jump_table = 1;
8540 if (use_op_switch) {
8541 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8542 ins->sreg1 = src1->dreg;
8543 ins->inst_p0 = table;
8544 ins->inst_many_bb = targets;
8545 ins->klass = GUINT_TO_POINTER (n);
8546 MONO_ADD_INS (cfg->cbb, ins);
8548 if (sizeof (gpointer) == 8)
8549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8553 #if SIZEOF_REGISTER == 8
8554 /* The upper word might not be zero, and we add it to a 64 bit address later */
8555 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8558 if (cfg->compile_aot) {
8559 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8561 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8562 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8563 ins->inst_p0 = table;
8564 ins->dreg = table_reg;
8565 MONO_ADD_INS (cfg->cbb, ins);
8568 /* FIXME: Use load_memindex */
8569 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8571 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8573 start_new_bblock = 1;
8574 inline_costs += (BRANCH_COST * 2);
8594 dreg = alloc_freg (cfg);
8597 dreg = alloc_lreg (cfg);
8600 dreg = alloc_ireg_ref (cfg);
8603 dreg = alloc_preg (cfg);
8606 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8607 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8608 ins->flags |= ins_flag;
8610 MONO_ADD_INS (bblock, ins);
8612 if (ins->flags & MONO_INST_VOLATILE) {
8613 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8614 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8615 emit_memory_barrier (cfg, FullBarrier);
8630 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8631 ins->flags |= ins_flag;
8634 if (ins->flags & MONO_INST_VOLATILE) {
8635 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8636 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8637 emit_memory_barrier (cfg, FullBarrier);
8640 MONO_ADD_INS (bblock, ins);
8642 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8643 emit_write_barrier (cfg, sp [0], sp [1], -1);
8652 MONO_INST_NEW (cfg, ins, (*ip));
8654 ins->sreg1 = sp [0]->dreg;
8655 ins->sreg2 = sp [1]->dreg;
8656 type_from_op (ins, sp [0], sp [1]);
8658 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8660 /* Use the immediate opcodes if possible */
8661 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8662 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8663 if (imm_opcode != -1) {
8664 ins->opcode = imm_opcode;
8665 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8668 sp [1]->opcode = OP_NOP;
8672 MONO_ADD_INS ((cfg)->cbb, (ins));
8674 *sp++ = mono_decompose_opcode (cfg, ins);
8691 MONO_INST_NEW (cfg, ins, (*ip));
8693 ins->sreg1 = sp [0]->dreg;
8694 ins->sreg2 = sp [1]->dreg;
8695 type_from_op (ins, sp [0], sp [1]);
8697 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8698 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8700 /* FIXME: Pass opcode to is_inst_imm */
8702 /* Use the immediate opcodes if possible */
8703 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8706 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8707 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8708 /* Keep emulated opcodes which are optimized away later */
8709 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8710 imm_opcode = mono_op_to_op_imm (ins->opcode);
8713 if (imm_opcode != -1) {
8714 ins->opcode = imm_opcode;
8715 if (sp [1]->opcode == OP_I8CONST) {
8716 #if SIZEOF_REGISTER == 8
8717 ins->inst_imm = sp [1]->inst_l;
8719 ins->inst_ls_word = sp [1]->inst_ls_word;
8720 ins->inst_ms_word = sp [1]->inst_ms_word;
8724 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8727 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8728 if (sp [1]->next == NULL)
8729 sp [1]->opcode = OP_NOP;
8732 MONO_ADD_INS ((cfg)->cbb, (ins));
8734 *sp++ = mono_decompose_opcode (cfg, ins);
8747 case CEE_CONV_OVF_I8:
8748 case CEE_CONV_OVF_U8:
8752 /* Special case this earlier so we have long constants in the IR */
8753 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8754 int data = sp [-1]->inst_c0;
8755 sp [-1]->opcode = OP_I8CONST;
8756 sp [-1]->type = STACK_I8;
8757 #if SIZEOF_REGISTER == 8
8758 if ((*ip) == CEE_CONV_U8)
8759 sp [-1]->inst_c0 = (guint32)data;
8761 sp [-1]->inst_c0 = data;
8763 sp [-1]->inst_ls_word = data;
8764 if ((*ip) == CEE_CONV_U8)
8765 sp [-1]->inst_ms_word = 0;
8767 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8769 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8776 case CEE_CONV_OVF_I4:
8777 case CEE_CONV_OVF_I1:
8778 case CEE_CONV_OVF_I2:
8779 case CEE_CONV_OVF_I:
8780 case CEE_CONV_OVF_U:
8783 if (sp [-1]->type == STACK_R8) {
8784 ADD_UNOP (CEE_CONV_OVF_I8);
8791 case CEE_CONV_OVF_U1:
8792 case CEE_CONV_OVF_U2:
8793 case CEE_CONV_OVF_U4:
8796 if (sp [-1]->type == STACK_R8) {
8797 ADD_UNOP (CEE_CONV_OVF_U8);
8804 case CEE_CONV_OVF_I1_UN:
8805 case CEE_CONV_OVF_I2_UN:
8806 case CEE_CONV_OVF_I4_UN:
8807 case CEE_CONV_OVF_I8_UN:
8808 case CEE_CONV_OVF_U1_UN:
8809 case CEE_CONV_OVF_U2_UN:
8810 case CEE_CONV_OVF_U4_UN:
8811 case CEE_CONV_OVF_U8_UN:
8812 case CEE_CONV_OVF_I_UN:
8813 case CEE_CONV_OVF_U_UN:
8820 CHECK_CFG_EXCEPTION;
8824 case CEE_ADD_OVF_UN:
8826 case CEE_MUL_OVF_UN:
8828 case CEE_SUB_OVF_UN:
8834 GSHAREDVT_FAILURE (*ip);
8837 token = read32 (ip + 1);
8838 klass = mini_get_class (method, token, generic_context);
8839 CHECK_TYPELOAD (klass);
8841 if (generic_class_is_reference_type (cfg, klass)) {
8842 MonoInst *store, *load;
8843 int dreg = alloc_ireg_ref (cfg);
8845 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8846 load->flags |= ins_flag;
8847 MONO_ADD_INS (cfg->cbb, load);
8849 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8850 store->flags |= ins_flag;
8851 MONO_ADD_INS (cfg->cbb, store);
8853 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8854 emit_write_barrier (cfg, sp [0], sp [1], -1);
8856 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8868 token = read32 (ip + 1);
8869 klass = mini_get_class (method, token, generic_context);
8870 CHECK_TYPELOAD (klass);
8872 /* Optimize the common ldobj+stloc combination */
8882 loc_index = ip [5] - CEE_STLOC_0;
8889 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8890 CHECK_LOCAL (loc_index);
8892 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8893 ins->dreg = cfg->locals [loc_index]->dreg;
8899 /* Optimize the ldobj+stobj combination */
8900 /* The reference case ends up being a load+store anyway */
8901 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8906 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8913 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8922 CHECK_STACK_OVF (1);
8924 n = read32 (ip + 1);
8926 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8927 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8928 ins->type = STACK_OBJ;
8931 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8932 MonoInst *iargs [1];
8934 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8935 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8937 if (cfg->opt & MONO_OPT_SHARED) {
8938 MonoInst *iargs [3];
8940 if (cfg->compile_aot) {
8941 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8943 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8944 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8945 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8946 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8947 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8949 if (bblock->out_of_line) {
8950 MonoInst *iargs [2];
8952 if (image == mono_defaults.corlib) {
8954 * Avoid relocations in AOT and save some space by using a
8955 * version of helper_ldstr specialized to mscorlib.
8957 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8958 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8960 /* Avoid creating the string object */
8961 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8962 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8963 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8967 if (cfg->compile_aot) {
8968 NEW_LDSTRCONST (cfg, ins, image, n);
8970 MONO_ADD_INS (bblock, ins);
8973 NEW_PCONST (cfg, ins, NULL);
8974 ins->type = STACK_OBJ;
8975 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8977 OUT_OF_MEMORY_FAILURE;
8980 MONO_ADD_INS (bblock, ins);
8989 MonoInst *iargs [2];
8990 MonoMethodSignature *fsig;
8993 MonoInst *vtable_arg = NULL;
8996 token = read32 (ip + 1);
8997 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8998 if (!cmethod || mono_loader_get_last_error ())
9000 fsig = mono_method_get_signature (cmethod, image, token);
9004 mono_save_token_info (cfg, image, token, cmethod);
9006 if (!mono_class_init (cmethod->klass))
9007 TYPE_LOAD_ERROR (cmethod->klass);
9009 context_used = mini_method_check_context_used (cfg, cmethod);
9011 if (mono_security_cas_enabled ()) {
9012 if (check_linkdemand (cfg, method, cmethod))
9013 INLINE_FAILURE ("linkdemand");
9014 CHECK_CFG_EXCEPTION;
9015 } else if (mono_security_core_clr_enabled ()) {
9016 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9019 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9020 emit_generic_class_init (cfg, cmethod->klass);
9021 CHECK_TYPELOAD (cmethod->klass);
9025 if (cfg->gsharedvt) {
9026 if (mini_is_gsharedvt_variable_signature (sig))
9027 GSHAREDVT_FAILURE (*ip);
9031 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9032 mono_method_is_generic_sharable (cmethod, TRUE)) {
9033 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9034 mono_class_vtable (cfg->domain, cmethod->klass);
9035 CHECK_TYPELOAD (cmethod->klass);
9037 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9038 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9041 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9042 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9044 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9046 CHECK_TYPELOAD (cmethod->klass);
9047 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9052 n = fsig->param_count;
9056 * Generate smaller code for the common newobj <exception> instruction in
9057 * argument checking code.
9059 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9060 is_exception_class (cmethod->klass) && n <= 2 &&
9061 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9062 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9063 MonoInst *iargs [3];
9065 g_assert (!vtable_arg);
9069 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9072 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9076 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9081 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9084 g_assert_not_reached ();
9092 /* move the args to allow room for 'this' in the first position */
9098 /* check_call_signature () requires sp[0] to be set */
9099 this_ins.type = STACK_OBJ;
9101 if (check_call_signature (cfg, fsig, sp))
9106 if (mini_class_is_system_array (cmethod->klass)) {
9107 g_assert (!vtable_arg);
9109 *sp = emit_get_rgctx_method (cfg, context_used,
9110 cmethod, MONO_RGCTX_INFO_METHOD);
9112 /* Avoid varargs in the common case */
9113 if (fsig->param_count == 1)
9114 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9115 else if (fsig->param_count == 2)
9116 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9117 else if (fsig->param_count == 3)
9118 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9119 else if (fsig->param_count == 4)
9120 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9122 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9123 } else if (cmethod->string_ctor) {
9124 g_assert (!context_used);
9125 g_assert (!vtable_arg);
9126 /* we simply pass a null pointer */
9127 EMIT_NEW_PCONST (cfg, *sp, NULL);
9128 /* now call the string ctor */
9129 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9131 MonoInst* callvirt_this_arg = NULL;
9133 if (cmethod->klass->valuetype) {
9134 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9135 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9136 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9141 * The code generated by mini_emit_virtual_call () expects
9142 * iargs [0] to be a boxed instance, but luckily the vcall
9143 * will be transformed into a normal call there.
9145 } else if (context_used) {
9146 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9149 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9151 CHECK_TYPELOAD (cmethod->klass);
9154 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9155 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9156 * As a workaround, we call class cctors before allocating objects.
9158 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9159 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9160 if (cfg->verbose_level > 2)
9161 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9162 class_inits = g_slist_prepend (class_inits, vtable);
9165 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9168 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9171 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9173 /* Now call the actual ctor */
9174 /* Avoid virtual calls to ctors if possible */
9175 if (mono_class_is_marshalbyref (cmethod->klass))
9176 callvirt_this_arg = sp [0];
9179 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9180 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9181 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9186 CHECK_CFG_EXCEPTION;
9187 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9188 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9189 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9190 !g_list_find (dont_inline, cmethod)) {
9193 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9194 cfg->real_offset += 5;
9197 inline_costs += costs - 5;
9199 INLINE_FAILURE ("inline failure");
9200 // FIXME-VT: Clean this up
9201 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9202 GSHAREDVT_FAILURE(*ip);
9203 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9205 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9208 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9209 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9210 } else if (context_used &&
9211 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9212 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9213 MonoInst *cmethod_addr;
9215 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9216 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9218 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9220 INLINE_FAILURE ("ctor call");
9221 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9222 callvirt_this_arg, NULL, vtable_arg);
9226 if (alloc == NULL) {
9228 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9229 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9243 token = read32 (ip + 1);
9244 klass = mini_get_class (method, token, generic_context);
9245 CHECK_TYPELOAD (klass);
9246 if (sp [0]->type != STACK_OBJ)
9249 context_used = mini_class_check_context_used (cfg, klass);
9251 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9252 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9259 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9262 if (cfg->compile_aot)
9263 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9265 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9267 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9268 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9271 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9272 MonoMethod *mono_castclass;
9273 MonoInst *iargs [1];
9276 mono_castclass = mono_marshal_get_castclass (klass);
9279 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9280 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9281 CHECK_CFG_EXCEPTION;
9282 g_assert (costs > 0);
9285 cfg->real_offset += 5;
9290 inline_costs += costs;
9293 ins = handle_castclass (cfg, klass, *sp, context_used);
9294 CHECK_CFG_EXCEPTION;
9304 token = read32 (ip + 1);
9305 klass = mini_get_class (method, token, generic_context);
9306 CHECK_TYPELOAD (klass);
9307 if (sp [0]->type != STACK_OBJ)
9310 context_used = mini_class_check_context_used (cfg, klass);
9312 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9313 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9320 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9323 if (cfg->compile_aot)
9324 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9326 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9328 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9331 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9332 MonoMethod *mono_isinst;
9333 MonoInst *iargs [1];
9336 mono_isinst = mono_marshal_get_isinst (klass);
9339 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9340 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9341 CHECK_CFG_EXCEPTION;
9342 g_assert (costs > 0);
9345 cfg->real_offset += 5;
9350 inline_costs += costs;
9353 ins = handle_isinst (cfg, klass, *sp, context_used);
9354 CHECK_CFG_EXCEPTION;
9361 case CEE_UNBOX_ANY: {
9365 token = read32 (ip + 1);
9366 klass = mini_get_class (method, token, generic_context);
9367 CHECK_TYPELOAD (klass);
9369 mono_save_token_info (cfg, image, token, klass);
9371 context_used = mini_class_check_context_used (cfg, klass);
9373 if (mini_is_gsharedvt_klass (cfg, klass)) {
9374 *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
9382 if (generic_class_is_reference_type (cfg, klass)) {
9383 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9384 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9385 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9392 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9395 /*FIXME AOT support*/
9396 if (cfg->compile_aot)
9397 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9399 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9401 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9402 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9405 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9406 MonoMethod *mono_castclass;
9407 MonoInst *iargs [1];
9410 mono_castclass = mono_marshal_get_castclass (klass);
9413 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9414 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9415 CHECK_CFG_EXCEPTION;
9416 g_assert (costs > 0);
9419 cfg->real_offset += 5;
9423 inline_costs += costs;
9425 ins = handle_castclass (cfg, klass, *sp, context_used);
9426 CHECK_CFG_EXCEPTION;
9434 if (mono_class_is_nullable (klass)) {
9435 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9442 ins = handle_unbox (cfg, klass, sp, context_used);
9448 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9461 token = read32 (ip + 1);
9462 klass = mini_get_class (method, token, generic_context);
9463 CHECK_TYPELOAD (klass);
9465 mono_save_token_info (cfg, image, token, klass);
9467 context_used = mini_class_check_context_used (cfg, klass);
9469 if (generic_class_is_reference_type (cfg, klass)) {
9475 if (klass == mono_defaults.void_class)
9477 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9479 /* frequent check in generic code: box (struct), brtrue */
9481 // FIXME: LLVM can't handle the inconsistent bb linking
9482 if (!mono_class_is_nullable (klass) &&
9483 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9484 (ip [5] == CEE_BRTRUE ||
9485 ip [5] == CEE_BRTRUE_S ||
9486 ip [5] == CEE_BRFALSE ||
9487 ip [5] == CEE_BRFALSE_S)) {
9488 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9490 MonoBasicBlock *true_bb, *false_bb;
9494 if (cfg->verbose_level > 3) {
9495 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9496 printf ("<box+brtrue opt>\n");
9504 target = ip + 1 + (signed char)(*ip);
9511 target = ip + 4 + (gint)(read32 (ip));
9515 g_assert_not_reached ();
9519 * We need to link both bblocks, since it is needed for handling stack
9520 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9521 * Branching to only one of them would lead to inconsistencies, so
9522 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9524 GET_BBLOCK (cfg, true_bb, target);
9525 GET_BBLOCK (cfg, false_bb, ip);
9527 mono_link_bblock (cfg, cfg->cbb, true_bb);
9528 mono_link_bblock (cfg, cfg->cbb, false_bb);
9530 if (sp != stack_start) {
9531 handle_stack_args (cfg, stack_start, sp - stack_start);
9533 CHECK_UNVERIFIABLE (cfg);
9536 if (COMPILE_LLVM (cfg)) {
9537 dreg = alloc_ireg (cfg);
9538 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9541 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9543 /* The JIT can't eliminate the iconst+compare */
9544 MONO_INST_NEW (cfg, ins, OP_BR);
9545 ins->inst_target_bb = is_true ? true_bb : false_bb;
9546 MONO_ADD_INS (cfg->cbb, ins);
9549 start_new_bblock = 1;
9553 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9555 CHECK_CFG_EXCEPTION;
9564 token = read32 (ip + 1);
9565 klass = mini_get_class (method, token, generic_context);
9566 CHECK_TYPELOAD (klass);
9568 mono_save_token_info (cfg, image, token, klass);
9570 context_used = mini_class_check_context_used (cfg, klass);
9572 if (mono_class_is_nullable (klass)) {
9575 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9576 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9580 ins = handle_unbox (cfg, klass, sp, context_used);
9593 MonoClassField *field;
9594 #ifndef DISABLE_REMOTING
9598 gboolean is_instance;
9600 gpointer addr = NULL;
9601 gboolean is_special_static;
9603 MonoInst *store_val = NULL;
9606 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9608 if (op == CEE_STFLD) {
9616 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9618 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9621 if (op == CEE_STSFLD) {
9629 token = read32 (ip + 1);
9630 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9631 field = mono_method_get_wrapper_data (method, token);
9632 klass = field->parent;
9635 field = mono_field_from_token (image, token, &klass, generic_context);
9639 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9640 FIELD_ACCESS_FAILURE;
9641 mono_class_init (klass);
9643 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9646 /* if the class is Critical then transparent code cannot access its fields */
9647 if (!is_instance && mono_security_core_clr_enabled ())
9648 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9650 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9651 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9652 if (mono_security_core_clr_enabled ())
9653 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9657 * LDFLD etc. is usable on static fields as well, so convert those cases to
9660 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9672 g_assert_not_reached ();
9674 is_instance = FALSE;
9677 context_used = mini_class_check_context_used (cfg, klass);
9681 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9682 if (op == CEE_STFLD) {
9683 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9685 #ifndef DISABLE_REMOTING
9686 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9687 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9688 MonoInst *iargs [5];
9690 GSHAREDVT_FAILURE (op);
9693 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9694 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9695 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9699 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9700 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9701 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9702 CHECK_CFG_EXCEPTION;
9703 g_assert (costs > 0);
9705 cfg->real_offset += 5;
9708 inline_costs += costs;
9710 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9717 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9719 if (mini_is_gsharedvt_klass (cfg, klass)) {
9720 MonoInst *offset_ins;
9722 context_used = mini_class_check_context_used (cfg, klass);
9724 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9725 dreg = alloc_ireg_mp (cfg);
9726 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9727 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9728 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9730 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9732 if (sp [0]->opcode != OP_LDADDR)
9733 store->flags |= MONO_INST_FAULT;
9735 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9736 /* insert call to write barrier */
9740 dreg = alloc_ireg_mp (cfg);
9741 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9742 emit_write_barrier (cfg, ptr, sp [1], -1);
9745 store->flags |= ins_flag;
9752 #ifndef DISABLE_REMOTING
9753 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9754 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9755 MonoInst *iargs [4];
9757 GSHAREDVT_FAILURE (op);
9760 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9761 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9762 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9763 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9764 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9765 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9766 CHECK_CFG_EXCEPTION;
9768 g_assert (costs > 0);
9770 cfg->real_offset += 5;
9774 inline_costs += costs;
9776 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9782 if (sp [0]->type == STACK_VTYPE) {
9785 /* Have to compute the address of the variable */
9787 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9789 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9791 g_assert (var->klass == klass);
9793 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9797 if (op == CEE_LDFLDA) {
9798 if (is_magic_tls_access (field)) {
9799 GSHAREDVT_FAILURE (*ip);
9801 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9803 if (sp [0]->type == STACK_OBJ) {
9804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9805 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9808 dreg = alloc_ireg_mp (cfg);
9810 if (mini_is_gsharedvt_klass (cfg, klass)) {
9811 MonoInst *offset_ins;
9813 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9814 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9816 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9818 ins->klass = mono_class_from_mono_type (field->type);
9819 ins->type = STACK_MP;
9825 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9827 if (mini_is_gsharedvt_klass (cfg, klass)) {
9828 MonoInst *offset_ins;
9830 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9831 dreg = alloc_ireg_mp (cfg);
9832 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9833 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9835 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9837 load->flags |= ins_flag;
9838 if (sp [0]->opcode != OP_LDADDR)
9839 load->flags |= MONO_INST_FAULT;
9853 * We can only support shared generic static
9854 * field access on architectures where the
9855 * trampoline code has been extended to handle
9856 * the generic class init.
9858 #ifndef MONO_ARCH_VTABLE_REG
9859 GENERIC_SHARING_FAILURE (op);
9862 context_used = mini_class_check_context_used (cfg, klass);
9864 ftype = mono_field_get_type (field);
9866 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9869 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9870 * to be called here.
9872 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9873 mono_class_vtable (cfg->domain, klass);
9874 CHECK_TYPELOAD (klass);
9876 mono_domain_lock (cfg->domain);
9877 if (cfg->domain->special_static_fields)
9878 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9879 mono_domain_unlock (cfg->domain);
9881 is_special_static = mono_class_field_is_special_static (field);
9883 /* Generate IR to compute the field address */
9884 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9886 * Fast access to TLS data
9887 * Inline version of get_thread_static_data () in
9891 int idx, static_data_reg, array_reg, dreg;
9892 MonoInst *thread_ins;
9894 GSHAREDVT_FAILURE (op);
9896 // offset &= 0x7fffffff;
9897 // idx = (offset >> 24) - 1;
9898 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9900 thread_ins = mono_get_thread_intrinsic (cfg);
9901 MONO_ADD_INS (cfg->cbb, thread_ins);
9902 static_data_reg = alloc_ireg (cfg);
9903 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9905 if (cfg->compile_aot) {
9906 int offset_reg, offset2_reg, idx_reg;
9908 /* For TLS variables, this will return the TLS offset */
9909 EMIT_NEW_SFLDACONST (cfg, ins, field);
9910 offset_reg = ins->dreg;
9911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9912 idx_reg = alloc_ireg (cfg);
9913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9916 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9917 array_reg = alloc_ireg (cfg);
9918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9919 offset2_reg = alloc_ireg (cfg);
9920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9921 dreg = alloc_ireg (cfg);
9922 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9924 offset = (gsize)addr & 0x7fffffff;
9925 idx = (offset >> 24) - 1;
9927 array_reg = alloc_ireg (cfg);
9928 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9929 dreg = alloc_ireg (cfg);
9930 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9932 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9933 (cfg->compile_aot && is_special_static) ||
9934 (context_used && is_special_static)) {
9935 MonoInst *iargs [2];
9937 g_assert (field->parent);
9938 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9940 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9941 field, MONO_RGCTX_INFO_CLASS_FIELD);
9943 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9945 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9946 } else if (context_used) {
9947 MonoInst *static_data;
9950 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9951 method->klass->name_space, method->klass->name, method->name,
9952 depth, field->offset);
9955 if (mono_class_needs_cctor_run (klass, method))
9956 emit_generic_class_init (cfg, klass);
9959 * The pointer we're computing here is
9961 * super_info.static_data + field->offset
9963 static_data = emit_get_rgctx_klass (cfg, context_used,
9964 klass, MONO_RGCTX_INFO_STATIC_DATA);
9966 if (mini_is_gsharedvt_klass (cfg, klass)) {
9967 MonoInst *offset_ins;
9969 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9970 dreg = alloc_ireg_mp (cfg);
9971 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9972 } else if (field->offset == 0) {
9975 int addr_reg = mono_alloc_preg (cfg);
9976 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9978 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9979 MonoInst *iargs [2];
9981 g_assert (field->parent);
9982 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9983 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9984 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9986 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9988 CHECK_TYPELOAD (klass);
9990 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9991 if (!(g_slist_find (class_inits, vtable))) {
9992 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9993 if (cfg->verbose_level > 2)
9994 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9995 class_inits = g_slist_prepend (class_inits, vtable);
9998 if (cfg->run_cctors) {
10000 /* This makes it so that inlining cannot trigger */
10001 /* .cctors: too many apps depend on them */
10002 /* running with a specific order... */
10003 if (! vtable->initialized)
10004 INLINE_FAILURE ("class init");
10005 ex = mono_runtime_class_init_full (vtable, FALSE);
10007 set_exception_object (cfg, ex);
10008 goto exception_exit;
10012 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10014 if (cfg->compile_aot)
10015 EMIT_NEW_SFLDACONST (cfg, ins, field);
10017 EMIT_NEW_PCONST (cfg, ins, addr);
10019 MonoInst *iargs [1];
10020 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10021 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10025 /* Generate IR to do the actual load/store operation */
10027 if (op == CEE_LDSFLDA) {
10028 ins->klass = mono_class_from_mono_type (ftype);
10029 ins->type = STACK_PTR;
10031 } else if (op == CEE_STSFLD) {
10034 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10035 store->flags |= ins_flag;
10037 gboolean is_const = FALSE;
10038 MonoVTable *vtable = NULL;
10039 gpointer addr = NULL;
10041 if (!context_used) {
10042 vtable = mono_class_vtable (cfg->domain, klass);
10043 CHECK_TYPELOAD (klass);
10045 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10046 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10047 int ro_type = ftype->type;
10049 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10050 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10051 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10054 GSHAREDVT_FAILURE (op);
10056 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10059 case MONO_TYPE_BOOLEAN:
10061 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10065 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10068 case MONO_TYPE_CHAR:
10070 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10074 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10079 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10083 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10088 case MONO_TYPE_PTR:
10089 case MONO_TYPE_FNPTR:
10090 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10091 type_to_eval_stack_type ((cfg), field->type, *sp);
10094 case MONO_TYPE_STRING:
10095 case MONO_TYPE_OBJECT:
10096 case MONO_TYPE_CLASS:
10097 case MONO_TYPE_SZARRAY:
10098 case MONO_TYPE_ARRAY:
10099 if (!mono_gc_is_moving ()) {
10100 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10101 type_to_eval_stack_type ((cfg), field->type, *sp);
10109 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10114 case MONO_TYPE_VALUETYPE:
10124 CHECK_STACK_OVF (1);
10126 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10127 load->flags |= ins_flag;
10140 token = read32 (ip + 1);
10141 klass = mini_get_class (method, token, generic_context);
10142 CHECK_TYPELOAD (klass);
10143 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10144 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10145 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10146 generic_class_is_reference_type (cfg, klass)) {
10147 /* insert call to write barrier */
10148 emit_write_barrier (cfg, sp [0], sp [1], -1);
10160 const char *data_ptr;
10162 guint32 field_token;
10168 token = read32 (ip + 1);
10170 klass = mini_get_class (method, token, generic_context);
10171 CHECK_TYPELOAD (klass);
10173 context_used = mini_class_check_context_used (cfg, klass);
10175 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10176 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
10177 ins->sreg1 = sp [0]->dreg;
10178 ins->type = STACK_I4;
10179 ins->dreg = alloc_ireg (cfg);
10180 MONO_ADD_INS (cfg->cbb, ins);
10181 *sp = mono_decompose_opcode (cfg, ins);
10184 if (context_used) {
10185 MonoInst *args [3];
10186 MonoClass *array_class = mono_array_class_get (klass, 1);
10187 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10189 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10192 args [0] = emit_get_rgctx_klass (cfg, context_used,
10193 array_class, MONO_RGCTX_INFO_VTABLE);
10198 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10200 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10202 if (cfg->opt & MONO_OPT_SHARED) {
10203 /* Decompose now to avoid problems with references to the domainvar */
10204 MonoInst *iargs [3];
10206 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10207 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10208 iargs [2] = sp [0];
10210 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10212 /* Decompose later since it is needed by abcrem */
10213 MonoClass *array_type = mono_array_class_get (klass, 1);
10214 mono_class_vtable (cfg->domain, array_type);
10215 CHECK_TYPELOAD (array_type);
10217 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10218 ins->dreg = alloc_ireg_ref (cfg);
10219 ins->sreg1 = sp [0]->dreg;
10220 ins->inst_newa_class = klass;
10221 ins->type = STACK_OBJ;
10222 ins->klass = array_type;
10223 MONO_ADD_INS (cfg->cbb, ins);
10224 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10225 cfg->cbb->has_array_access = TRUE;
10227 /* Needed so mono_emit_load_get_addr () gets called */
10228 mono_get_got_var (cfg);
10238 * we inline/optimize the initialization sequence if possible.
10239 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10240 * for small sizes open code the memcpy
10241 * ensure the rva field is big enough
10243 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10244 MonoMethod *memcpy_method = get_memcpy_method ();
10245 MonoInst *iargs [3];
10246 int add_reg = alloc_ireg_mp (cfg);
10248 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10249 if (cfg->compile_aot) {
10250 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10252 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10254 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10255 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10264 if (sp [0]->type != STACK_OBJ)
10267 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10268 ins->dreg = alloc_preg (cfg);
10269 ins->sreg1 = sp [0]->dreg;
10270 ins->type = STACK_I4;
10271 /* This flag will be inherited by the decomposition */
10272 ins->flags |= MONO_INST_FAULT;
10273 MONO_ADD_INS (cfg->cbb, ins);
10274 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10275 cfg->cbb->has_array_access = TRUE;
10283 if (sp [0]->type != STACK_OBJ)
10286 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10288 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10289 CHECK_TYPELOAD (klass);
10290 /* We need to make sure that this array is exactly the type it needs
10291 * to be for correctness; the wrappers are lax with their usage,
10292 * so we need to ignore them here.
10294 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10295 MonoClass *array_class = mono_array_class_get (klass, 1);
10296 mini_emit_check_array_type (cfg, sp [0], array_class);
10297 CHECK_TYPELOAD (array_class);
10301 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10306 case CEE_LDELEM_I1:
10307 case CEE_LDELEM_U1:
10308 case CEE_LDELEM_I2:
10309 case CEE_LDELEM_U2:
10310 case CEE_LDELEM_I4:
10311 case CEE_LDELEM_U4:
10312 case CEE_LDELEM_I8:
10314 case CEE_LDELEM_R4:
10315 case CEE_LDELEM_R8:
10316 case CEE_LDELEM_REF: {
10322 if (*ip == CEE_LDELEM) {
10324 token = read32 (ip + 1);
10325 klass = mini_get_class (method, token, generic_context);
10326 CHECK_TYPELOAD (klass);
10327 mono_class_init (klass);
10330 klass = array_access_to_klass (*ip);
10332 if (sp [0]->type != STACK_OBJ)
10335 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10337 if (mini_is_gsharedvt_klass (cfg, klass)) {
10338 // FIXME-VT: OP_ICONST optimization
10339 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10340 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10341 ins->opcode = OP_LOADV_MEMBASE;
10342 } else if (sp [1]->opcode == OP_ICONST) {
10343 int array_reg = sp [0]->dreg;
10344 int index_reg = sp [1]->dreg;
10345 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10347 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10348 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10350 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10351 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10354 if (*ip == CEE_LDELEM)
10361 case CEE_STELEM_I1:
10362 case CEE_STELEM_I2:
10363 case CEE_STELEM_I4:
10364 case CEE_STELEM_I8:
10365 case CEE_STELEM_R4:
10366 case CEE_STELEM_R8:
10367 case CEE_STELEM_REF:
10372 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10374 if (*ip == CEE_STELEM) {
10376 token = read32 (ip + 1);
10377 klass = mini_get_class (method, token, generic_context);
10378 CHECK_TYPELOAD (klass);
10379 mono_class_init (klass);
10382 klass = array_access_to_klass (*ip);
10384 if (sp [0]->type != STACK_OBJ)
10387 emit_array_store (cfg, klass, sp, TRUE);
10389 if (*ip == CEE_STELEM)
10396 case CEE_CKFINITE: {
10400 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10401 ins->sreg1 = sp [0]->dreg;
10402 ins->dreg = alloc_freg (cfg);
10403 ins->type = STACK_R8;
10404 MONO_ADD_INS (bblock, ins);
10406 *sp++ = mono_decompose_opcode (cfg, ins);
10411 case CEE_REFANYVAL: {
10412 MonoInst *src_var, *src;
10414 int klass_reg = alloc_preg (cfg);
10415 int dreg = alloc_preg (cfg);
10417 GSHAREDVT_FAILURE (*ip);
10420 MONO_INST_NEW (cfg, ins, *ip);
10423 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10424 CHECK_TYPELOAD (klass);
10425 mono_class_init (klass);
10427 context_used = mini_class_check_context_used (cfg, klass);
10430 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10432 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10433 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10436 if (context_used) {
10437 MonoInst *klass_ins;
10439 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10440 klass, MONO_RGCTX_INFO_KLASS);
10443 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10444 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10446 mini_emit_class_check (cfg, klass_reg, klass);
10448 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10449 ins->type = STACK_MP;
10454 case CEE_MKREFANY: {
10455 MonoInst *loc, *addr;
10457 GSHAREDVT_FAILURE (*ip);
10460 MONO_INST_NEW (cfg, ins, *ip);
10463 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10464 CHECK_TYPELOAD (klass);
10465 mono_class_init (klass);
10467 context_used = mini_class_check_context_used (cfg, klass);
10469 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10470 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10472 if (context_used) {
10473 MonoInst *const_ins;
10474 int type_reg = alloc_preg (cfg);
10476 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10477 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10479 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10480 } else if (cfg->compile_aot) {
10481 int const_reg = alloc_preg (cfg);
10482 int type_reg = alloc_preg (cfg);
10484 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10485 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10487 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10489 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10490 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10492 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10494 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10495 ins->type = STACK_VTYPE;
10496 ins->klass = mono_defaults.typed_reference_class;
10501 case CEE_LDTOKEN: {
10503 MonoClass *handle_class;
10505 CHECK_STACK_OVF (1);
10508 n = read32 (ip + 1);
10510 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10511 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10512 handle = mono_method_get_wrapper_data (method, n);
10513 handle_class = mono_method_get_wrapper_data (method, n + 1);
10514 if (handle_class == mono_defaults.typehandle_class)
10515 handle = &((MonoClass*)handle)->byval_arg;
10518 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10522 mono_class_init (handle_class);
10523 if (cfg->generic_sharing_context) {
10524 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10525 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10526 /* This case handles ldtoken
10527 of an open type, like for
10530 } else if (handle_class == mono_defaults.typehandle_class) {
10531 /* If we get a MONO_TYPE_CLASS
10532 then we need to provide the
10534 instantiation of it. */
10535 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10538 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10539 } else if (handle_class == mono_defaults.fieldhandle_class)
10540 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10541 else if (handle_class == mono_defaults.methodhandle_class)
10542 context_used = mini_method_check_context_used (cfg, handle);
10544 g_assert_not_reached ();
10547 if ((cfg->opt & MONO_OPT_SHARED) &&
10548 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10549 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10550 MonoInst *addr, *vtvar, *iargs [3];
10551 int method_context_used;
10553 method_context_used = mini_method_check_context_used (cfg, method);
10555 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10557 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10558 EMIT_NEW_ICONST (cfg, iargs [1], n);
10559 if (method_context_used) {
10560 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10561 method, MONO_RGCTX_INFO_METHOD);
10562 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10564 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10565 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10567 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10569 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10571 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10573 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10574 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10575 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10576 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10577 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10578 MonoClass *tclass = mono_class_from_mono_type (handle);
10580 mono_class_init (tclass);
10581 if (context_used) {
10582 ins = emit_get_rgctx_klass (cfg, context_used,
10583 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10584 } else if (cfg->compile_aot) {
10585 if (method->wrapper_type) {
10586 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10587 /* Special case for static synchronized wrappers */
10588 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10590 /* FIXME: n is not a normal token */
10591 cfg->disable_aot = TRUE;
10592 EMIT_NEW_PCONST (cfg, ins, NULL);
10595 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10598 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10600 ins->type = STACK_OBJ;
10601 ins->klass = cmethod->klass;
10604 MonoInst *addr, *vtvar;
10606 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10608 if (context_used) {
10609 if (handle_class == mono_defaults.typehandle_class) {
10610 ins = emit_get_rgctx_klass (cfg, context_used,
10611 mono_class_from_mono_type (handle),
10612 MONO_RGCTX_INFO_TYPE);
10613 } else if (handle_class == mono_defaults.methodhandle_class) {
10614 ins = emit_get_rgctx_method (cfg, context_used,
10615 handle, MONO_RGCTX_INFO_METHOD);
10616 } else if (handle_class == mono_defaults.fieldhandle_class) {
10617 ins = emit_get_rgctx_field (cfg, context_used,
10618 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10620 g_assert_not_reached ();
10622 } else if (cfg->compile_aot) {
10623 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10625 EMIT_NEW_PCONST (cfg, ins, handle);
10627 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10628 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10629 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10639 MONO_INST_NEW (cfg, ins, OP_THROW);
10641 ins->sreg1 = sp [0]->dreg;
10643 bblock->out_of_line = TRUE;
10644 MONO_ADD_INS (bblock, ins);
10645 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10646 MONO_ADD_INS (bblock, ins);
10649 link_bblock (cfg, bblock, end_bblock);
10650 start_new_bblock = 1;
10652 case CEE_ENDFINALLY:
10653 /* mono_save_seq_point_info () depends on this */
10654 if (sp != stack_start)
10655 emit_seq_point (cfg, method, ip, FALSE);
10656 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10657 MONO_ADD_INS (bblock, ins);
10659 start_new_bblock = 1;
10662 * Control will leave the method so empty the stack, otherwise
10663 * the next basic block will start with a nonempty stack.
10665 while (sp != stack_start) {
10670 case CEE_LEAVE_S: {
10673 if (*ip == CEE_LEAVE) {
10675 target = ip + 5 + (gint32)read32(ip + 1);
10678 target = ip + 2 + (signed char)(ip [1]);
10681 /* empty the stack */
10682 while (sp != stack_start) {
10687 * If this leave statement is in a catch block, check for a
10688 * pending exception, and rethrow it if necessary.
10689 * We avoid doing this in runtime invoke wrappers, since those are called
10690 * by native code which excepts the wrapper to catch all exceptions.
10692 for (i = 0; i < header->num_clauses; ++i) {
10693 MonoExceptionClause *clause = &header->clauses [i];
10696 * Use <= in the final comparison to handle clauses with multiple
10697 * leave statements, like in bug #78024.
10698 * The ordering of the exception clauses guarantees that we find the
10699 * innermost clause.
10701 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10703 MonoBasicBlock *dont_throw;
10708 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10711 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10713 NEW_BBLOCK (cfg, dont_throw);
10716 * Currently, we always rethrow the abort exception, despite the
10717 * fact that this is not correct. See thread6.cs for an example.
10718 * But propagating the abort exception is more important than
10719 * getting the sematics right.
10721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10723 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10725 MONO_START_BB (cfg, dont_throw);
10730 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10732 MonoExceptionClause *clause;
10734 for (tmp = handlers; tmp; tmp = tmp->next) {
10735 clause = tmp->data;
10736 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10738 link_bblock (cfg, bblock, tblock);
10739 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10740 ins->inst_target_bb = tblock;
10741 ins->inst_eh_block = clause;
10742 MONO_ADD_INS (bblock, ins);
10743 bblock->has_call_handler = 1;
10744 if (COMPILE_LLVM (cfg)) {
10745 MonoBasicBlock *target_bb;
10748 * Link the finally bblock with the target, since it will
10749 * conceptually branch there.
10750 * FIXME: Have to link the bblock containing the endfinally.
10752 GET_BBLOCK (cfg, target_bb, target);
10753 link_bblock (cfg, tblock, target_bb);
10756 g_list_free (handlers);
10759 MONO_INST_NEW (cfg, ins, OP_BR);
10760 MONO_ADD_INS (bblock, ins);
10761 GET_BBLOCK (cfg, tblock, target);
10762 link_bblock (cfg, bblock, tblock);
10763 ins->inst_target_bb = tblock;
10764 start_new_bblock = 1;
10766 if (*ip == CEE_LEAVE)
10775 * Mono specific opcodes
10777 case MONO_CUSTOM_PREFIX: {
10779 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10783 case CEE_MONO_ICALL: {
10785 MonoJitICallInfo *info;
10787 token = read32 (ip + 2);
10788 func = mono_method_get_wrapper_data (method, token);
10789 info = mono_find_jit_icall_by_addr (func);
10791 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10794 CHECK_STACK (info->sig->param_count);
10795 sp -= info->sig->param_count;
10797 ins = mono_emit_jit_icall (cfg, info->func, sp);
10798 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10802 inline_costs += 10 * num_calls++;
10806 case CEE_MONO_LDPTR: {
10809 CHECK_STACK_OVF (1);
10811 token = read32 (ip + 2);
10813 ptr = mono_method_get_wrapper_data (method, token);
10814 /* FIXME: Generalize this */
10815 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10816 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10821 EMIT_NEW_PCONST (cfg, ins, ptr);
10824 inline_costs += 10 * num_calls++;
10825 /* Can't embed random pointers into AOT code */
10826 cfg->disable_aot = 1;
10829 case CEE_MONO_JIT_ICALL_ADDR: {
10830 MonoJitICallInfo *callinfo;
10833 CHECK_STACK_OVF (1);
10835 token = read32 (ip + 2);
10837 ptr = mono_method_get_wrapper_data (method, token);
10838 callinfo = mono_find_jit_icall_by_addr (ptr);
10839 g_assert (callinfo);
10840 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10843 inline_costs += 10 * num_calls++;
10846 case CEE_MONO_ICALL_ADDR: {
10847 MonoMethod *cmethod;
10850 CHECK_STACK_OVF (1);
10852 token = read32 (ip + 2);
10854 cmethod = mono_method_get_wrapper_data (method, token);
10856 if (cfg->compile_aot) {
10857 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10859 ptr = mono_lookup_internal_call (cmethod);
10861 EMIT_NEW_PCONST (cfg, ins, ptr);
10867 case CEE_MONO_VTADDR: {
10868 MonoInst *src_var, *src;
10874 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10875 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10880 case CEE_MONO_NEWOBJ: {
10881 MonoInst *iargs [2];
10883 CHECK_STACK_OVF (1);
10885 token = read32 (ip + 2);
10886 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10887 mono_class_init (klass);
10888 NEW_DOMAINCONST (cfg, iargs [0]);
10889 MONO_ADD_INS (cfg->cbb, iargs [0]);
10890 NEW_CLASSCONST (cfg, iargs [1], klass);
10891 MONO_ADD_INS (cfg->cbb, iargs [1]);
10892 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10894 inline_costs += 10 * num_calls++;
10897 case CEE_MONO_OBJADDR:
10900 MONO_INST_NEW (cfg, ins, OP_MOVE);
10901 ins->dreg = alloc_ireg_mp (cfg);
10902 ins->sreg1 = sp [0]->dreg;
10903 ins->type = STACK_MP;
10904 MONO_ADD_INS (cfg->cbb, ins);
10908 case CEE_MONO_LDNATIVEOBJ:
10910 * Similar to LDOBJ, but instead load the unmanaged
10911 * representation of the vtype to the stack.
10916 token = read32 (ip + 2);
10917 klass = mono_method_get_wrapper_data (method, token);
10918 g_assert (klass->valuetype);
10919 mono_class_init (klass);
10922 MonoInst *src, *dest, *temp;
10925 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10926 temp->backend.is_pinvoke = 1;
10927 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10928 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10930 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10931 dest->type = STACK_VTYPE;
10932 dest->klass = klass;
10938 case CEE_MONO_RETOBJ: {
10940 * Same as RET, but return the native representation of a vtype
10943 g_assert (cfg->ret);
10944 g_assert (mono_method_signature (method)->pinvoke);
10949 token = read32 (ip + 2);
10950 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10952 if (!cfg->vret_addr) {
10953 g_assert (cfg->ret_var_is_local);
10955 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10957 EMIT_NEW_RETLOADA (cfg, ins);
10959 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10961 if (sp != stack_start)
10964 MONO_INST_NEW (cfg, ins, OP_BR);
10965 ins->inst_target_bb = end_bblock;
10966 MONO_ADD_INS (bblock, ins);
10967 link_bblock (cfg, bblock, end_bblock);
10968 start_new_bblock = 1;
10972 case CEE_MONO_CISINST:
10973 case CEE_MONO_CCASTCLASS: {
10978 token = read32 (ip + 2);
10979 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10980 if (ip [1] == CEE_MONO_CISINST)
10981 ins = handle_cisinst (cfg, klass, sp [0]);
10983 ins = handle_ccastclass (cfg, klass, sp [0]);
10989 case CEE_MONO_SAVE_LMF:
10990 case CEE_MONO_RESTORE_LMF:
10991 #ifdef MONO_ARCH_HAVE_LMF_OPS
10992 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10993 MONO_ADD_INS (bblock, ins);
10994 cfg->need_lmf_area = TRUE;
10998 case CEE_MONO_CLASSCONST:
10999 CHECK_STACK_OVF (1);
11001 token = read32 (ip + 2);
11002 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11005 inline_costs += 10 * num_calls++;
11007 case CEE_MONO_NOT_TAKEN:
11008 bblock->out_of_line = TRUE;
11012 CHECK_STACK_OVF (1);
11014 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11015 ins->dreg = alloc_preg (cfg);
11016 ins->inst_offset = (gint32)read32 (ip + 2);
11017 ins->type = STACK_PTR;
11018 MONO_ADD_INS (bblock, ins);
11022 case CEE_MONO_DYN_CALL: {
11023 MonoCallInst *call;
11025 /* It would be easier to call a trampoline, but that would put an
11026 * extra frame on the stack, confusing exception handling. So
11027 * implement it inline using an opcode for now.
11030 if (!cfg->dyn_call_var) {
11031 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11032 /* prevent it from being register allocated */
11033 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11036 /* Has to use a call inst since it local regalloc expects it */
11037 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11038 ins = (MonoInst*)call;
11040 ins->sreg1 = sp [0]->dreg;
11041 ins->sreg2 = sp [1]->dreg;
11042 MONO_ADD_INS (bblock, ins);
11044 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11045 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11049 inline_costs += 10 * num_calls++;
11053 case CEE_MONO_MEMORY_BARRIER: {
11055 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11059 case CEE_MONO_JIT_ATTACH: {
11060 MonoInst *args [16];
11061 MonoInst *ad_ins, *lmf_ins;
11062 MonoBasicBlock *next_bb = NULL;
11064 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11066 EMIT_NEW_PCONST (cfg, ins, NULL);
11067 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11073 ad_ins = mono_get_domain_intrinsic (cfg);
11074 lmf_ins = mono_get_lmf_intrinsic (cfg);
11077 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11078 NEW_BBLOCK (cfg, next_bb);
11080 MONO_ADD_INS (cfg->cbb, ad_ins);
11081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11084 MONO_ADD_INS (cfg->cbb, lmf_ins);
11085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11089 if (cfg->compile_aot) {
11090 /* AOT code is only used in the root domain */
11091 EMIT_NEW_PCONST (cfg, args [0], NULL);
11093 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11095 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11096 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11099 MONO_START_BB (cfg, next_bb);
11105 case CEE_MONO_JIT_DETACH: {
11106 MonoInst *args [16];
11108 /* Restore the original domain */
11109 dreg = alloc_ireg (cfg);
11110 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11111 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11116 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11122 case CEE_PREFIX1: {
11125 case CEE_ARGLIST: {
11126 /* somewhat similar to LDTOKEN */
11127 MonoInst *addr, *vtvar;
11128 CHECK_STACK_OVF (1);
11129 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11131 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11132 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11134 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11135 ins->type = STACK_VTYPE;
11136 ins->klass = mono_defaults.argumenthandle_class;
11149 * The following transforms:
11150 * CEE_CEQ into OP_CEQ
11151 * CEE_CGT into OP_CGT
11152 * CEE_CGT_UN into OP_CGT_UN
11153 * CEE_CLT into OP_CLT
11154 * CEE_CLT_UN into OP_CLT_UN
11156 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11158 MONO_INST_NEW (cfg, ins, cmp->opcode);
11160 cmp->sreg1 = sp [0]->dreg;
11161 cmp->sreg2 = sp [1]->dreg;
11162 type_from_op (cmp, sp [0], sp [1]);
11164 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11165 cmp->opcode = OP_LCOMPARE;
11166 else if (sp [0]->type == STACK_R8)
11167 cmp->opcode = OP_FCOMPARE;
11169 cmp->opcode = OP_ICOMPARE;
11170 MONO_ADD_INS (bblock, cmp);
11171 ins->type = STACK_I4;
11172 ins->dreg = alloc_dreg (cfg, ins->type);
11173 type_from_op (ins, sp [0], sp [1]);
11175 if (cmp->opcode == OP_FCOMPARE) {
11177 * The backends expect the fceq opcodes to do the
11180 cmp->opcode = OP_NOP;
11181 ins->sreg1 = cmp->sreg1;
11182 ins->sreg2 = cmp->sreg2;
11184 MONO_ADD_INS (bblock, ins);
11190 MonoInst *argconst;
11191 MonoMethod *cil_method;
11193 CHECK_STACK_OVF (1);
11195 n = read32 (ip + 2);
11196 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11197 if (!cmethod || mono_loader_get_last_error ())
11199 mono_class_init (cmethod->klass);
11201 mono_save_token_info (cfg, image, n, cmethod);
11203 context_used = mini_method_check_context_used (cfg, cmethod);
11205 cil_method = cmethod;
11206 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11207 METHOD_ACCESS_FAILURE;
11209 if (mono_security_cas_enabled ()) {
11210 if (check_linkdemand (cfg, method, cmethod))
11211 INLINE_FAILURE ("linkdemand");
11212 CHECK_CFG_EXCEPTION;
11213 } else if (mono_security_core_clr_enabled ()) {
11214 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11218 * Optimize the common case of ldftn+delegate creation
11220 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11221 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11222 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11223 MonoInst *target_ins;
11224 MonoMethod *invoke;
11225 int invoke_context_used;
11227 invoke = mono_get_delegate_invoke (ctor_method->klass);
11228 if (!invoke || !mono_method_signature (invoke))
11231 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11233 target_ins = sp [-1];
11235 if (mono_security_core_clr_enabled ())
11236 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11238 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11239 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11240 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11242 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11246 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11247 /* FIXME: SGEN support */
11248 if (invoke_context_used == 0) {
11250 if (cfg->verbose_level > 3)
11251 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11253 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11254 CHECK_CFG_EXCEPTION;
11263 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11264 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11268 inline_costs += 10 * num_calls++;
11271 case CEE_LDVIRTFTN: {
11272 MonoInst *args [2];
11276 n = read32 (ip + 2);
11277 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11278 if (!cmethod || mono_loader_get_last_error ())
11280 mono_class_init (cmethod->klass);
11282 context_used = mini_method_check_context_used (cfg, cmethod);
11284 if (mono_security_cas_enabled ()) {
11285 if (check_linkdemand (cfg, method, cmethod))
11286 INLINE_FAILURE ("linkdemand");
11287 CHECK_CFG_EXCEPTION;
11288 } else if (mono_security_core_clr_enabled ()) {
11289 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11295 args [1] = emit_get_rgctx_method (cfg, context_used,
11296 cmethod, MONO_RGCTX_INFO_METHOD);
11299 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11301 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11304 inline_costs += 10 * num_calls++;
11308 CHECK_STACK_OVF (1);
11310 n = read16 (ip + 2);
11312 EMIT_NEW_ARGLOAD (cfg, ins, n);
11317 CHECK_STACK_OVF (1);
11319 n = read16 (ip + 2);
11321 NEW_ARGLOADA (cfg, ins, n);
11322 MONO_ADD_INS (cfg->cbb, ins);
11330 n = read16 (ip + 2);
11332 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11334 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11338 CHECK_STACK_OVF (1);
11340 n = read16 (ip + 2);
11342 EMIT_NEW_LOCLOAD (cfg, ins, n);
11347 unsigned char *tmp_ip;
11348 CHECK_STACK_OVF (1);
11350 n = read16 (ip + 2);
11353 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11359 EMIT_NEW_LOCLOADA (cfg, ins, n);
11368 n = read16 (ip + 2);
11370 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11372 emit_stloc_ir (cfg, sp, header, n);
11379 if (sp != stack_start)
11381 if (cfg->method != method)
11383 * Inlining this into a loop in a parent could lead to
11384 * stack overflows which is different behavior than the
11385 * non-inlined case, thus disable inlining in this case.
11387 goto inline_failure;
11389 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11390 ins->dreg = alloc_preg (cfg);
11391 ins->sreg1 = sp [0]->dreg;
11392 ins->type = STACK_PTR;
11393 MONO_ADD_INS (cfg->cbb, ins);
11395 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11397 ins->flags |= MONO_INST_INIT;
11402 case CEE_ENDFILTER: {
11403 MonoExceptionClause *clause, *nearest;
11404 int cc, nearest_num;
11408 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11410 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11411 ins->sreg1 = (*sp)->dreg;
11412 MONO_ADD_INS (bblock, ins);
11413 start_new_bblock = 1;
11418 for (cc = 0; cc < header->num_clauses; ++cc) {
11419 clause = &header->clauses [cc];
11420 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11421 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11422 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11427 g_assert (nearest);
11428 if ((ip - header->code) != nearest->handler_offset)
11433 case CEE_UNALIGNED_:
11434 ins_flag |= MONO_INST_UNALIGNED;
11435 /* FIXME: record alignment? we can assume 1 for now */
11439 case CEE_VOLATILE_:
11440 ins_flag |= MONO_INST_VOLATILE;
11444 ins_flag |= MONO_INST_TAILCALL;
11445 cfg->flags |= MONO_CFG_HAS_TAIL;
11446 /* Can't inline tail calls at this time */
11447 inline_costs += 100000;
11454 token = read32 (ip + 2);
11455 klass = mini_get_class (method, token, generic_context);
11456 CHECK_TYPELOAD (klass);
11457 if (generic_class_is_reference_type (cfg, klass))
11458 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11460 mini_emit_initobj (cfg, *sp, NULL, klass);
11464 case CEE_CONSTRAINED_:
11466 token = read32 (ip + 2);
11467 constrained_call = mini_get_class (method, token, generic_context);
11468 CHECK_TYPELOAD (constrained_call);
11472 case CEE_INITBLK: {
11473 MonoInst *iargs [3];
11477 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11478 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11479 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11480 /* emit_memset only works when val == 0 */
11481 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11483 iargs [0] = sp [0];
11484 iargs [1] = sp [1];
11485 iargs [2] = sp [2];
11486 if (ip [1] == CEE_CPBLK) {
11487 MonoMethod *memcpy_method = get_memcpy_method ();
11488 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11490 MonoMethod *memset_method = get_memset_method ();
11491 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11501 ins_flag |= MONO_INST_NOTYPECHECK;
11503 ins_flag |= MONO_INST_NORANGECHECK;
11504 /* we ignore the no-nullcheck for now since we
11505 * really do it explicitly only when doing callvirt->call
11509 case CEE_RETHROW: {
11511 int handler_offset = -1;
11513 for (i = 0; i < header->num_clauses; ++i) {
11514 MonoExceptionClause *clause = &header->clauses [i];
11515 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11516 handler_offset = clause->handler_offset;
11521 bblock->flags |= BB_EXCEPTION_UNSAFE;
11523 g_assert (handler_offset != -1);
11525 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11526 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11527 ins->sreg1 = load->dreg;
11528 MONO_ADD_INS (bblock, ins);
11530 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11531 MONO_ADD_INS (bblock, ins);
11534 link_bblock (cfg, bblock, end_bblock);
11535 start_new_bblock = 1;
11543 GSHAREDVT_FAILURE (*ip);
11545 CHECK_STACK_OVF (1);
11547 token = read32 (ip + 2);
11548 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11549 MonoType *type = mono_type_create_from_typespec (image, token);
11550 val = mono_type_size (type, &ialign);
11552 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11553 CHECK_TYPELOAD (klass);
11554 mono_class_init (klass);
11555 val = mono_type_size (&klass->byval_arg, &ialign);
11557 EMIT_NEW_ICONST (cfg, ins, val);
11562 case CEE_REFANYTYPE: {
11563 MonoInst *src_var, *src;
11565 GSHAREDVT_FAILURE (*ip);
11571 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11573 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11574 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11575 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11580 case CEE_READONLY_:
11593 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11603 g_warning ("opcode 0x%02x not handled", *ip);
11607 if (start_new_bblock != 1)
11610 bblock->cil_length = ip - bblock->cil_code;
11611 if (bblock->next_bb) {
11612 /* This could already be set because of inlining, #693905 */
11613 MonoBasicBlock *bb = bblock;
11615 while (bb->next_bb)
11617 bb->next_bb = end_bblock;
11619 bblock->next_bb = end_bblock;
11622 if (cfg->method == method && cfg->domainvar) {
11624 MonoInst *get_domain;
11626 cfg->cbb = init_localsbb;
11628 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11629 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11632 get_domain->dreg = alloc_preg (cfg);
11633 MONO_ADD_INS (cfg->cbb, get_domain);
11635 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11636 MONO_ADD_INS (cfg->cbb, store);
11639 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11640 if (cfg->compile_aot)
11641 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11642 mono_get_got_var (cfg);
11645 if (cfg->method == method && cfg->got_var)
11646 mono_emit_load_got_addr (cfg);
11651 cfg->cbb = init_localsbb;
11653 for (i = 0; i < header->num_locals; ++i) {
11654 MonoType *ptype = header->locals [i];
11655 int t = ptype->type;
11656 dreg = cfg->locals [i]->dreg;
11658 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11659 t = mono_class_enum_basetype (ptype->data.klass)->type;
11660 if (ptype->byref) {
11661 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11662 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11663 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11664 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11665 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11666 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11667 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11668 ins->type = STACK_R8;
11669 ins->inst_p0 = (void*)&r8_0;
11670 ins->dreg = alloc_dreg (cfg, STACK_R8);
11671 MONO_ADD_INS (init_localsbb, ins);
11672 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11673 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11674 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11675 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11676 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11677 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11679 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11684 if (cfg->init_ref_vars && cfg->method == method) {
11685 /* Emit initialization for ref vars */
11686 // FIXME: Avoid duplication initialization for IL locals.
11687 for (i = 0; i < cfg->num_varinfo; ++i) {
11688 MonoInst *ins = cfg->varinfo [i];
11690 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11691 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11696 MonoBasicBlock *bb;
11699 * Make seq points at backward branch targets interruptable.
11701 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11702 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11703 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11706 /* Add a sequence point for method entry/exit events */
11708 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11709 MONO_ADD_INS (init_localsbb, ins);
11710 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11711 MONO_ADD_INS (cfg->bb_exit, ins);
11715 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11716 * the code they refer to was dead (#11880).
11718 if (sym_seq_points) {
11719 for (i = 0; i < header->code_size; ++i) {
11720 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11723 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11724 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11731 if (cfg->method == method) {
11732 MonoBasicBlock *bb;
11733 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11734 bb->region = mono_find_block_region (cfg, bb->real_offset);
11736 mono_create_spvar_for_region (cfg, bb->region);
11737 if (cfg->verbose_level > 2)
11738 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11742 g_slist_free (class_inits);
11743 dont_inline = g_list_remove (dont_inline, method);
11745 if (inline_costs < 0) {
11748 /* Method is too large */
11749 mname = mono_method_full_name (method, TRUE);
11750 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11751 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11753 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11754 mono_basic_block_free (original_bb);
11758 if ((cfg->verbose_level > 2) && (cfg->method == method))
11759 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11761 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11762 mono_basic_block_free (original_bb);
11763 return inline_costs;
11766 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11773 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11777 set_exception_type_from_invalid_il (cfg, method, ip);
11781 g_slist_free (class_inits);
11782 mono_basic_block_free (original_bb);
11783 dont_inline = g_list_remove (dont_inline, method);
11784 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the corresponding STORE*_MEMBASE_IMM
 * opcode, so a known-constant value can be stored directly without first
 * being loaded into a register.  Aborts for store opcodes that have no
 * immediate form.
 * NOTE(review): the switch scaffolding is not visible in this span (elided
 * text) — verify against the upstream file.
 */
11789 store_membase_reg_to_store_membase_imm (int opcode)
11792 case OP_STORE_MEMBASE_REG:
11793 return OP_STORE_MEMBASE_IMM;
11794 case OP_STOREI1_MEMBASE_REG:
11795 return OP_STOREI1_MEMBASE_IMM;
11796 case OP_STOREI2_MEMBASE_REG:
11797 return OP_STOREI2_MEMBASE_IMM;
11798 case OP_STOREI4_MEMBASE_REG:
11799 return OP_STOREI4_MEMBASE_IMM;
11800 case OP_STOREI8_MEMBASE_REG:
11801 return OP_STOREI8_MEMBASE_IMM;
/* any other store opcode is a caller bug */
11803 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its register+immediate variant, allowing a
 * constant operand to be encoded directly into the instruction.
 * NOTE(review): most case labels are not visible in this span (elided); only
 * the return values are.  Presumably each entry maps OP_FOO -> OP_FOO_IMM
 * (e.g. OP_IADD -> OP_IADD_IMM) — confirm against upstream.
 */
11810 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU/shift opcodes */
11814 return OP_IADD_IMM;
11816 return OP_ISUB_IMM;
11818 return OP_IDIV_IMM;
11820 return OP_IDIV_UN_IMM;
11822 return OP_IREM_IMM;
11824 return OP_IREM_UN_IMM;
11826 return OP_IMUL_IMM;
11828 return OP_IAND_IMM;
11832 return OP_IXOR_IMM;
11834 return OP_ISHL_IMM;
11836 return OP_ISHR_IMM;
11838 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU/shift opcodes */
11841 return OP_LADD_IMM;
11843 return OP_LSUB_IMM;
11845 return OP_LAND_IMM;
11849 return OP_LXOR_IMM;
11851 return OP_LSHL_IMM;
11853 return OP_LSHR_IMM;
11855 return OP_LSHR_UN_IMM;
/* compares */
11858 return OP_COMPARE_IMM;
11860 return OP_ICOMPARE_IMM;
11862 return OP_LCOMPARE_IMM;
/* membase stores: store the immediate instead of a register */
11864 case OP_STORE_MEMBASE_REG:
11865 return OP_STORE_MEMBASE_IMM;
11866 case OP_STOREI1_MEMBASE_REG:
11867 return OP_STOREI1_MEMBASE_IMM;
11868 case OP_STOREI2_MEMBASE_REG:
11869 return OP_STOREI2_MEMBASE_IMM;
11870 case OP_STOREI4_MEMBASE_REG:
11871 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific forms */
11873 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11875 return OP_X86_PUSH_IMM;
11876 case OP_X86_COMPARE_MEMBASE_REG:
11877 return OP_X86_COMPARE_MEMBASE_IMM;
11879 #if defined(TARGET_AMD64)
11880 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11881 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a register that turned out to be a constant
 * target becomes a direct call */
11883 case OP_VOIDCALL_REG:
11884 return OP_VOIDCALL;
11892 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the JIT's typed
 * OP_LOAD*_MEMBASE opcode.  Aborts for opcodes that are not LDIND forms.
 * NOTE(review): most case labels are elided in this span; only the return
 * values and the CEE_LDIND_REF case are visible.
 */
11899 ldind_to_load_membase (int opcode)
11903 return OP_LOADI1_MEMBASE;
11905 return OP_LOADU1_MEMBASE;
11907 return OP_LOADI2_MEMBASE;
11909 return OP_LOADU2_MEMBASE;
11911 return OP_LOADI4_MEMBASE;
11913 return OP_LOADU4_MEMBASE;
/* native int and object references both use the pointer-sized load */
11915 return OP_LOAD_MEMBASE;
11916 case CEE_LDIND_REF:
11917 return OP_LOAD_MEMBASE;
11919 return OP_LOADI8_MEMBASE;
11921 return OP_LOADR4_MEMBASE;
11923 return OP_LOADR8_MEMBASE;
11925 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the JIT's typed
 * OP_STORE*_MEMBASE_REG opcode.  Aborts for opcodes that are not STIND
 * forms.  Case labels other than CEE_STIND_REF are elided in this span.
 */
11932 stind_to_store_membase (int opcode)
11936 return OP_STOREI1_MEMBASE_REG;
11938 return OP_STOREI2_MEMBASE_REG;
11940 return OP_STOREI4_MEMBASE_REG;
/* object references use the pointer-sized store */
11942 case CEE_STIND_REF:
11943 return OP_STORE_MEMBASE_REG;
11945 return OP_STOREI8_MEMBASE_REG;
11947 return OP_STORER4_MEMBASE_REG;
11949 return OP_STORER8_MEMBASE_REG;
11951 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM).  Only x86/amd64 support such
 * addressing here; the failure return for other opcodes/targets is not
 * visible in this span (presumably -1 — confirm upstream).
 */
11958 mono_load_membase_to_load_mem (int opcode)
11960 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11961 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11963 case OP_LOAD_MEMBASE:
11964 return OP_LOAD_MEM;
11965 case OP_LOADU1_MEMBASE:
11966 return OP_LOADU1_MEM;
11967 case OP_LOADU2_MEMBASE:
11968 return OP_LOADU2_MEM;
11969 case OP_LOADI4_MEMBASE:
11970 return OP_LOADI4_MEM;
11971 case OP_LOADU4_MEMBASE:
11972 return OP_LOADU4_MEM;
/* 64 bit loads only have a _MEM form on 64 bit targets */
11973 #if SIZEOF_REGISTER == 8
11974 case OP_LOADI8_MEMBASE:
11975 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination variable lives in memory (its
 * store is STORE_OPCODE), return a fused read-modify-write *_MEMBASE
 * opcode that operates directly on the memory slot, avoiding a separate
 * load+store.  x86 only fuses 32 bit / pointer stores; amd64 also fuses
 * 64 bit stores.  The switch scaffolding and the case labels are elided
 * in this span; the -1 "no fusion possible" return is presumably elided
 * too — confirm upstream.
 */
11984 op_to_op_dest_membase (int store_opcode, int opcode)
11986 #if defined(TARGET_X86)
/* only int/pointer sized destinations can be fused on x86 */
11987 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11992 return OP_X86_ADD_MEMBASE_REG;
11994 return OP_X86_SUB_MEMBASE_REG;
11996 return OP_X86_AND_MEMBASE_REG;
11998 return OP_X86_OR_MEMBASE_REG;
12000 return OP_X86_XOR_MEMBASE_REG;
12003 return OP_X86_ADD_MEMBASE_IMM;
12006 return OP_X86_SUB_MEMBASE_IMM;
12009 return OP_X86_AND_MEMBASE_IMM;
12012 return OP_X86_OR_MEMBASE_IMM;
12015 return OP_X86_XOR_MEMBASE_IMM;
12021 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64 bit destinations */
12022 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12027 return OP_X86_ADD_MEMBASE_REG;
12029 return OP_X86_SUB_MEMBASE_REG;
12031 return OP_X86_AND_MEMBASE_REG;
12033 return OP_X86_OR_MEMBASE_REG;
12035 return OP_X86_XOR_MEMBASE_REG;
12037 return OP_X86_ADD_MEMBASE_IMM;
12039 return OP_X86_SUB_MEMBASE_IMM;
12041 return OP_X86_AND_MEMBASE_IMM;
12043 return OP_X86_OR_MEMBASE_IMM;
12045 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
12047 return OP_AMD64_ADD_MEMBASE_REG;
12049 return OP_AMD64_SUB_MEMBASE_REG;
12051 return OP_AMD64_AND_MEMBASE_REG;
12053 return OP_AMD64_OR_MEMBASE_REG;
12055 return OP_AMD64_XOR_MEMBASE_REG;
12058 return OP_AMD64_ADD_MEMBASE_IMM;
12061 return OP_AMD64_SUB_MEMBASE_IMM;
12064 return OP_AMD64_AND_MEMBASE_IMM;
12067 return OP_AMD64_OR_MEMBASE_IMM;
12070 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a byte-sized store of a compare result into an x86/amd64
 * SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 * NOTE(review): the switch over OPCODE selecting between the two branches
 * is elided in this span — presumably it dispatches on the ceq/cne style
 * compare opcodes; confirm upstream.
 */
12080 op_to_op_store_membase (int store_opcode, int opcode)
12082 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only 1 byte stores can be fused */
12085 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12086 return OP_X86_SETEQ_MEMBASE;
12088 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12089 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an instruction whose first source register is produced by a
 * memory load (LOAD_OPCODE), return a fused opcode which reads that
 * operand directly from memory (push/compare *_MEMBASE forms), or
 * (presumably) -1 if no fusion is possible — the failure returns are
 * elided in this span.  Handles x86 and amd64, including the ilp32
 * (x32) special cases where OP_LOAD_MEMBASE is only 4 bytes wide.
 */
12097 op_to_op_src1_membase (int load_opcode, int opcode)
12100 /* FIXME: This has sign extension issues */
/* byte compare against an immediate: use the 8 bit membase compare */
12102 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12103 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only int/pointer sized loads can be fused on x86 */
12106 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12111 return OP_X86_PUSH_MEMBASE;
12112 case OP_COMPARE_IMM:
12113 case OP_ICOMPARE_IMM:
12114 return OP_X86_COMPARE_MEMBASE_IMM;
12117 return OP_X86_COMPARE_MEMBASE_REG;
12121 #ifdef TARGET_AMD64
12122 /* FIXME: This has sign extension issues */
12124 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12125 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ilp32, OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE is 8 */
12130 #ifdef __mono_ilp32__
12131 if (load_opcode == OP_LOADI8_MEMBASE)
12133 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12135 return OP_X86_PUSH_MEMBASE;
12137 /* FIXME: This only works for 32 bit immediates
12138 case OP_COMPARE_IMM:
12139 case OP_LCOMPARE_IMM:
12140 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12141 return OP_AMD64_COMPARE_MEMBASE_IMM;
12143 case OP_ICOMPARE_IMM:
12144 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12145 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12149 #ifdef __mono_ilp32__
12150 if (load_opcode == OP_LOAD_MEMBASE)
12151 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12152 if (load_opcode == OP_LOADI8_MEMBASE)
12154 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12156 return OP_AMD64_COMPARE_MEMBASE_REG;
12159 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12160 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase, but for the SECOND source
 * register: fuse the load feeding sreg2 into a reg-memory ALU/compare
 * opcode (*_REG_MEMBASE forms).  The failure returns and case labels
 * are elided in this span.  Note the amd64 32 bit branch reuses the
 * OP_X86_*_REG_MEMBASE opcodes for the ALU cases.
 */
12169 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only int/pointer sized loads can be fused */
12172 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12178 return OP_X86_COMPARE_REG_MEMBASE;
12180 return OP_X86_ADD_REG_MEMBASE;
12182 return OP_X86_SUB_REG_MEMBASE;
12184 return OP_X86_AND_REG_MEMBASE;
12186 return OP_X86_OR_REG_MEMBASE;
12188 return OP_X86_XOR_REG_MEMBASE;
12192 #ifdef TARGET_AMD64
/* under ilp32, pointer-sized OP_LOAD_MEMBASE counts as a 32 bit load */
12193 #ifdef __mono_ilp32__
12194 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12196 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12200 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12202 return OP_X86_ADD_REG_MEMBASE;
12204 return OP_X86_SUB_REG_MEMBASE;
12206 return OP_X86_AND_REG_MEMBASE;
12208 return OP_X86_OR_REG_MEMBASE;
12210 return OP_X86_XOR_REG_MEMBASE;
12212 #ifdef __mono_ilp32__
12213 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12215 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12220 return OP_AMD64_COMPARE_REG_MEMBASE;
12222 return OP_AMD64_ADD_REG_MEMBASE;
12224 return OP_AMD64_SUB_REG_MEMBASE;
12226 return OP_AMD64_AND_REG_MEMBASE;
12228 return OP_AMD64_OR_REG_MEMBASE;
12230 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but first rejects opcodes which this
 * architecture emulates in software (long shifts on 32 bit targets
 * without native support, mul/div/rem under MONO_ARCH_EMULATE_*),
 * since those cannot be turned into immediate forms.  The rejected
 * case lists between the #if blocks are elided in this span — confirm
 * upstream.
 */
12239 mono_op_to_op_imm_noemul (int opcode)
12242 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12248 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12255 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else falls through to the normal mapping */
12260 return mono_op_to_op_imm (opcode);
12265 * mono_handle_global_vregs:
12267 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as far as visible in this elided span):
 *   1) Scan every instruction of every bblock, tracking in vreg_to_bb which
 *      single bblock (block_num + 1) each vreg is used in; a vreg seen in a
 *      second bblock is promoted to a MonoInst variable of the matching type
 *      and its entry is set to -1.
 *   2) Variables used in only one bblock are demoted back to plain local
 *      vregs (marked MONO_INST_IS_DEAD) so the register allocator can treat
 *      them cheaply.
 *   3) The varinfo/vars tables are compacted to drop the dead entries.
 */
12271 mono_handle_global_vregs (MonoCompile *cfg)
12273 gint32 *vreg_to_bb;
12274 MonoBasicBlock *bb;
/* NOTE(review): element size is sizeof (gint32*) (a pointer) although the
 * array holds gint32, and the "+ 1" is not scaled by the element size.
 * Over-allocates (harmlessly) on 64 bit — verify intent upstream. */
12277 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12279 #ifdef MONO_ARCH_SIMD_INTRINSICS
12280 if (cfg->uses_simd_intrinsics)
12281 mono_simd_simplify_indirection (cfg);
12284 /* Find local vregs used in more than one bb */
12285 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12286 MonoInst *ins = bb->code;
12287 int block_num = bb->block_num;
12289 if (cfg->verbose_level > 2)
12290 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12293 for (; ins; ins = ins->next) {
12294 const char *spec = INS_INFO (ins->opcode);
12295 int regtype = 0, regindex;
12298 if (G_UNLIKELY (cfg->verbose_level > 2))
12299 mono_print_ins (ins);
/* this pass runs after CIL opcodes have been lowered to machine IR */
12301 g_assert (ins->opcode >= MONO_CEE_LAST);
/* examine dreg (0), sreg1 (1), sreg2 (2) and sreg3 (3) in turn; a ' '
 * in the spec string means the slot is unused */
12303 for (regindex = 0; regindex < 4; regindex ++) {
12306 if (regindex == 0) {
12307 regtype = spec [MONO_INST_DEST];
12308 if (regtype == ' ')
12311 } else if (regindex == 1) {
12312 regtype = spec [MONO_INST_SRC1];
12313 if (regtype == ' ')
12316 } else if (regindex == 2) {
12317 regtype = spec [MONO_INST_SRC2];
12318 if (regtype == ' ')
12321 } else if (regindex == 3) {
12322 regtype = spec [MONO_INST_SRC3];
12323 if (regtype == ' ')
12328 #if SIZEOF_REGISTER == 4
12329 /* In the LLVM case, the long opcodes are not decomposed */
12330 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12332 * Since some instructions reference the original long vreg,
12333 * and some reference the two component vregs, it is quite hard
12334 * to determine when it needs to be global. So be conservative.
12336 if (!get_vreg_to_inst (cfg, vreg)) {
12337 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12339 if (cfg->verbose_level > 2)
12340 printf ("LONG VREG R%d made global.\n", vreg);
12344 * Make the component vregs volatile since the optimizations can
12345 * get confused otherwise.
/* on 32 bit targets a long vreg N has component vregs N+1 (low) and N+2 (high) */
12347 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12348 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12352 g_assert (vreg != -1);
12354 prev_bb = vreg_to_bb [vreg];
12355 if (prev_bb == 0) {
12356 /* 0 is a valid block num */
12357 vreg_to_bb [vreg] = block_num + 1;
12358 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* vregs below MONO_MAX_IREGS/FREGS denote hardware registers — presumably
 * already global, so they are not promoted here; confirm upstream */
12359 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12362 if (!get_vreg_to_inst (cfg, vreg)) {
12363 if (G_UNLIKELY (cfg->verbose_level > 2))
12364 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable of the type matching the spec regtype */
12368 if (vreg_is_ref (cfg, vreg))
12369 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12371 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12374 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12377 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12380 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12383 g_assert_not_reached ();
12387 /* Flag as having been used in more than one bb */
12388 vreg_to_bb [vreg] = -1;
12394 /* If a variable is used in only one bblock, convert it into a local vreg */
12395 for (i = 0; i < cfg->num_varinfo; i++) {
12396 MonoInst *var = cfg->varinfo [i];
12397 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12399 switch (var->type) {
12405 #if SIZEOF_REGISTER == 8
12408 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
12409 /* Enabling this screws up the fp stack on x86 */
12412 /* Arguments are implicitly global */
12413 /* Putting R4 vars into registers doesn't work currently */
12414 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12415 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12417 * Make that the variable's liveness interval doesn't contain a call, since
12418 * that would cause the lvreg to be spilled, making the whole optimization
12421 /* This is too slow for JIT compilation */
12423 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12425 int def_index, call_index, ins_index;
12426 gboolean spilled = FALSE;
/* NOTE(review): vreg_to_bb holds gint32 block ids elsewhere in this
 * function, but here it is dereferenced like a MonoBasicBlock pointer
 * (->code) — this line looks garbled/elided; confirm against upstream. */
12431 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12432 const char *spec = INS_INFO (ins->opcode);
12434 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12435 def_index = ins_index;
/* NOTE(review): both arms of this || are identical (SRC1/sreg1); the
 * second arm was presumably meant to test spec [MONO_INST_SRC2] and
 * ins->sreg2 — as written it is a dead duplicate, so uses via sreg2
 * are not checked here. */
12437 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12438 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12439 if (call_index > def_index) {
12445 if (MONO_IS_CALL (ins))
12446 call_index = ins_index;
12456 if (G_UNLIKELY (cfg->verbose_level > 2))
12457 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and drop its vreg mapping */
12458 var->flags |= MONO_INST_IS_DEAD;
12459 cfg->vreg_to_inst [var->dreg] = NULL;
12466 * Compress the varinfo and vars tables so the liveness computation is faster and
12467 * takes up less space.
12470 for (i = 0; i < cfg->num_varinfo; ++i) {
12471 MonoInst *var = cfg->varinfo [i];
12472 if (pos < i && cfg->locals_start == i)
12473 cfg->locals_start = pos;
12474 if (!(var->flags & MONO_INST_IS_DEAD)) {
12476 cfg->varinfo [pos] = cfg->varinfo [i];
12477 cfg->varinfo [pos]->inst_c0 = pos;
12478 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12479 cfg->vars [pos].idx = pos;
12480 #if SIZEOF_REGISTER == 4
12481 if (cfg->varinfo [pos]->type == STACK_I8) {
12482 /* Modify the two component vars too */
12485 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12486 var1->inst_c0 = pos;
12487 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12488 var1->inst_c0 = pos;
12495 cfg->num_varinfo = pos;
12496 if (cfg->locals_start > cfg->num_varinfo)
12497 cfg->locals_start = cfg->num_varinfo;
12501 * mono_spill_global_vars:
12503 * Generate spill code for variables which are not allocated to registers,
12504 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12505 * code is generated which could be optimized by the local optimization passes.
12508 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12510 MonoBasicBlock *bb;
12512 int orig_next_vreg;
12513 guint32 *vreg_to_lvreg;
12515 guint32 i, lvregs_len;
12516 gboolean dest_has_lvreg = FALSE;
12517 guint32 stacktypes [128];
12518 MonoInst **live_range_start, **live_range_end;
12519 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12520 int *gsharedvt_vreg_to_idx = NULL;
12522 *need_local_opts = FALSE;
12524 memset (spec2, 0, sizeof (spec2));
12526 /* FIXME: Move this function to mini.c */
12527 stacktypes ['i'] = STACK_PTR;
12528 stacktypes ['l'] = STACK_I8;
12529 stacktypes ['f'] = STACK_R8;
12530 #ifdef MONO_ARCH_SIMD_INTRINSICS
12531 stacktypes ['x'] = STACK_VTYPE;
12534 #if SIZEOF_REGISTER == 4
12535 /* Create MonoInsts for longs */
12536 for (i = 0; i < cfg->num_varinfo; i++) {
12537 MonoInst *ins = cfg->varinfo [i];
12539 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12540 switch (ins->type) {
12545 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12548 g_assert (ins->opcode == OP_REGOFFSET);
12550 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12552 tree->opcode = OP_REGOFFSET;
12553 tree->inst_basereg = ins->inst_basereg;
12554 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12556 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12558 tree->opcode = OP_REGOFFSET;
12559 tree->inst_basereg = ins->inst_basereg;
12560 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12570 if (cfg->compute_gc_maps) {
12571 /* registers need liveness info even for !non refs */
12572 for (i = 0; i < cfg->num_varinfo; i++) {
12573 MonoInst *ins = cfg->varinfo [i];
12575 if (ins->opcode == OP_REGVAR)
12576 ins->flags |= MONO_INST_GC_TRACK;
12580 if (cfg->gsharedvt) {
12581 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12583 for (i = 0; i < cfg->num_varinfo; ++i) {
12584 MonoInst *ins = cfg->varinfo [i];
12587 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12588 if (i >= cfg->locals_start) {
12590 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12591 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12592 ins->opcode = OP_GSHAREDVT_LOCAL;
12593 ins->inst_imm = idx;
12596 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12597 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12603 /* FIXME: widening and truncation */
12606 * As an optimization, when a variable allocated to the stack is first loaded into
12607 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12608 * the variable again.
12610 orig_next_vreg = cfg->next_vreg;
12611 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12612 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12616 * These arrays contain the first and last instructions accessing a given
12618 * Since we emit bblocks in the same order we process them here, and we
12619 * don't split live ranges, these will precisely describe the live range of
12620 * the variable, i.e. the instruction range where a valid value can be found
12621 * in the variables location.
12622 * The live range is computed using the liveness info computed by the liveness pass.
12623 * We can't use vmv->range, since that is an abstract live range, and we need
12624 * one which is instruction precise.
12625 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12627 /* FIXME: Only do this if debugging info is requested */
12628 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12629 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12630 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12631 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12633 /* Add spill loads/stores */
12634 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12637 if (cfg->verbose_level > 2)
12638 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12640 /* Clear vreg_to_lvreg array */
12641 for (i = 0; i < lvregs_len; i++)
12642 vreg_to_lvreg [lvregs [i]] = 0;
12646 MONO_BB_FOR_EACH_INS (bb, ins) {
12647 const char *spec = INS_INFO (ins->opcode);
12648 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12649 gboolean store, no_lvreg;
12650 int sregs [MONO_MAX_SRC_REGS];
12652 if (G_UNLIKELY (cfg->verbose_level > 2))
12653 mono_print_ins (ins);
12655 if (ins->opcode == OP_NOP)
12659 * We handle LDADDR here as well, since it can only be decomposed
12660 * when variable addresses are known.
12662 if (ins->opcode == OP_LDADDR) {
12663 MonoInst *var = ins->inst_p0;
12665 if (var->opcode == OP_VTARG_ADDR) {
12666 /* Happens on SPARC/S390 where vtypes are passed by reference */
12667 MonoInst *vtaddr = var->inst_left;
12668 if (vtaddr->opcode == OP_REGVAR) {
12669 ins->opcode = OP_MOVE;
12670 ins->sreg1 = vtaddr->dreg;
12672 else if (var->inst_left->opcode == OP_REGOFFSET) {
12673 ins->opcode = OP_LOAD_MEMBASE;
12674 ins->inst_basereg = vtaddr->inst_basereg;
12675 ins->inst_offset = vtaddr->inst_offset;
12678 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12679 /* gsharedvt arg passed by ref */
12680 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12682 ins->opcode = OP_LOAD_MEMBASE;
12683 ins->inst_basereg = var->inst_basereg;
12684 ins->inst_offset = var->inst_offset;
12685 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12686 MonoInst *load, *load2, *load3;
12687 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12688 int reg1, reg2, reg3;
12689 MonoInst *info_var = cfg->gsharedvt_info_var;
12690 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12694 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12697 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12699 g_assert (info_var);
12700 g_assert (locals_var);
12702 /* Mark the instruction used to compute the locals var as used */
12703 cfg->gsharedvt_locals_var_ins = NULL;
12705 /* Load the offset */
12706 if (info_var->opcode == OP_REGOFFSET) {
12707 reg1 = alloc_ireg (cfg);
12708 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12709 } else if (info_var->opcode == OP_REGVAR) {
12711 reg1 = info_var->dreg;
12713 g_assert_not_reached ();
12715 reg2 = alloc_ireg (cfg);
12716 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12717 /* Load the locals area address */
12718 reg3 = alloc_ireg (cfg);
12719 if (locals_var->opcode == OP_REGOFFSET) {
12720 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12721 } else if (locals_var->opcode == OP_REGVAR) {
12722 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12724 g_assert_not_reached ();
12726 /* Compute the address */
12727 ins->opcode = OP_PADD;
12731 mono_bblock_insert_before_ins (bb, ins, load3);
12732 mono_bblock_insert_before_ins (bb, load3, load2);
12734 mono_bblock_insert_before_ins (bb, load2, load);
12736 g_assert (var->opcode == OP_REGOFFSET);
12738 ins->opcode = OP_ADD_IMM;
12739 ins->sreg1 = var->inst_basereg;
12740 ins->inst_imm = var->inst_offset;
12743 *need_local_opts = TRUE;
12744 spec = INS_INFO (ins->opcode);
12747 if (ins->opcode < MONO_CEE_LAST) {
12748 mono_print_ins (ins);
12749 g_assert_not_reached ();
12753 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12757 if (MONO_IS_STORE_MEMBASE (ins)) {
12758 tmp_reg = ins->dreg;
12759 ins->dreg = ins->sreg2;
12760 ins->sreg2 = tmp_reg;
12763 spec2 [MONO_INST_DEST] = ' ';
12764 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12765 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12766 spec2 [MONO_INST_SRC3] = ' ';
12768 } else if (MONO_IS_STORE_MEMINDEX (ins))
12769 g_assert_not_reached ();
12774 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12775 printf ("\t %.3s %d", spec, ins->dreg);
12776 num_sregs = mono_inst_get_src_registers (ins, sregs);
12777 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12778 printf (" %d", sregs [srcindex]);
12785 regtype = spec [MONO_INST_DEST];
12786 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12789 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12790 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12791 MonoInst *store_ins;
12793 MonoInst *def_ins = ins;
12794 int dreg = ins->dreg; /* The original vreg */
12796 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12798 if (var->opcode == OP_REGVAR) {
12799 ins->dreg = var->dreg;
12800 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12802 * Instead of emitting a load+store, use a _membase opcode.
12804 g_assert (var->opcode == OP_REGOFFSET);
12805 if (ins->opcode == OP_MOVE) {
12809 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12810 ins->inst_basereg = var->inst_basereg;
12811 ins->inst_offset = var->inst_offset;
12814 spec = INS_INFO (ins->opcode);
12818 g_assert (var->opcode == OP_REGOFFSET);
12820 prev_dreg = ins->dreg;
12822 /* Invalidate any previous lvreg for this vreg */
12823 vreg_to_lvreg [ins->dreg] = 0;
12827 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12829 store_opcode = OP_STOREI8_MEMBASE_REG;
12832 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12834 #if SIZEOF_REGISTER != 8
12835 if (regtype == 'l') {
12836 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12837 mono_bblock_insert_after_ins (bb, ins, store_ins);
12838 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12839 mono_bblock_insert_after_ins (bb, ins, store_ins);
12840 def_ins = store_ins;
12845 g_assert (store_opcode != OP_STOREV_MEMBASE);
12847 /* Try to fuse the store into the instruction itself */
12848 /* FIXME: Add more instructions */
12849 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12850 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12851 ins->inst_imm = ins->inst_c0;
12852 ins->inst_destbasereg = var->inst_basereg;
12853 ins->inst_offset = var->inst_offset;
12854 spec = INS_INFO (ins->opcode);
12855 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12856 ins->opcode = store_opcode;
12857 ins->inst_destbasereg = var->inst_basereg;
12858 ins->inst_offset = var->inst_offset;
12862 tmp_reg = ins->dreg;
12863 ins->dreg = ins->sreg2;
12864 ins->sreg2 = tmp_reg;
12867 spec2 [MONO_INST_DEST] = ' ';
12868 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12869 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12870 spec2 [MONO_INST_SRC3] = ' ';
12872 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12873 // FIXME: The backends expect the base reg to be in inst_basereg
12874 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12876 ins->inst_basereg = var->inst_basereg;
12877 ins->inst_offset = var->inst_offset;
12878 spec = INS_INFO (ins->opcode);
12880 /* printf ("INS: "); mono_print_ins (ins); */
12881 /* Create a store instruction */
12882 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12884 /* Insert it after the instruction */
12885 mono_bblock_insert_after_ins (bb, ins, store_ins);
12887 def_ins = store_ins;
12890 * We can't assign ins->dreg to var->dreg here, since the
12891 * sregs could use it. So set a flag, and do it after
12894 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12895 dest_has_lvreg = TRUE;
12900 if (def_ins && !live_range_start [dreg]) {
12901 live_range_start [dreg] = def_ins;
12902 live_range_start_bb [dreg] = bb;
12905 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12908 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12909 tmp->inst_c1 = dreg;
12910 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12917 num_sregs = mono_inst_get_src_registers (ins, sregs);
12918 for (srcindex = 0; srcindex < 3; ++srcindex) {
12919 regtype = spec [MONO_INST_SRC1 + srcindex];
12920 sreg = sregs [srcindex];
12922 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12923 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12924 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12925 MonoInst *use_ins = ins;
12926 MonoInst *load_ins;
12927 guint32 load_opcode;
12929 if (var->opcode == OP_REGVAR) {
12930 sregs [srcindex] = var->dreg;
12931 //mono_inst_set_src_registers (ins, sregs);
12932 live_range_end [sreg] = use_ins;
12933 live_range_end_bb [sreg] = bb;
12935 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12938 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12939 /* var->dreg is a hreg */
12940 tmp->inst_c1 = sreg;
12941 mono_bblock_insert_after_ins (bb, ins, tmp);
12947 g_assert (var->opcode == OP_REGOFFSET);
12949 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12951 g_assert (load_opcode != OP_LOADV_MEMBASE);
12953 if (vreg_to_lvreg [sreg]) {
12954 g_assert (vreg_to_lvreg [sreg] != -1);
12956 /* The variable is already loaded to an lvreg */
12957 if (G_UNLIKELY (cfg->verbose_level > 2))
12958 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12959 sregs [srcindex] = vreg_to_lvreg [sreg];
12960 //mono_inst_set_src_registers (ins, sregs);
12964 /* Try to fuse the load into the instruction */
12965 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12966 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12967 sregs [0] = var->inst_basereg;
12968 //mono_inst_set_src_registers (ins, sregs);
12969 ins->inst_offset = var->inst_offset;
12970 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12971 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12972 sregs [1] = var->inst_basereg;
12973 //mono_inst_set_src_registers (ins, sregs);
12974 ins->inst_offset = var->inst_offset;
12976 if (MONO_IS_REAL_MOVE (ins)) {
12977 ins->opcode = OP_NOP;
12980 //printf ("%d ", srcindex); mono_print_ins (ins);
12982 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12984 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12985 if (var->dreg == prev_dreg) {
12987 * sreg refers to the value loaded by the load
12988 * emitted below, but we need to use ins->dreg
12989 * since it refers to the store emitted earlier.
12993 g_assert (sreg != -1);
12994 vreg_to_lvreg [var->dreg] = sreg;
12995 g_assert (lvregs_len < 1024);
12996 lvregs [lvregs_len ++] = var->dreg;
13000 sregs [srcindex] = sreg;
13001 //mono_inst_set_src_registers (ins, sregs);
13003 #if SIZEOF_REGISTER != 8
13004 if (regtype == 'l') {
13005 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13006 mono_bblock_insert_before_ins (bb, ins, load_ins);
13007 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13008 mono_bblock_insert_before_ins (bb, ins, load_ins);
13009 use_ins = load_ins;
13014 #if SIZEOF_REGISTER == 4
13015 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13017 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13018 mono_bblock_insert_before_ins (bb, ins, load_ins);
13019 use_ins = load_ins;
13023 if (var->dreg < orig_next_vreg) {
13024 live_range_end [var->dreg] = use_ins;
13025 live_range_end_bb [var->dreg] = bb;
13028 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13031 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13032 tmp->inst_c1 = var->dreg;
13033 mono_bblock_insert_after_ins (bb, ins, tmp);
13037 mono_inst_set_src_registers (ins, sregs);
13039 if (dest_has_lvreg) {
13040 g_assert (ins->dreg != -1);
13041 vreg_to_lvreg [prev_dreg] = ins->dreg;
13042 g_assert (lvregs_len < 1024);
13043 lvregs [lvregs_len ++] = prev_dreg;
13044 dest_has_lvreg = FALSE;
13048 tmp_reg = ins->dreg;
13049 ins->dreg = ins->sreg2;
13050 ins->sreg2 = tmp_reg;
13053 if (MONO_IS_CALL (ins)) {
13054 /* Clear vreg_to_lvreg array */
13055 for (i = 0; i < lvregs_len; i++)
13056 vreg_to_lvreg [lvregs [i]] = 0;
13058 } else if (ins->opcode == OP_NOP) {
13060 MONO_INST_NULLIFY_SREGS (ins);
13063 if (cfg->verbose_level > 2)
13064 mono_print_ins_index (1, ins);
13067 /* Extend the live range based on the liveness info */
13068 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13069 for (i = 0; i < cfg->num_varinfo; i ++) {
13070 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13072 if (vreg_is_volatile (cfg, vi->vreg))
13073 /* The liveness info is incomplete */
13076 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13077 /* Live from at least the first ins of this bb */
13078 live_range_start [vi->vreg] = bb->code;
13079 live_range_start_bb [vi->vreg] = bb;
13082 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13083 /* Live at least until the last ins of this bb */
13084 live_range_end [vi->vreg] = bb->last_ins;
13085 live_range_end_bb [vi->vreg] = bb;
13091 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13093 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13094 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13096 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13097 for (i = 0; i < cfg->num_varinfo; ++i) {
13098 int vreg = MONO_VARINFO (cfg, i)->vreg;
13101 if (live_range_start [vreg]) {
13102 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13104 ins->inst_c1 = vreg;
13105 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13107 if (live_range_end [vreg]) {
13108 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13110 ins->inst_c1 = vreg;
13111 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13112 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13114 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13120 if (cfg->gsharedvt_locals_var_ins) {
13121 /* Nullify if unused */
13122 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13123 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13126 g_free (live_range_start);
13127 g_free (live_range_end);
13128 g_free (live_range_start_bb);
13129 g_free (live_range_end_bb);
13134 * - use 'iadd' instead of 'int_add'
13135 * - handling ovf opcodes: decompose in method_to_ir.
13136 * - unify iregs/fregs
13137 * -> partly done, the missing parts are:
13138 * - a more complete unification would involve unifying the hregs as well, so
13139 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13140 * would no longer map to the machine hregs, so the code generators would need to
13141 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13142 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13143 * fp/non-fp branches speeds it up by about 15%.
13144 * - use sext/zext opcodes instead of shifts
13146 * - get rid of TEMPLOADs if possible and use vregs instead
13147 * - clean up usage of OP_P/OP_ opcodes
13148 * - cleanup usage of DUMMY_USE
13149 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13151 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13152 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13153 * - make sure handle_stack_args () is called before the branch is emitted
13154 * - when the new IR is done, get rid of all unused stuff
13155 * - COMPARE/BEQ as separate instructions or unify them ?
13156 * - keeping them separate allows specialized compare instructions like
13157 * compare_imm, compare_membase
13158 * - most back ends unify fp compare+branch, fp compare+ceq
13159 * - integrate mono_save_args into inline_method
13160 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13161 * - handle long shift opts on 32 bit platforms somehow: they require
13162 * 3 sregs (2 for arg1 and 1 for arg2)
13163 * - make byref a 'normal' type.
13164 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13165 * variable if needed.
13166 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13167 * like inline_method.
13168 * - remove inlining restrictions
13169 * - fix LNEG and enable cfold of INEG
13170 * - generalize x86 optimizations like ldelema as a peephole optimization
13171 * - add store_mem_imm for amd64
13172 * - optimize the loading of the interruption flag in the managed->native wrappers
13173 * - avoid special handling of OP_NOP in passes
13174 * - move code inserting instructions into one function/macro.
13175 * - try a coalescing phase after liveness analysis
13176 * - add float -> vreg conversion + local optimizations on !x86
13177 * - figure out how to handle decomposed branches during optimizations, ie.
13178 * compare+branch, op_jump_table+op_br etc.
13179 * - promote RuntimeXHandles to vregs
13180 * - vtype cleanups:
13181 * - add a NEW_VARLOADA_VREG macro
13182 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13183 * accessing vtype fields.
13184 * - get rid of I8CONST on 64 bit platforms
13185 * - dealing with the increase in code size due to branches created during opcode
13187 * - use extended basic blocks
13188 * - all parts of the JIT
13189 * - handle_global_vregs () && local regalloc
13190 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13191 * - sources of increase in code size:
13194 * - isinst and castclass
13195 * - lvregs not allocated to global registers even if used multiple times
13196 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13198 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13199 * - add all micro optimizations from the old JIT
13200 * - put tree optimizations into the deadce pass
13201 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13202 * specific function.
13203 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13204 * fcompare + branchCC.
13205 * - create a helper function for allocating a stack slot, taking into account
13206 * MONO_CFG_HAS_SPILLUP.
13208 * - merge the ia64 switch changes.
13209 * - optimize mono_regstate2_alloc_int/float.
13210 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13211 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13212 * parts of the tree could be separated by other instructions, killing the tree
13213 * arguments, or stores killing loads etc. Also, should we fold loads into other
13214 * instructions if the result of the load is used multiple times ?
13215 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13216 * - LAST MERGE: 108395.
13217 * - when returning vtypes in registers, generate IR and append it to the end of the
13218 * last bb instead of doing it in the epilog.
13219 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13227 - When to decompose opcodes:
13228 - earlier: this makes some optimizations hard to implement, since the low level IR
13229 no longer contains the necessary information. But it is easier to do.
13230 - later: harder to implement, enables more optimizations.
13231 - Branches inside bblocks:
13232 - created when decomposing complex opcodes.
13233 - branches to another bblock: harmless, but not tracked by the branch
13234 optimizations, so need to branch to a label at the start of the bblock.
13235 - branches to inside the same bblock: very problematic, trips up the local
13236 reg allocator. Can be fixed by splitting the current bblock, but that is a
13237 complex operation, since some local vregs can become global vregs etc.
13238 - Local/global vregs:
13239 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13240 local register allocator.
13241 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13242 structure, created by mono_create_var (). Assigned to hregs or the stack by
13243 the global register allocator.
13244 - When to do optimizations like alu->alu_imm:
13245 - earlier -> saves work later on since the IR will be smaller/simpler
13246 - later -> can work on more instructions
13247 - Handling of valuetypes:
13248 - When a vtype is pushed on the stack, a new temporary is created, an
13249 instruction computing its address (LDADDR) is emitted and pushed on
13250 the stack. Need to optimize cases when the vtype is used immediately as in
13251 argument passing, stloc etc.
13252 - Instead of the to_end stuff in the old JIT, simply call the function handling
13253 the values on the stack before emitting the last instruction of the bb.
13256 #endif /* DISABLE_JIT */