2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inliner tuning constants. INLINE_LENGTH_LIMIT caps the IL size of inlinable
 * methods; BRANCH_COST presumably weights branches in the inlining cost
 * heuristic -- TODO confirm against the call sites (not visible in this chunk). */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Record an inline failure: optionally log the reason (verbose_level >= 2) and
 * bail out to the inline_failure label. Only fires while actually inlining
 * (cfg->method != method) a non-wrapper method. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Abort IR generation if an exception has already been recorded on the cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Flag a method-access violation: record MONO_EXCEPTION_METHOD_ACCESS plus a
 * descriptive message on the cfg, free the temporary name strings, and jump
 * to the exception_exit label. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Flag a field-access violation: record MONO_EXCEPTION_FIELD_ACCESS plus a
 * descriptive message on the cfg, free the temporary name strings, and jump
 * to the exception_exit label. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Generic-sharing bailout: when compiling with a generic sharing context,
 * optionally log the failing opcode, mark the cfg with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Same idea, but triggered only for gsharedvt compilations (cfg->gsharedvt);
 * the failure reason (method, opcode, file:line) is kept in
 * cfg->exception_message. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an out-of-memory condition on the cfg and abort compilation. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for file-local helpers defined later in this
 * translation unit. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
/* Non-static prototypes: these are visible to other parts of the JIT. */
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Cached MonoMethodSignature objects for the trampoline helpers; all are
 * initialized by mono_create_helper_signatures () below. */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/* Populate the cached helper_sig_* trampoline signatures. The strings passed
 * to mono_create_icall_signature () appear to spell "<ret> [<args>...]" --
 * confirm against that function's definition. */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
359 * foo<T> (int i) { ldarg.0; box T; }
/* IL verification failed: under gsharedvt, fall back to compiling concrete
 * instantiations (reported as a generic-sharing failure); otherwise honor
 * the break_on_unverified debug option. */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* Metadata load failed: break into the debugger (if requested) or jump to
 * the load_error label. */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Like LOAD_ERROR, but also records the offending class in cfg->exception_ptr. */
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * that control flow reaches after leaving 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1874 * Emit IR to push the current LMF onto the LMF stack.
1877 emit_push_lmf (MonoCompile *cfg)
1879 #if defined(MONO_ARCH_ENABLE_LMF_IR)
1881 * Emit IR to push the LMF:
1882 * lmf_addr = <lmf_addr from tls>
1883 * lmf->lmf_addr = lmf_addr
1884 * lmf->prev_lmf = *lmf_addr
1887 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1888 MonoInst *ins, *lmf_ins;
1890 if (!mono_arch_enable_lmf_ir (cfg))
1893 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1895 MONO_ADD_INS (cfg->cbb, lmf_ins);
1897 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1898 lmf_addr_reg = lmf_ins->dreg;
1900 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1901 lmf_reg = ins->dreg;
1903 if (!cfg->lmf_addr_var)
1904 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1905 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, cfg->lmf_addr_var->dreg, lmf_ins->dreg);
1906 prev_lmf_reg = alloc_preg (cfg);
1907 /* Save previous_lmf */
1908 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_addr_reg, 0);
1909 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
1911 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, lmf_reg);
1918 * Emit IR to pop the current LMF from the LMF stack.
1921 emit_pop_lmf (MonoCompile *cfg)
1923 #if defined(MONO_ARCH_ENABLE_LMF_IR)
1924 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1927 if (!mono_arch_enable_lmf_ir (cfg))
1931 * Emit IR to pop the LMF:
1932 * *(lmf->lmf_addr) = lmf->prev_lmf
1934 cfg->cbb = cfg->bb_exit;
1935 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1936 lmf_reg = ins->dreg;
1937 /* This could be called before emit_push_lmf () */
1938 if (!cfg->lmf_addr_var)
1939 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1940 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1941 prev_lmf_reg = alloc_preg (cfg);
1942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1943 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
1948 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1951 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1954 type = mini_get_basic_type_from_generic (gsctx, type);
1955 switch (type->type) {
1956 case MONO_TYPE_VOID:
1957 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1960 case MONO_TYPE_BOOLEAN:
1963 case MONO_TYPE_CHAR:
1966 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1970 case MONO_TYPE_FNPTR:
1971 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1980 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1983 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1984 case MONO_TYPE_VALUETYPE:
1985 if (type->data.klass->enumtype) {
1986 type = mono_class_enum_basetype (type->data.klass);
1989 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1990 case MONO_TYPE_TYPEDBYREF:
1991 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1992 case MONO_TYPE_GENERICINST:
1993 type = &type->data.generic_class->container_class->byval_arg;
1996 case MONO_TYPE_MVAR:
1998 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
2000 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2006 * target_type_is_incompatible:
2007 * @cfg: MonoCompile context
2009 * Check that the item @arg on the evaluation stack can be stored
2010 * in the target type (can be a local, or field, etc).
2011 * The cfg arg can be used to check if we need verification or just
2014 * Returns: non-0 value if arg can't be stored on a target.
2017 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2019 MonoType *simple_type;
2022 if (target->byref) {
2023 /* FIXME: check that the pointed to types match */
2024 if (arg->type == STACK_MP)
2025 return arg->klass != mono_class_from_mono_type (target);
2026 if (arg->type == STACK_PTR)
2031 simple_type = mono_type_get_underlying_type (target);
2032 switch (simple_type->type) {
2033 case MONO_TYPE_VOID:
2037 case MONO_TYPE_BOOLEAN:
2040 case MONO_TYPE_CHAR:
2043 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2047 /* STACK_MP is needed when setting pinned locals */
2048 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2053 case MONO_TYPE_FNPTR:
2055 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2056 * in native int. (#688008).
2058 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2061 case MONO_TYPE_CLASS:
2062 case MONO_TYPE_STRING:
2063 case MONO_TYPE_OBJECT:
2064 case MONO_TYPE_SZARRAY:
2065 case MONO_TYPE_ARRAY:
2066 if (arg->type != STACK_OBJ)
2068 /* FIXME: check type compatibility */
2072 if (arg->type != STACK_I8)
2077 if (arg->type != STACK_R8)
2080 case MONO_TYPE_VALUETYPE:
2081 if (arg->type != STACK_VTYPE)
2083 klass = mono_class_from_mono_type (simple_type);
2084 if (klass != arg->klass)
2087 case MONO_TYPE_TYPEDBYREF:
2088 if (arg->type != STACK_VTYPE)
2090 klass = mono_class_from_mono_type (simple_type);
2091 if (klass != arg->klass)
2094 case MONO_TYPE_GENERICINST:
2095 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2096 if (arg->type != STACK_VTYPE)
2098 klass = mono_class_from_mono_type (simple_type);
2099 if (klass != arg->klass)
2103 if (arg->type != STACK_OBJ)
2105 /* FIXME: check type compatibility */
2109 case MONO_TYPE_MVAR:
2110 g_assert (cfg->generic_sharing_context);
2111 if (mini_type_var_is_vt (cfg, simple_type)) {
2112 if (arg->type != STACK_VTYPE)
2115 if (arg->type != STACK_OBJ)
2120 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2126 * Prepare arguments for passing to a function call.
2127 * Return a non-zero value if the arguments can't be passed to the given
2129 * The type checks are not yet complete and some conversions may need
2130 * casts on 32 or 64 bit architectures.
2132 * FIXME: implement this using target_type_is_incompatible ()
2135 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2137 MonoType *simple_type;
2141 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2145 for (i = 0; i < sig->param_count; ++i) {
2146 if (sig->params [i]->byref) {
2147 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2151 simple_type = sig->params [i];
2152 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2154 switch (simple_type->type) {
2155 case MONO_TYPE_VOID:
2160 case MONO_TYPE_BOOLEAN:
2163 case MONO_TYPE_CHAR:
2166 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2172 case MONO_TYPE_FNPTR:
2173 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2176 case MONO_TYPE_CLASS:
2177 case MONO_TYPE_STRING:
2178 case MONO_TYPE_OBJECT:
2179 case MONO_TYPE_SZARRAY:
2180 case MONO_TYPE_ARRAY:
2181 if (args [i]->type != STACK_OBJ)
2186 if (args [i]->type != STACK_I8)
2191 if (args [i]->type != STACK_R8)
2194 case MONO_TYPE_VALUETYPE:
2195 if (simple_type->data.klass->enumtype) {
2196 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2199 if (args [i]->type != STACK_VTYPE)
2202 case MONO_TYPE_TYPEDBYREF:
2203 if (args [i]->type != STACK_VTYPE)
2206 case MONO_TYPE_GENERICINST:
2207 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2210 case MONO_TYPE_MVAR:
2212 if (args [i]->type != STACK_VTYPE)
2216 g_error ("unknown type 0x%02x in check_call_signature",
2224 callvirt_to_call (int opcode)
2229 case OP_VOIDCALLVIRT:
2238 g_assert_not_reached ();
2245 callvirt_to_call_membase (int opcode)
2249 return OP_CALL_MEMBASE;
2250 case OP_VOIDCALLVIRT:
2251 return OP_VOIDCALL_MEMBASE;
2253 return OP_FCALL_MEMBASE;
2255 return OP_LCALL_MEMBASE;
2257 return OP_VCALL_MEMBASE;
2259 g_assert_not_reached ();
2265 #ifdef MONO_ARCH_HAVE_IMT
2266 /* Either METHOD or IMT_ARG needs to be set */
2268 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2272 if (COMPILE_LLVM (cfg)) {
2273 method_reg = alloc_preg (cfg);
2276 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2277 } else if (cfg->compile_aot) {
2278 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2281 MONO_INST_NEW (cfg, ins, OP_PCONST);
2282 ins->inst_p0 = method;
2283 ins->dreg = method_reg;
2284 MONO_ADD_INS (cfg->cbb, ins);
2288 call->imt_arg_reg = method_reg;
2290 #ifdef MONO_ARCH_IMT_REG
2291 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2293 /* Need this to keep the IMT arg alive */
2294 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2299 #ifdef MONO_ARCH_IMT_REG
2300 method_reg = alloc_preg (cfg);
2303 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2304 } else if (cfg->compile_aot) {
2305 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2308 MONO_INST_NEW (cfg, ins, OP_PCONST);
2309 ins->inst_p0 = method;
2310 ins->dreg = method_reg;
2311 MONO_ADD_INS (cfg->cbb, ins);
2314 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2316 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2321 static MonoJumpInfo *
2322 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2324 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2328 ji->data.target = target;
2334 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2336 if (cfg->generic_sharing_context)
2337 return mono_class_check_context_used (klass);
2343 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2345 if (cfg->generic_sharing_context)
2346 return mono_method_check_context_used (method);
2352 * check_method_sharing:
2354 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2357 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2359 gboolean pass_vtable = FALSE;
2360 gboolean pass_mrgctx = FALSE;
2362 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2363 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2364 gboolean sharable = FALSE;
2366 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2369 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2370 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2371 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2373 sharable = sharing_enabled && context_sharable;
2377 * Pass vtable iff target method might
2378 * be shared, which means that sharing
2379 * is enabled for its class and its
2380 * context is sharable (and it's not a
2383 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2387 if (mini_method_get_context (cmethod) &&
2388 mini_method_get_context (cmethod)->method_inst) {
2389 g_assert (!pass_vtable);
2391 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2394 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2395 MonoGenericContext *context = mini_method_get_context (cmethod);
2396 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2398 if (sharing_enabled && context_sharable)
2400 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2405 if (out_pass_vtable)
2406 *out_pass_vtable = pass_vtable;
2407 if (out_pass_mrgctx)
2408 *out_pass_mrgctx = pass_mrgctx;
2411 inline static MonoCallInst *
2412 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2413 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2416 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2421 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2423 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2426 call->signature = sig;
2427 call->rgctx_reg = rgctx;
2429 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2432 if (mini_type_is_vtype (cfg, sig->ret)) {
2433 call->vret_var = cfg->vret_addr;
2434 //g_assert_not_reached ();
2436 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2437 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2440 temp->backend.is_pinvoke = sig->pinvoke;
2443 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2444 * address of return value to increase optimization opportunities.
2445 * Before vtype decomposition, the dreg of the call ins itself represents the
2446 * fact the call modifies the return value. After decomposition, the call will
2447 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2448 * will be transformed into an LDADDR.
2450 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2451 loada->dreg = alloc_preg (cfg);
2452 loada->inst_p0 = temp;
2453 /* We reference the call too since call->dreg could change during optimization */
2454 loada->inst_p1 = call;
2455 MONO_ADD_INS (cfg->cbb, loada);
2457 call->inst.dreg = temp->dreg;
2459 call->vret_var = loada;
2460 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2461 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2463 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2464 if (COMPILE_SOFT_FLOAT (cfg)) {
2466 * If the call has a float argument, we would need to do an r8->r4 conversion using
2467 * an icall, but that cannot be done during the call sequence since it would clobber
2468 * the call registers + the stack. So we do it before emitting the call.
2470 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2472 MonoInst *in = call->args [i];
2474 if (i >= sig->hasthis)
2475 t = sig->params [i - sig->hasthis];
2477 t = &mono_defaults.int_class->byval_arg;
2478 t = mono_type_get_underlying_type (t);
2480 if (!t->byref && t->type == MONO_TYPE_R4) {
2481 MonoInst *iargs [1];
2485 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2487 /* The result will be in an int vreg */
2488 call->args [i] = conv;
2494 call->need_unbox_trampoline = unbox_trampoline;
2497 if (COMPILE_LLVM (cfg))
2498 mono_llvm_emit_call (cfg, call);
2500 mono_arch_emit_call (cfg, call);
2502 mono_arch_emit_call (cfg, call);
2505 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2506 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Record on CALL that it receives the runtime-generic-context value in
 *   RGCTX_REG. On architectures that define MONO_ARCH_RGCTX_REG the value is
 *   pinned to that dedicated register as an out-arg; otherwise only
 *   rgctx_arg_reg is recorded. (NOTE(review): this chunk is an extraction
 *   with gaps — some original lines, e.g. braces/#else, are not visible.)
 */
2512 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2514 #ifdef MONO_ARCH_RGCTX_REG
2515 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2516 cfg->uses_rgctx_reg = TRUE;
2517 call->rgctx_reg = TRUE;
2519 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG. IMT_ARG and
 *   RGCTX_ARG are optional hidden arguments; when RGCTX_ARG is given its
 *   value is first copied into a fresh vreg so it survives the argument
 *   setup, then attached to the call via set_rgctx_arg ().
 */
2526 inline static MonoInst*
2527 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2533 rgctx_reg = mono_alloc_preg (cfg);
2534 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2537 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* the call target address lives in a vreg, consumed through sreg1 */
2539 call->inst.sreg1 = addr->dreg;
2542 emit_imt_argument (cfg, call, NULL, imt_arg);
2544 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2547 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2549 return (MonoInst*)call;
2553 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2556 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2558 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual) managed call to METHOD with signature SIG.
 *   THIS being non-NULL marks the call as virtual; IMT_ARG / RGCTX_ARG are
 *   optional hidden arguments. Handles: string ctor signature rewriting,
 *   transparent-proxy (remoting) wrappers, the delegate Invoke fast path,
 *   devirtualization of final/non-virtual methods, and interface/IMT and
 *   vtable-slot dispatch. Returns the call instruction as a MonoInst*.
 *   (NOTE(review): extraction gaps — several original lines are missing
 *   between the numbered lines below.)
 */
2561 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2562 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2564 #ifndef DISABLE_REMOTING
2565 gboolean might_be_remote = FALSE;
2567 gboolean virtual = this != NULL;
2568 gboolean enable_for_aot = TRUE;
2572 gboolean need_unbox_trampoline;
2575 sig = mono_method_signature (method);
2578 rgctx_reg = mono_alloc_preg (cfg);
2579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2582 if (method->string_ctor) {
2583 /* Create the real signature */
2584 /* FIXME: Cache these */
2585 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2586 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2591 context_used = mini_method_check_context_used (cfg, method);
2593 #ifndef DISABLE_REMOTING
/* a call might go through a transparent proxy when the receiver is a
 * MarshalByRefObject (or object) and the method is non-virtual */
2594 might_be_remote = this && sig->hasthis &&
2595 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2596 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2598 if (might_be_remote && context_used) {
2601 g_assert (cfg->generic_sharing_context);
/* under generic sharing the remoting wrapper address comes from the rgctx */
2603 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2605 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2609 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2611 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2613 #ifndef DISABLE_REMOTING
2614 if (might_be_remote)
2615 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2618 call->method = method;
2619 call->inst.flags |= MONO_INST_HAS_METHOD;
2620 call->inst.inst_left = this;
2621 call->tail_call = tail;
2624 int vtable_reg, slot_reg, this_reg;
2627 this_reg = this->dreg;
/* fast path: delegate Invoke dispatches through delegate->invoke_impl */
2629 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2630 MonoInst *dummy_use;
2632 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2634 /* Make a call to delegate->invoke_impl */
2635 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2636 call->inst.inst_basereg = this_reg;
2637 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2638 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2640 /* We must emit a dummy use here because the delegate trampoline will
2641 replace the 'this' argument with the delegate target making this activation
2642 no longer a root for the delegate.
2643 This is an issue for delegates that target collectible code such as dynamic
2644 methods of GC'able assemblies.
2646 For a test case look into #667921.
2648 FIXME: a dummy use is not the best way to do it as the local register allocator
2649 will put it on a caller save register and spil it around the call.
2650 Ideally, we would either put it on a callee save register or only do the store part.
2652 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2654 return (MonoInst*)call;
/* devirtualize: non-virtual, or final and not a remoting wrapper */
2657 if ((!cfg->compile_aot || enable_for_aot) &&
2658 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2659 (MONO_METHOD_IS_FINAL (method) &&
2660 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2661 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2663 * the method is not virtual, we just need to ensure this is not null
2664 * and then we can call the method directly.
2666 #ifndef DISABLE_REMOTING
2667 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2669 * The check above ensures method is not gshared, this is needed since
2670 * gshared methods can't have wrappers.
2672 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2676 if (!method->string_ctor)
2677 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2679 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2680 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2682 * the method is virtual, but we can statically dispatch since either
2683 * it's class or the method itself are sealed.
2684 * But first we need to ensure it's not a null reference.
2686 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2688 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* true virtual dispatch: load the vtable (faulting on null this) and
 * call through a vtable/IMT slot */
2690 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2692 vtable_reg = alloc_preg (cfg);
2693 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2694 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2696 #ifdef MONO_ARCH_HAVE_IMT
2698 guint32 imt_slot = mono_method_get_imt_slot (method);
2699 emit_imt_argument (cfg, call, call->method, imt_arg);
2700 slot_reg = vtable_reg;
/* IMT entries sit at negative offsets before the vtable */
2701 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2704 if (slot_reg == -1) {
2705 slot_reg = alloc_preg (cfg);
2706 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2707 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2710 slot_reg = vtable_reg;
2711 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2712 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2713 #ifdef MONO_ARCH_HAVE_IMT
2715 g_assert (mono_method_signature (method)->generic_param_count);
2716 emit_imt_argument (cfg, call, call->method, imt_arg);
2721 call->inst.sreg1 = slot_reg;
2722 call->inst.inst_offset = offset;
2723 call->virtual = TRUE;
2727 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2730 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2732 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper over mono_emit_method_call_full () using the method's
 *   own signature, no tail call, and no imt/rgctx hidden arguments.
 */
2736 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2738 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   All the virtual/tail/rgctx/unbox flags passed to mono_emit_call_args ()
 *   are FALSE. (NOTE(review): the line storing FUNC into the call, and the
 *   parameter list continuation, are not visible in this extraction.)
 */
2742 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2749 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2752 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2754 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the registered JIT icall identified by its function
 *   address FUNC, going through the icall's wrapper.
 */
2758 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2760 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2764 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2768 * mono_emit_abs_call:
2770 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/* The patch info JI is passed as the "address"; it is resolved to the real
 * target later via cfg->abs_patches, and fptr_is_patch marks the call so. */
2772 inline static MonoInst*
2773 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2774 MonoMethodSignature *sig, MonoInst **args)
2776 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2780 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2783 if (cfg->abs_patches == NULL)
2784 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2785 g_hash_table_insert (cfg->abs_patches, ji, ji);
2786 ins = mono_emit_native_call (cfg, ji, sig, args);
2787 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen the result INS of a call with signature FSIG to full register
 *   width. Needed for pinvoke (or LLVM) returns of small integer types,
 *   since native code might leave the upper bits of the return register
 *   uninitialized. Returns the (possibly replaced) result instruction.
 */
2792 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2794 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2795 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2799 * Native code might return non register sized integers
2800 * without initializing the upper bits.
/* choose the sign/zero-extension matching the return type's load size */
2802 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2803 case OP_LOADI1_MEMBASE:
2804 widen_op = OP_ICONV_TO_I1;
2806 case OP_LOADU1_MEMBASE:
2807 widen_op = OP_ICONV_TO_U1;
2809 case OP_LOADI2_MEMBASE:
2810 widen_op = OP_ICONV_TO_I2;
2812 case OP_LOADU2_MEMBASE:
2813 widen_op = OP_ICONV_TO_U2;
2819 if (widen_op != -1) {
2820 int dreg = alloc_preg (cfg);
2823 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2824 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the managed String.memcpy(dst, src, size) helper from corlib,
 *   cached in a static after the first lookup. Aborts if corlib is too old
 *   to contain it.
 */
2834 get_memcpy_method (void)
2836 static MonoMethod *memcpy_method = NULL;
2837 if (!memcpy_method) {
2838 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2840 g_error ("Old corlib found. Install a new one");
2842 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Build in *WB_BITMAP a bitmap with one bit per pointer-sized slot of
 *   KLASS (at byte OFFSET from the start), set for every slot that holds a
 *   managed reference and therefore needs a write barrier. Recurses into
 *   embedded value types that contain references; static fields are skipped.
 */
2846 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2848 MonoClassField *field;
2849 gpointer iter = NULL;
2851 while ((field = mono_class_get_fields (klass, &iter))) {
2854 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* for valuetypes, field offsets include the (absent) object header */
2856 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2857 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2858 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2859 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2861 MonoClass *field_class = mono_class_from_mono_type (field->type);
2862 if (field_class->has_references)
2863 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR. No-op unless
 *   cfg->gen_write_barriers. Prefers, in order: the arch's inline card-table
 *   barrier opcode, inline card-table marking code, and finally a call to
 *   the generic managed write barrier. Ends with a dummy use keeping VALUE
 *   alive across the barrier.
 */
2869 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2871 int card_table_shift_bits;
2872 gpointer card_table_mask;
2874 MonoInst *dummy_use;
2875 int nursery_shift_bits;
2876 size_t nursery_size;
2877 gboolean has_card_table_wb = FALSE;
2879 if (!cfg->gen_write_barriers)
2882 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2884 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2886 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2887 has_card_table_wb = TRUE;
2890 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
/* single arch-specific instruction does the whole barrier */
2893 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2894 wbarrier->sreg1 = ptr->dreg;
2895 wbarrier->sreg2 = value->dreg;
2896 MONO_ADD_INS (cfg->cbb, wbarrier);
2897 } else if (card_table) {
/* inline card marking: card_table [(ptr >> shift) & mask] = 1 */
2898 int offset_reg = alloc_preg (cfg);
2899 int card_reg = alloc_preg (cfg);
2902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2903 if (card_table_mask)
2904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2906 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2907 * IMM's larger than 32bits.
2909 if (cfg->compile_aot) {
2910 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2912 MONO_INST_NEW (cfg, ins, OP_PCONST);
2913 ins->inst_p0 = card_table;
2914 ins->dreg = card_reg;
2915 MONO_ADD_INS (cfg->cbb, ins);
2918 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2919 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* fallback: call the generic managed write barrier */
2921 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2922 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2925 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of a
 *   KLASS value from iargs[1] to iargs[0]. Bails out (presumably returning
 *   FALSE — the return statements are not visible in this extraction) when
 *   alignment is below pointer size or SIZE exceeds the 32-slot bitmap.
 *   Large copies go through the mono_gc_wbarrier_value_copy_bitmap icall;
 *   small ones are unrolled as pointer-sized load/store pairs with a
 *   barrier on each reference slot, plus a byte/short/int tail.
 */
2929 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2931 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2932 unsigned need_wb = 0;
2937 /*types with references can't have alignment smaller than sizeof(void*) */
2938 if (align < SIZEOF_VOID_P)
2941 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2942 if (size > 32 * SIZEOF_VOID_P)
2945 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2947 /* We don't unroll more than 5 stores to avoid code bloat. */
2948 if (size > 5 * SIZEOF_VOID_P) {
2949 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2950 size += (SIZEOF_VOID_P - 1);
2951 size &= ~(SIZEOF_VOID_P - 1);
2953 EMIT_NEW_ICONST (cfg, iargs [2], size);
2954 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2955 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2959 destreg = iargs [0]->dreg;
2960 srcreg = iargs [1]->dreg;
2963 dest_ptr_reg = alloc_preg (cfg);
2964 tmp_reg = alloc_preg (cfg);
/* walk a separate dest pointer so iargs[0] stays usable for barriers */
2967 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2969 while (size >= SIZEOF_VOID_P) {
2970 MonoInst *load_inst;
2971 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2972 load_inst->dreg = tmp_reg;
2973 load_inst->inst_basereg = srcreg;
2974 load_inst->inst_offset = offset;
2975 MONO_ADD_INS (cfg->cbb, load_inst);
2977 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* NOTE(review): barrier presumably guarded by the need_wb bitmap; the
 * guarding line is not visible in this extraction */
2980 emit_write_barrier (cfg, iargs [0], load_inst);
2982 offset += SIZEOF_VOID_P;
2983 size -= SIZEOF_VOID_P;
2986 /*tmp += sizeof (void*)*/
2987 if (size >= SIZEOF_VOID_P) {
2988 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2989 MONO_ADD_INS (cfg->cbb, iargs [0]);
2993 /* Those cannot be references since size < sizeof (void*) */
2995 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2996 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3002 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3003 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3009 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3010 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 */
3019 * Emit code to copy a valuetype of type @klass whose address is stored in
3020 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects native layout/size (pinvoke); gsharedvt classes get their
 * size and memcpy helper from the runtime generic context. Types with
 * references use the write-barrier-aware paths; plain data uses an inline
 * memcpy for small sizes, else the managed memcpy helper. */
3023 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3025 MonoInst *iargs [4];
3026 int context_used, n;
3028 MonoMethod *memcpy_method;
3029 MonoInst *size_ins = NULL;
3030 MonoInst *memcpy_ins = NULL;
3034 * This check breaks with spilled vars... need to handle it during verification anyway.
3035 * g_assert (klass && klass == src->klass && klass == dest->klass);
3038 if (mini_is_gsharedvt_klass (cfg, klass)) {
3040 context_used = mini_class_check_context_used (cfg, klass);
3041 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3042 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3046 n = mono_class_native_size (klass, &align);
3048 n = mono_class_value_size (klass, &align);
3050 /* if native is true there should be no references in the struct */
3051 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3052 /* Avoid barriers when storing to the stack */
3053 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3054 (dest->opcode == OP_LDADDR))) {
3060 context_used = mini_class_check_context_used (cfg, klass);
3062 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3063 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3065 } else if (context_used) {
3066 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3068 if (cfg->compile_aot) {
3069 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3071 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3072 mono_class_compute_gc_descriptor (klass);
3077 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3079 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3084 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3085 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3086 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3091 iargs [2] = size_ins;
3093 EMIT_NEW_ICONST (cfg, iargs [2], n);
3095 memcpy_method = get_memcpy_method ();
/* gsharedvt: indirect call through the rgctx-provided memcpy helper */
3097 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3099 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the managed String.memset(ptr, value, size) helper from corlib,
 *   cached in a static after the first lookup. Aborts if corlib is too old.
 */
3104 get_memset_method (void)
3106 static MonoMethod *memset_method = NULL;
3107 if (!memset_method) {
3108 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3110 g_error ("Old corlib found. Install a new one");
3112 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of type KLASS at address
 *   DEST->dreg (CIL initobj). Gsharedvt classes call the bzero helper with
 *   the runtime-provided size; otherwise small sizes are zeroed inline and
 *   larger ones call the managed memset helper with value 0.
 */
3116 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3118 MonoInst *iargs [3];
3119 int n, context_used;
3121 MonoMethod *memset_method;
3122 MonoInst *size_ins = NULL;
3123 MonoInst *bzero_ins = NULL;
3124 static MonoMethod *bzero_method;
3126 /* FIXME: Optimize this for the case when dest is an LDADDR */
3128 mono_class_init (klass);
3129 if (mini_is_gsharedvt_klass (cfg, klass)) {
3130 context_used = mini_class_check_context_used (cfg, klass);
3131 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3132 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3134 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3135 g_assert (bzero_method);
3137 iargs [1] = size_ins;
3138 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3142 n = mono_class_value_size (klass, &align);
3144 if (n <= sizeof (gpointer) * 5) {
3145 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3148 memset_method = get_memset_method ();
3150 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3151 EMIT_NEW_ICONST (cfg, iargs [2], n);
3152 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR loading the runtime generic context for METHOD. Depending on
 *   CONTEXT_USED and the method kind, the rgctx comes from the method
 *   runtime generic context variable, the vtable variable, or the 'this'
 *   argument's vtable. Requires generic sharing to be active.
 */
3157 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3159 MonoInst *this = NULL;
3161 g_assert (cfg->generic_sharing_context);
/* instance methods on reference types can reach the context via 'this' */
3163 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3164 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3165 !method->klass->valuetype)
3166 EMIT_NEW_ARGLOAD (cfg, this, 0);
3168 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3169 MonoInst *mrgctx_loc, *mrgctx_var;
3172 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3174 mrgctx_loc = mono_get_vtable_var (cfg);
3175 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3178 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3179 MonoInst *vtable_loc, *vtable_var;
3183 vtable_loc = mono_get_vtable_var (cfg);
3184 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3186 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* the variable actually holds an mrgctx; load its class_vtable field */
3187 MonoInst *mrgctx_var = vtable_var;
3190 vtable_reg = alloc_preg (cfg);
3191 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3192 vtable_var->type = STACK_PTR;
/* default: fetch the vtable from the 'this' object */
3200 vtable_reg = alloc_preg (cfg);
3201 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) a MonoJumpInfoRgctxEntry describing an rgctx fetch:
 *   the requesting METHOD, whether the lookup goes through an mrgctx, the
 *   patch describing the data to look up, and the rgctx info type.
 */
3206 static MonoJumpInfoRgctxEntry *
3207 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3209 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3210 res->method = method;
3211 res->in_mrgctx = in_mrgctx;
3212 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3213 res->data->type = patch_type;
3214 res->data->data.target = patch_data;
3215 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline, resolving ENTRY against
 *   the runtime generic context value RGCTX.
 */
3220 static inline MonoInst*
3221 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3223 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the runtime
 *   generic context of the current method.
 */
3227 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3228 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3230 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3231 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3233 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the
 *   runtime generic context of the current method.
 */
3237 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3238 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3240 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3241 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3243 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading the RGCTX_TYPE info for a gsharedvt call described by
 *   (SIG, CMETHOD) from the runtime generic context.
 */
3247 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3248 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3250 MonoJumpInfoGSharedVtCall *call_info;
3251 MonoJumpInfoRgctxEntry *entry;
3254 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3255 call_info->sig = sig;
3256 call_info->method = cmethod;
3258 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3259 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3261 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR loading the gsharedvt per-method info structure for CMETHOD
 *   from the runtime generic context.
 */
3266 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3267 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3269 MonoJumpInfoRgctxEntry *entry;
3272 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3273 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3275 return emit_rgctx_fetch (cfg, rgctx, entry);
3279 * emit_get_rgctx_method:
3281 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3282 * normal constants, else emit a load from the rgctx.
3285 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3286 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3288 if (!context_used) {
/* no sharing: the value is a compile-time constant */
3291 switch (rgctx_type) {
3292 case MONO_RGCTX_INFO_METHOD:
3293 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3295 case MONO_RGCTX_INFO_METHOD_RGCTX:
3296 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3299 g_assert_not_reached ();
3302 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3303 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3305 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR loading the RGCTX_TYPE property of FIELD from the runtime
 *   generic context of the current method.
 */
3310 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3311 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3313 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3314 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3316 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the entry (DATA, RGCTX_TYPE) in the current
 *   method's gsharedvt info template, reusing an existing matching entry
 *   when possible (except for LOCAL_OFFSET entries, which are never
 *   shared) and appending a new one otherwise.
 */
3320 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3322 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3323 MonoRuntimeGenericContextInfoTemplate *template;
3328 for (i = 0; i < info->entries->len; ++i) {
3329 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3331 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3335 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3336 template->info_type = rgctx_type;
3337 template->data = data;
3339 idx = info->entries->len;
3341 g_ptr_array_add (info->entries, template);
3347 * emit_get_gsharedvt_info:
3349 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3352 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3357 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3358 /* Load info->entries [idx] */
3359 dreg = alloc_preg (cfg);
3360 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type.
 */
3366 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3368 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable either from the rgctx (shared code) or as a constant. The vtable
 *   goes in MONO_ARCH_VTABLE_REG where the architecture defines one.
 */
3372 * On return the caller must check @klass for load errors.
3375 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3377 MonoInst *vtable_arg;
3381 context_used = mini_class_check_context_used (cfg, klass);
3384 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3385 klass, MONO_RGCTX_INFO_VTABLE);
3387 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3391 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3394 if (COMPILE_LLVM (cfg))
3395 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3397 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3398 #ifdef MONO_ARCH_VTABLE_REG
3399 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3400 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IL offset IP when sequence points are
 *   enabled and we are compiling METHOD itself (not an inlined callee).
 *   INTR_LOC marks the point as a possible interrupt location.
 */
3407 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3411 if (cfg->gen_seq_points && cfg->method == method) {
3412 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3413 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   When --debug=casts is enabled, emit code recording the source class of
 *   the object in OBJ_REG and the target KLASS into the JIT TLS data, so a
 *   failing cast can produce a detailed error message. No-op otherwise.
 */
3418 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3420 if (mini_get_debug_options ()->better_cast_details) {
3421 int to_klass_reg = alloc_preg (cfg);
3422 int vtable_reg = alloc_preg (cfg);
3423 int klass_reg = alloc_preg (cfg);
3424 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3427 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3431 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the "from" class of the cast */
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3433 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3435 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3436 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3437 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the TLS cast-details recorded by save_cast_details () once the
 *   cast has succeeded; clearing the 'from' field alone is sufficient.
 */
3442 reset_cast_details (MonoCompile *cfg)
3444 /* Reset the variables holding the cast details */
3445 if (mini_get_debug_options ()->better_cast_details) {
3446 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3448 MONO_ADD_INS (cfg->cbb, tls_get);
3449 /* It is enough to reset the from field */
3450 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check (for stelem/array covariance) that OBJ is an
 *   instance of ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise.
 *   Compares either klass pointers (MONO_OPT_SHARED) or vtable pointers,
 *   handling AOT and generic-sharing variants.
 */
3455 * On return the caller must check @array_class for load errors
3458 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3460 int vtable_reg = alloc_preg (cfg);
3463 context_used = mini_class_check_context_used (cfg, array_class);
3465 save_cast_details (cfg, array_class, obj->dreg);
/* faulting load: also performs the null check on obj */
3467 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3469 if (cfg->opt & MONO_OPT_SHARED) {
3470 int class_reg = alloc_preg (cfg);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3472 if (cfg->compile_aot) {
3473 int klass_reg = alloc_preg (cfg);
3474 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3475 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3479 } else if (context_used) {
3480 MonoInst *vtable_ins;
3482 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3483 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3485 if (cfg->compile_aot) {
3489 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3491 vt_reg = alloc_preg (cfg);
3492 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3493 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3496 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3502 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3504 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 */
3508 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3509 * generic code is generated.
/* Emits a call to the Nullable Unbox helper: indirect through the rgctx in
 * shared code, direct otherwise (passing the vtable when the helper needs
 * one, per check_method_sharing). Returns the call result. */
3512 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3514 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3517 MonoInst *rgctx, *addr;
3519 /* FIXME: What if the class is shared? We might not
3520 have to get the address of the method from the
3522 addr = emit_get_rgctx_method (cfg, context_used, method,
3523 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3525 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3527 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3529 gboolean pass_vtable, pass_mrgctx;
3530 MonoInst *rgctx_arg = NULL;
3532 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3533 g_assert (!pass_mrgctx);
3536 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3539 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3542 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit the CIL unbox: verify at runtime that the boxed object on
 *   *SP has element class klass->element_class (throwing
 *   InvalidCastException otherwise), then compute and return the address
 *   of the value inside the box (obj + sizeof (MonoObject)).
 */
3547 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3551 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3552 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3553 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3554 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3556 obj_reg = sp [0]->dreg;
3557 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3560 /* FIXME: generics */
3561 g_assert (klass->rank == 0);
/* the boxed object must not be an array */
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3565 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3571 MonoInst *element_class;
3573 /* This assertion is from the unboxcast insn */
3574 g_assert (klass->rank == 0);
/* shared code: the expected element class comes from the rgctx */
3576 element_class = emit_get_rgctx_klass (cfg, context_used,
3577 klass->element_class, MONO_RGCTX_INFO_KLASS);
3579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3580 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3582 save_cast_details (cfg, klass->element_class, obj_reg);
3583 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3584 reset_cast_details (cfg);
/* result: address of the unboxed value, just past the object header */
3587 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3588 MONO_ADD_INS (cfg->cbb, add);
3589 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Unbox for gsharedvt code, where it is not known at JIT time whether
 *   'klass' instantiates to a reference type, a vtype, or a nullable type.
 *   Branches at runtime on MONO_RGCTX_INFO_CLASS_BOX_TYPE (1 branches to the
 *   is_ref block, 2 to the is_nullable block, anything else falls through to
 *   the vtype path) and leaves in addr_reg an address from which a value of
 *   type 'klass' can be loaded. *out_cbb receives the basic block in effect
 *   after the emitted code.
 */
3596 handle_unbox_gsharedvt (MonoCompile *cfg, int context_used, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3598 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3599 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3603 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* First, cast-check the object against the instantiated class. */
3609 args [1] = klass_inst;
3612 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
3614 NEW_BBLOCK (cfg, is_ref_bb);
3615 NEW_BBLOCK (cfg, is_nullable_bb);
3616 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the box type recorded in the gsharedvt info. */
3617 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3624 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3625 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the payload follows the object header. */
3629 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3630 MONO_ADD_INS (cfg->cbb, addr);
3632 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3635 MONO_START_BB (cfg, is_ref_bb);
3637 /* Save the ref to a temporary */
3638 dreg = alloc_ireg (cfg);
3639 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3640 addr->dreg = addr_reg;
3641 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: call Nullable<T>.Unbox indirectly through an address from
 * the rgctx, using a hand-built object->T signature (the concrete method
 * cannot be constructed at JIT time here). */
3645 MONO_START_BB (cfg, is_nullable_bb);
3648 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3649 MonoInst *unbox_call;
3650 MonoMethodSignature *unbox_sig;
3653 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3655 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3656 unbox_sig->ret = &klass->byval_arg;
3657 unbox_sig->param_count = 1;
3658 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3659 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3661 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3662 addr->dreg = addr_reg;
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3668 MONO_START_BB (cfg, end_bb);
/* Load the final value from the common address computed by whichever path ran. */
3671 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3673 *out_cbb = cfg->cbb;
3679 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR that allocates an object of 'klass' ('for_box' set when the
 *   allocation is for a box operation). Depending on MONO_OPT_SHARED,
 *   context_used, AOT mode and GC support, this uses either a managed
 *   allocator method, a mscorlib-specialized helper, or a JIT icall
 *   (mono_object_new / mono_object_new_specific / the ftn returned by
 *   mono_class_get_allocation_ftn). Sets a TYPE_LOAD exception on the cfg
 *   when the vtable cannot be created.
 */
3682 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3684 MonoInst *iargs [2];
/* Shared-generics path: the allocation argument (klass or vtable) comes from
 * the runtime generic context. */
3690 MonoInst *iargs [2];
3692 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3694 if (cfg->opt & MONO_OPT_SHARED)
3695 rgctx_info = MONO_RGCTX_INFO_KLASS;
3697 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3698 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3700 if (cfg->opt & MONO_OPT_SHARED) {
3701 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3703 alloc_ftn = mono_object_new;
3706 alloc_ftn = mono_object_new_specific;
/* Prefer the GC's managed (inlined) allocator when available. */
3709 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3710 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3712 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
3715 if (cfg->opt & MONO_OPT_SHARED) {
3716 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3717 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3719 alloc_ftn = mono_object_new;
3720 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3721 /* This happens often in argument checking code, eg. throw new FooException... */
3722 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3723 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3724 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3726 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3727 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TYPE_LOAD exception on the cfg. */
3731 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3732 cfg->exception_ptr = klass;
3736 #ifndef MONO_CROSS_COMPILE
3737 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3740 if (managed_alloc) {
3741 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3742 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3744 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator also takes the instance size, rounded up to and
 * expressed in pointer-sized words, as its first argument. */
3746 guint32 lw = vtable->klass->instance_size;
3747 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3748 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3749 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3752 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3756 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3760 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *   Emit IR that boxes 'val' (of type 'klass') into a new heap object and
 *   return the instruction producing the object reference. Handles three
 *   cases: Nullable<T> (calls the type's "Box" method), gsharedvt classes
 *   (runtime dispatch on the box type), and the plain case (allocate, then
 *   store the value after the object header). *out_cbb receives the basic
 *   block in effect after the emitted code.
 */
3763 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3765 MonoInst *alloc, *ins;
3767 *out_cbb = cfg->cbb;
3769 if (mono_class_is_nullable (klass)) {
3770 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3773 /* FIXME: What if the class is shared? We might not
3774 have to get the method address from the RGCTX. */
/* Shared case: indirect call through an address from the runtime generic
 * context (mirrors handle_unbox_nullable). */
3775 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3776 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3779 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3781 gboolean pass_vtable, pass_mrgctx;
3782 MonoInst *rgctx_arg = NULL;
3784 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3785 g_assert (!pass_mrgctx);
3788 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3791 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3794 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box type only known at runtime; compare the recorded box type
 * against 1 (reference) and 2 (nullable), falling through to the vtype path. */
3798 if (mini_is_gsharedvt_klass (cfg, klass)) {
3799 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3800 MonoInst *res, *is_ref, *src_var, *addr;
3803 dreg = alloc_ireg (cfg);
3805 NEW_BBLOCK (cfg, is_ref_bb);
3806 NEW_BBLOCK (cfg, is_nullable_bb);
3807 NEW_BBLOCK (cfg, end_bb);
3808 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3809 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate the box and store the value after the header. */
3816 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3819 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3820 ins->opcode = OP_STOREV_MEMBASE;
3822 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3823 res->type = STACK_OBJ;
3825 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference path: boxing a reference is the identity; load it out of the
 * vtype-typed vreg manually. */
3828 MONO_START_BB (cfg, is_ref_bb);
3829 addr_reg = alloc_ireg (cfg);
3831 /* val is a vtype, so has to load the value manually */
3832 src_var = get_vreg_to_inst (cfg, val->dreg);
3834 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3835 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3836 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3837 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3840 MONO_START_BB (cfg, is_nullable_bb);
3843 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3844 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3846 MonoMethodSignature *box_sig;
3849 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3850 * construct that method at JIT time, so have to do things by hand.
3852 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3853 box_sig->ret = &mono_defaults.object_class->byval_arg;
3854 box_sig->param_count = 1;
3855 box_sig->params [0] = &klass->byval_arg;
3856 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3857 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3858 res->type = STACK_OBJ;
3862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3864 MONO_START_BB (cfg, end_bb);
3866 *out_cbb = cfg->cbb;
/* Plain (non-nullable, non-gsharedvt) box. */
3870 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Inspect the generic container of 'klass' (its instantiated container, or
 *   its open container when context_used != 0) and scan the type arguments in
 *   variant (covariant/contravariant) parameter positions for reference
 *   types. The return statements are elided from this extract, but the loop
 *   appears to yield TRUE as soon as such an argument is found — TODO confirm.
 */
3881 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3884 MonoGenericContainer *container;
3885 MonoGenericInst *ginst;
3887 if (klass->generic_class) {
3888 container = klass->generic_class->container_class->generic_container;
3889 ginst = klass->generic_class->context.class_inst;
3890 } else if (klass->generic_container && context_used) {
3891 container = klass->generic_container;
3892 ginst = container->context.class_inst;
3897 for (i = 0; i < container->type_argc; ++i) {
/* Only variant positions matter; invariant parameters are skipped. */
3899 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3901 type = ginst->type_argv [i];
3902 if (mini_type_is_reference (cfg, type))
3908 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE(review): the leading TRUE short-circuits the whole expression, so this
 * macro is currently unconditionally true — every isinst/castclass takes the
 * "complex" (with-cache helper) path and the rest of the condition is dead
 * until the FIXME above is resolved. */
3909 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3912 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *   Emit IR implementing the CIL 'castclass' semantics for 'src' against
 *   'klass': NULL passes through, a failed check throws
 *   InvalidCastException. Complex cases (variant reference arguments, or
 *   anything matched by is_complex_isinst, currently everything) are handled
 *   by calling the castclass-with-cache managed wrapper; otherwise inline
 *   vtable/klass comparisons are emitted.
 */
3915 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3917 MonoBasicBlock *is_null_bb;
3918 int obj_reg = src->dreg;
3919 int vtable_reg = alloc_preg (cfg);
3920 MonoInst *klass_inst = NULL;
/* Helper-call path: obj + klass + cache entry go to the managed wrapper. */
3925 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3926 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3927 MonoInst *cache_ins;
3929 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3934 /* klass - it's the second element of the cache entry*/
3935 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3938 args [2] = cache_ins;
3940 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3943 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: a null reference always casts successfully. */
3946 NEW_BBLOCK (cfg, is_null_bb);
3948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3951 save_cast_details (cfg, klass, obj_reg);
3953 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3955 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3957 int klass_reg = alloc_preg (cfg);
3959 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed-class fast path: a single pointer comparison would suffice, but it
 * is disabled (if (0)) — see the FIXME about broken remoting below. */
3961 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3962 /* the remoting code is broken, access the class for now */
3963 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3964 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3966 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3967 cfg->exception_ptr = klass;
3970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3972 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3973 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3975 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General class cast: walk the hierarchy via mini_emit_castclass_inst. */
3977 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3978 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3982 MONO_START_BB (cfg, is_null_bb);
3984 reset_cast_details (cfg);
3990 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *   Emit IR implementing the CIL 'isinst' semantics for 'src' against
 *   'klass': the result register holds the object reference on success and
 *   NULL (via false_bb) on failure. Complex cases go through the
 *   isinst-with-cache managed wrapper; otherwise inline checks are emitted,
 *   with dedicated fast paths for interfaces, arrays (including enum element
 *   types and SZARRAY/vector distinction), nullables and sealed classes.
 */
3993 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3996 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3997 int obj_reg = src->dreg;
3998 int vtable_reg = alloc_preg (cfg);
3999 int res_reg = alloc_ireg_ref (cfg);
4000 MonoInst *klass_inst = NULL;
/* Helper-call path: obj + klass + cache entry go to the managed wrapper. */
4005 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4006 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4007 MonoInst *cache_ins;
4009 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4014 /* klass - it's the second element of the cache entry*/
4015 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4018 args [2] = cache_ins;
4020 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4023 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4026 NEW_BBLOCK (cfg, is_null_bb);
4027 NEW_BBLOCK (cfg, false_bb);
4028 NEW_BBLOCK (cfg, end_bb);
4030 /* Do the assignment at the beginning, so the other assignment can be if converted */
4031 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4032 ins->type = STACK_OBJ;
/* NULL is never an instance of anything; it branches to is_null_bb which
 * keeps the (NULL) input as the result. */
4035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4038 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4040 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4041 g_assert (!context_used);
4042 /* the is_null_bb target simply copies the input register to the output */
4043 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4045 int klass_reg = alloc_preg (cfg);
/* Array case: the rank must match, then the element (cast) class is
 * compared, with enum/object element types needing extra checks. */
4048 int rank_reg = alloc_preg (cfg);
4049 int eclass_reg = alloc_preg (cfg);
4051 g_assert (!context_used);
4052 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4057 if (klass->cast_class == mono_defaults.object_class) {
4058 int parent_reg = alloc_preg (cfg);
4059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4060 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4061 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4063 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4064 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4065 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4067 } else if (klass->cast_class == mono_defaults.enum_class) {
4068 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4069 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4070 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4071 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4073 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4074 /* Check that the object is a vector too */
4075 int bounds_reg = alloc_preg (cfg);
4076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4081 /* the is_null_bb target simply copies the input register to the output */
4082 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4084 } else if (mono_class_is_nullable (klass)) {
4085 g_assert (!context_used);
4086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4087 /* the is_null_bb target simply copies the input register to the output */
4088 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed-class fast path: a single pointer comparison would suffice, but it
 * is disabled (if (0)) — see the FIXME about broken remoting below. */
4090 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4091 g_assert (!context_used);
4092 /* the remoting code is broken, access the class for now */
4093 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4094 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4096 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4097 cfg->exception_ptr = klass;
4100 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4103 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4105 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4106 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4109 /* the is_null_bb target simply copies the input register to the output */
4110 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: the result is NULL. */
4115 MONO_START_BB (cfg, false_bb);
4117 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4120 MONO_START_BB (cfg, is_null_bb);
4122 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit IR for the remoting-aware isinst opcode (see the comment on the
 *   return values below). Proxy handling is only compiled in when
 *   DISABLE_REMOTING is not defined; otherwise hitting a proxy-requiring
 *   path is a g_error.
 */
4128 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4130 /* This opcode takes as input an object reference and a class, and returns:
4131 0) if the object is an instance of the class,
4132 1) if the object is not instance of the class,
4133 2) if the object is a proxy whose type cannot be determined */
4136 #ifndef DISABLE_REMOTING
4137 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4139 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4141 int obj_reg = src->dreg;
4142 int dreg = alloc_ireg (cfg);
4144 #ifndef DISABLE_REMOTING
4145 int klass_reg = alloc_preg (cfg);
4148 NEW_BBLOCK (cfg, true_bb);
4149 NEW_BBLOCK (cfg, false_bb);
4150 NEW_BBLOCK (cfg, end_bb);
4151 #ifndef DISABLE_REMOTING
4152 NEW_BBLOCK (cfg, false2_bb);
4153 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is not an instance of anything: result 1. */
4156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4159 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4160 #ifndef DISABLE_REMOTING
4161 NEW_BBLOCK (cfg, interface_fail_bb);
4164 tmp_reg = alloc_preg (cfg);
4165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4166 #ifndef DISABLE_REMOTING
/* Interface check failed: the object may still be a transparent proxy with
 * custom type info that can satisfy the interface. */
4167 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4168 MONO_START_BB (cfg, interface_fail_bb);
4169 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4171 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4173 tmp_reg = alloc_preg (cfg);
4174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4178 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class: distinguish proxy and non-proxy objects. */
4181 #ifndef DISABLE_REMOTING
4182 tmp_reg = alloc_preg (cfg);
4183 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4184 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4186 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
4187 tmp_reg = alloc_preg (cfg);
4188 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4189 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4191 tmp_reg = alloc_preg (cfg);
4192 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4193 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4194 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4196 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4199 MONO_START_BB (cfg, no_proxy_bb);
4201 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4203 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undeterminable proxy,
 * 0 = is an instance. */
4207 MONO_START_BB (cfg, false_bb);
4209 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4212 #ifndef DISABLE_REMOTING
4213 MONO_START_BB (cfg, false2_bb);
4215 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4219 MONO_START_BB (cfg, true_bb);
4221 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4223 MONO_START_BB (cfg, end_bb);
4226 MONO_INST_NEW (cfg, ins, OP_ICONST);
4228 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for the remoting-aware castclass opcode (see the comment on the
 *   return values below). Proxy handling is only compiled in when
 *   DISABLE_REMOTING is not defined; otherwise hitting a proxy-requiring
 *   path is a g_error.
 */
4234 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4236 /* This opcode takes as input an object reference and a class, and returns:
4237 0) if the object is an instance of the class,
4238 1) if the object is a proxy whose type cannot be determined
4239 an InvalidCastException exception is thrown otherwise*/
4242 #ifndef DISABLE_REMOTING
4243 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4245 MonoBasicBlock *ok_result_bb;
4247 int obj_reg = src->dreg;
4248 int dreg = alloc_ireg (cfg);
4249 int tmp_reg = alloc_preg (cfg);
4251 #ifndef DISABLE_REMOTING
4252 int klass_reg = alloc_preg (cfg);
4253 NEW_BBLOCK (cfg, end_bb);
4256 NEW_BBLOCK (cfg, ok_result_bb);
/* NULL always casts successfully: result 0. */
4258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4259 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4261 save_cast_details (cfg, klass, obj_reg);
4263 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4264 #ifndef DISABLE_REMOTING
4265 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface check; on failure the object must be a transparent proxy with
 * custom type info, otherwise InvalidCastException. */
4267 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4268 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4269 MONO_START_BB (cfg, interface_fail_bb);
4270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4272 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4274 tmp_reg = alloc_preg (cfg);
4275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4277 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info but undeterminable type: result 1. */
4279 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4280 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4282 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4283 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface class: distinguish proxy and non-proxy objects. */
4287 #ifndef DISABLE_REMOTING
4288 NEW_BBLOCK (cfg, no_proxy_bb);
4290 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4292 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4294 tmp_reg = alloc_preg (cfg);
4295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4296 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4298 tmp_reg = alloc_preg (cfg);
4299 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4300 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4303 NEW_BBLOCK (cfg, fail_1_bb);
4305 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4307 MONO_START_BB (cfg, fail_1_bb);
4309 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4310 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4312 MONO_START_BB (cfg, no_proxy_bb);
4314 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4316 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4320 MONO_START_BB (cfg, ok_result_bb);
4322 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4324 #ifndef DISABLE_REMOTING
4325 MONO_START_BB (cfg, end_bb);
4329 MONO_INST_NEW (cfg, ins, OP_ICONST);
4331 ins->type = STACK_I4;
4337 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *   Emit IR that constructs a delegate of type 'klass' wrapping 'method'
 *   with the given 'target' object, inlining the work normally done by
 *   mono_delegate_ctor: fill the target, method, (optionally) method_code,
 *   and invoke_impl fields of the freshly allocated MonoDelegate. The
 *   remaining checks are left to the delegate trampoline.
 */
4339 static G_GNUC_UNUSED MonoInst*
4340 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4344 gpointer *trampoline;
4345 MonoInst *obj, *method_ins, *tramp_ins;
4349 obj = handle_alloc (cfg, klass, FALSE, 0);
4353 /* Inline the contents of mono_delegate_ctor */
4355 /* Set target field */
4356 /* Optimize away setting of NULL target */
4357 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4358 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* A reference was stored into a heap object: notify the GC. */
4359 if (cfg->gen_write_barriers) {
4360 dreg = alloc_preg (cfg);
4361 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4362 emit_write_barrier (cfg, ptr, target);
4366 /* Set method field */
4367 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4369 if (cfg->gen_write_barriers) {
4370 dreg = alloc_preg (cfg);
4371 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4372 emit_write_barrier (cfg, ptr, method_ins);
4375 * To avoid looking up the compiled code belonging to the target method
4376 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4377 * store it, and we fill it after the method has been compiled.
/* The code-slot cache is skipped for AOT-shared, dynamic methods and
 * MONO_OPT_SHARED, where a per-domain slot is not usable. */
4379 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4380 MonoInst *code_slot_ins;
4383 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash and this
 * method's slot, under the domain lock. */
4385 domain = mono_domain_get ();
4386 mono_domain_lock (domain);
4387 if (!domain_jit_info (domain)->method_code_hash)
4388 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4389 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4391 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4392 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4394 mono_domain_unlock (domain);
4396 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4401 /* Set invoke_impl field */
4402 if (cfg->compile_aot) {
4403 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4405 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4406 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4408 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4410 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit IR allocating a multi-dimensional array of the given 'rank' via the
 *   vararg mono_array_new_va icall (registered per-rank so it gets an icall
 *   wrapper). Marks the cfg as using varargs and disables LLVM, since
 *   mono_array_new_va () needs a vararg calling convention.
 */
4416 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4418 MonoJitICallInfo *info;
4420 /* Need to register the icall so it gets an icall wrapper */
4421 info = mono_get_array_new_va_icall (rank);
4423 cfg->flags |= MONO_CFG_HAS_VARARGS;
4425 /* mono_array_new_va () needs a vararg calling convention */
4426 cfg->disable_llvm = TRUE;
4428 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4429 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 *   initialize cfg->got_var, and a dummy use of it in the exit block so the
 *   variable stays live for the whole method. No-op if there is no got_var
 *   or it was already allocated.
 */
4433 mono_emit_load_got_addr (MonoCompile *cfg)
4435 MonoInst *getaddr, *dummy_use;
4437 if (!cfg->got_var || cfg->got_var_allocated)
4440 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4441 getaddr->cil_code = cfg->header->code;
4442 getaddr->dreg = cfg->got_var->dreg;
4444 /* Add it to the start of the first bblock */
4445 if (cfg->bb_entry->code) {
4446 getaddr->next = cfg->bb_entry->code;
4447 cfg->bb_entry->code = getaddr;
4450 MONO_ADD_INS (cfg->bb_entry, getaddr);
4452 cfg->got_var_allocated = TRUE;
4455 * Add a dummy use to keep the got_var alive, since real uses might
4456 * only be generated by the back ends.
4457 * Add it to end_bblock, so the variable's lifetime covers the whole
4459 * It would be better to make the usage of the got var explicit in all
4460 * cases when the backend needs it (i.e. calls, throw etc.), so this
4461 * wouldn't be needed.
4463 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4464 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes), initialized lazily from MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT on first use. */
4467 static int inline_limit;
4468 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether 'method' may be inlined into the method being compiled.
 *   Rejects: generic sharing, deep inline nesting (> 10), explicit
 *   noinlining/synchronized/MBR methods, bodies over the size limit (unless
 *   marked AggressiveInlining), methods whose class needs a cctor that
 *   cannot be run/proven-run now, methods with declarative security, and
 *   (under soft-float) any R4 in the signature.
 */
4471 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4473 MonoMethodHeaderSummary header;
4475 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4476 MonoMethodSignature *sig = mono_method_signature (method);
4480 if (cfg->generic_sharing_context)
4483 if (cfg->inline_depth > 10)
4486 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch reads 'signature->ret' while the variable
 * declared above is 'sig' — the declaration of 'signature' is elided here
 * or this is a latent compile issue under MONO_ARCH_HAVE_LMF_OPS; verify. */
4487 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4488 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4489 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4494 if (!mono_method_get_header_summary (method, &header))
4497 /*runtime, icall and pinvoke are checked by summary call*/
4498 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4499 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4500 (mono_class_is_marshalbyref (method->klass)) ||
4504 /* also consider num_locals? */
4505 /* Do the size check early to avoid creating vtables */
/* NOTE(review): getenv ("MONO_INLINELIMIT") is called twice; caching the
 * result in a local would be cleaner (cannot be safely changed in this
 * elided extract). */
4506 if (!inline_limit_inited) {
4507 if (getenv ("MONO_INLINELIMIT"))
4508 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4510 inline_limit = INLINE_LENGTH_LIMIT;
4511 inline_limit_inited = TRUE;
4513 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4517 * if we can initialize the class of the method right away, we do,
4518 * otherwise we don't allow inlining if the class needs initialization,
4519 * since it would mean inserting a call to mono_runtime_class_init()
4520 * inside the inlined code
4522 if (!(cfg->opt & MONO_OPT_SHARED)) {
4523 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4524 if (cfg->run_cctors && method->klass->has_cctor) {
4525 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4526 if (!method->klass->runtime_info)
4527 /* No vtable created yet */
4529 vtable = mono_class_vtable (cfg->domain, method->klass);
4532 /* This makes so that inline cannot trigger */
4533 /* .cctors: too many apps depend on them */
4534 /* running with a specific order... */
4535 if (! vtable->initialized)
4537 mono_runtime_class_init (vtable);
4539 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4540 if (!method->klass->runtime_info)
4541 /* No vtable created yet */
4543 vtable = mono_class_vtable (cfg->domain, method->klass);
4546 if (!vtable->initialized)
4551 * If we're compiling for shared code
4552 * the cctor will need to be run at aot method load time, for example,
4553 * or at the end of the compilation of the inlining method.
4555 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4560 * CAS - do not inline methods with declarative security
4561 * Note: this has to be before any possible return TRUE;
4563 if (mono_security_method_has_declsec (method))
4566 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: R4 values are handled specially, so bail out when the return
 * type or any non-byref parameter is R4. */
4567 if (mono_arch_is_soft_float ()) {
4569 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4571 for (i = 0; i < sig->param_count; ++i)
4572 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether accessing a static field of VTABLE's class from
 * METHOD requires emitting a class-initialization check.  Skips the
 * check when the vtable is already initialized (JIT only — AOT must
 * keep it), when the class is beforefieldinit, when no .cctor run is
 * needed, or when an instance method of the class itself is the
 * accessor (init already happened before the call).
 */
4581 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4583 if (vtable->initialized && !cfg->compile_aot)
4586 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4589 if (!mono_class_needs_cctor_run (vtable->klass, method))
4592 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4593 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the
 * one-dimensional array ARR with element class KLASS, optionally with
 * a bounds check (BCHECK).  Returns a STACK_MP instruction whose dreg
 * holds the element address.  For gsharedvt classes the element size
 * is looked up at runtime through the RGCTX; on x86/amd64 a single
 * LEA is used for power-of-two element sizes.
 */
4600 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4604 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4607 if (mini_is_gsharedvt_klass (cfg, klass)) {
4610 mono_class_init (klass);
4611 size = mono_class_array_element_size (klass);
4614 mult_reg = alloc_preg (cfg);
4615 array_reg = arr->dreg;
4616 index_reg = index->dreg;
4618 #if SIZEOF_REGISTER == 8
4619 /* The array reg is 64 bits but the index reg is only 32 */
4620 if (COMPILE_LLVM (cfg)) {
4622 index2_reg = index_reg;
4624 index2_reg = alloc_preg (cfg);
/* Sign-extend the 32-bit index to pointer width */
4625 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4628 if (index->type == STACK_I8) {
4629 index2_reg = alloc_preg (cfg);
4630 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4632 index2_reg = index_reg;
4637 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4639 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold scale into a single LEA */
4640 if (size == 1 || size == 2 || size == 4 || size == 8) {
4641 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4643 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4644 ins->klass = mono_class_get_element_class (klass);
4645 ins->type = STACK_MP;
4651 add_reg = alloc_ireg_mp (cfg);
4654 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime via RGCTX */
4657 g_assert (cfg->generic_sharing_context);
4658 context_used = mini_class_check_context_used (cfg, klass);
4659 g_assert (context_used);
4660 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4661 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index * size + offsetof (MonoArray, vector) */
4665 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4666 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4667 ins->klass = mono_class_get_element_class (klass);
4668 ins->type = STACK_MP;
4669 MONO_ADD_INS (cfg->cbb, ins);
4674 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 array with
 * element class KLASS, including per-dimension lower-bound adjustment
 * and range checks against the MonoArrayBounds records.  Compiled
 * only when the arch does not emulate mul/div (uses OP_PMUL).
 * Returns a STACK_MP instruction holding the element address.
 */
4676 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4678 int bounds_reg = alloc_preg (cfg);
4679 int add_reg = alloc_ireg_mp (cfg);
4680 int mult_reg = alloc_preg (cfg);
4681 int mult2_reg = alloc_preg (cfg);
4682 int low1_reg = alloc_preg (cfg);
4683 int low2_reg = alloc_preg (cfg);
4684 int high1_reg = alloc_preg (cfg);
4685 int high2_reg = alloc_preg (cfg);
4686 int realidx1_reg = alloc_preg (cfg);
4687 int realidx2_reg = alloc_preg (cfg);
4688 int sum_reg = alloc_preg (cfg);
4689 int index1, index2, tmpreg;
4693 mono_class_init (klass);
4694 size = mono_class_array_element_size (klass);
4696 index1 = index_ins1->dreg;
4697 index2 = index_ins2->dreg;
4699 #if SIZEOF_REGISTER == 8
4700 /* The array reg is 64 bits but the index reg is only 32 */
4701 if (COMPILE_LLVM (cfg)) {
4704 tmpreg = alloc_preg (cfg);
4705 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4707 tmpreg = alloc_preg (cfg);
4708 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4712 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4716 /* range checking */
4717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4718 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare
 * against length (LE_UN catches both negative and too-large) */
4720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4721 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4722 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4724 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4726 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds record follows the first */
4728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4729 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4730 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4732 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4733 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4734 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_len + realidx2) * size) + vector offset */
4736 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4737 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4739 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4740 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4742 ins->type = STACK_MP;
4744 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the array Address/Get/Set
 * intrinsic CMETHOD.  Rank-1 arrays use the inline fast path, rank-2
 * uses the two-dimensional fast path when OP_LMUL is available and
 * intrinsics are enabled; everything else falls back to a call to a
 * marshalling wrapper that computes the address at runtime.
 * IS_SET indicates a store, whose extra value argument is not an index.
 */
4751 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4755 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index */
4758 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4761 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4763 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4764 /* emit_ldelema_2 depends on OP_LMUL */
4765 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4766 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper */
4770 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4771 addr_method = mono_marshal_get_array_address (rank, element_size);
4772 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4777 static MonoBreakPolicy
4778 always_insert_breakpoint (MonoMethod *method)
4780 return MONO_BREAK_POLICY_ALWAYS;
4783 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4786 * mono_set_break_policy:
4787 * policy_callback: the new callback function
4789 * Allow embedders to decide whether to actually obey breakpoint instructions
4790 * (both break IL instructions and Debugger.Break () method calls), for example
4791 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4792 * untrusted or semi-trusted code.
4794 * @policy_callback will be called every time a break point instruction needs to
4795 * be inserted with the method argument being the method that calls Debugger.Break()
4796 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4797 * if it wants the breakpoint to not be effective in the given method.
4798 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the global break policy; passing NULL
 * restores the default always-insert behavior. */
4801 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4803 if (policy_callback)
4804 break_policy_func = policy_callback;
4806 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — the misspelled name is part of the
 * internal API and is used by callers, so it is kept as-is)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * for METHOD should actually be emitted.  Unknown policy values warn
 * and fall through to the default.
 */
4810 should_insert_brekpoint (MonoMethod *method) {
4811 switch (break_policy_func (method)) {
4812 case MONO_BREAK_POLICY_ALWAYS:
4814 case MONO_BREAK_POLICY_NEVER:
4816 case MONO_BREAK_POLICY_ON_DBG:
4817 return mono_debug_using_mono_debugger ();
4819 g_warning ("Incorrect value returned from break policy callback");
4824 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the generic array element get/set icalls as a direct
 * load/store through the element address.  IS_SET selects store
 * (copy from the value location into the array) vs. load.  Emits a
 * write barrier when storing reference-typed elements.
 */
4826 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4828 MonoInst *addr, *store, *load;
4829 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4831 /* the bounds check is already done by the callers */
4832 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: read the value through args [2] and store it into the array */
4834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4836 if (mini_type_is_reference (cfg, fsig->params [2]))
4837 emit_write_barrier (cfg, addr, load);
/* get: read the element and store it through the destination pointer */
4839 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4840 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type in this (possibly shared) context. */
4847 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4849 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a stelem-style array store (sp [0] = array,
 * sp [1] = index, sp [2] = value).  With SAFETY_CHECKS and a
 * reference element type (and a non-null value), a virtual
 * stelemref helper performs the array-covariance type check.
 * Otherwise an inline store is emitted: gsharedvt uses a variable
 * store, a constant index folds the offset, and the general path
 * computes the address then stores (with a write barrier for refs).
 */
4853 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4855 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4856 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4857 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4858 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4859 MonoInst *iargs [3];
4862 mono_class_setup_vtable (obj_array);
4863 g_assert (helper->slot);
4865 if (sp [0]->type != STACK_OBJ)
4867 if (sp [2]->type != STACK_OBJ)
/* Virtual call dispatches on the actual array type */
4874 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4878 if (mini_is_gsharedvt_klass (cfg, klass)) {
4881 // FIXME-VT: OP_ICONST optimization
4882 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4883 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4884 ins->opcode = OP_STOREV_MEMBASE;
4885 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold element offset into the store itself */
4886 int array_reg = sp [0]->dreg;
4887 int index_reg = sp [1]->dreg;
4888 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4891 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4892 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4894 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4895 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4896 if (generic_class_is_reference_type (cfg, klass))
4897 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: element class comes from the
 * value parameter (store) or the return type (load).  No safety
 * checks are emitted — callers guarantee bounds and types.
 */
4904 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4909 eklass = mono_class_from_mono_type (fsig->params [2]);
4911 eklass = mono_class_from_mono_type (fsig->ret);
4915 return emit_array_store (cfg, eklass, args, FALSE);
4917 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace the constructor call CMETHOD with intrinsic IR.
 * Currently only SIMD ctor intrinsics are attempted (when enabled);
 * returns NULL when no intrinsic applies.
 */
4924 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4926 MonoInst *ins = NULL;
4927 #ifdef MONO_ARCH_SIMD_INTRINSICS
4928 if (cfg->opt & MONO_OPT_SIMD) {
4929 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND into the current bblock
 * and return it. */
4939 emit_memory_barrier (MonoCompile *cfg, int kind)
4941 MonoInst *ins = NULL;
4942 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4943 MONO_ADD_INS (cfg->cbb, ins);
4944 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansions used only when compiling with the LLVM
 * backend: Math.Sin/Cos/Sqrt/Abs(double) become single float opcodes,
 * and Math.Min/Max on I4/U4/I8/U8 become conditional-move min/max
 * opcodes (when MONO_OPT_CMOV is enabled).  Returns NULL when no
 * intrinsic applies.
 */
4950 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4952 MonoInst *ins = NULL;
4955 /* The LLVM backend supports these intrinsics */
4956 if (cmethod->klass == mono_defaults.math_class) {
4957 if (strcmp (cmethod->name, "Sin") == 0) {
4959 } else if (strcmp (cmethod->name, "Cos") == 0) {
4961 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4963 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary float op: one sreg, result in a fresh freg */
4968 MONO_INST_NEW (cfg, ins, opcode);
4969 ins->type = STACK_R8;
4970 ins->dreg = mono_alloc_freg (cfg);
4971 ins->sreg1 = args [0]->dreg;
4972 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to cmov-based opcodes, selected by operand type */
4976 if (cfg->opt & MONO_OPT_CMOV) {
4977 if (strcmp (cmethod->name, "Min") == 0) {
4978 if (fsig->params [0]->type == MONO_TYPE_I4)
4980 if (fsig->params [0]->type == MONO_TYPE_U4)
4981 opcode = OP_IMIN_UN;
4982 else if (fsig->params [0]->type == MONO_TYPE_I8)
4984 else if (fsig->params [0]->type == MONO_TYPE_U8)
4985 opcode = OP_LMIN_UN;
4986 } else if (strcmp (cmethod->name, "Max") == 0) {
4987 if (fsig->params [0]->type == MONO_TYPE_I4)
4989 if (fsig->params [0]->type == MONO_TYPE_U4)
4990 opcode = OP_IMAX_UN;
4991 else if (fsig->params [0]->type == MONO_TYPE_I8)
4993 else if (fsig->params [0]->type == MONO_TYPE_U8)
4994 opcode = OP_LMAX_UN;
4999 MONO_INST_NEW (cfg, ins, opcode);
5000 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5001 ins->dreg = mono_alloc_ireg (cfg);
5002 ins->sreg1 = args [0]->dreg;
5003 ins->sreg2 = args [1]->dreg;
5004 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit even in shared (generic) code:
 * currently only Array.UnsafeStore/UnsafeLoad.  Returns NULL when no
 * intrinsic applies.
 */
5012 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5014 if (cmethod->klass == mono_defaults.array_class) {
5015 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5016 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5017 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5018 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsic dispatcher: try to replace a call to CMETHOD with
 * inline IR.  Dispatches on the declaring class — String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment, Math — then falls through to SIMD, LLVM and
 * arch-specific intrinsics.  Returns the last emitted instruction, or
 * NULL when the call should be emitted normally.
 */
5025 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5027 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
5029 static MonoClass *runtime_helpers_class = NULL;
5030 if (! runtime_helpers_class)
5031 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5032 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5034 if (cmethod->klass == mono_defaults.string_class) {
/* string[idx]: bounds check + 2-byte load from the chars area */
5035 if (strcmp (cmethod->name, "get_Chars") == 0) {
5036 int dreg = alloc_ireg (cfg);
5037 int index_reg = alloc_preg (cfg);
5038 int mult_reg = alloc_preg (cfg);
5039 int add_reg = alloc_preg (cfg);
5041 #if SIZEOF_REGISTER == 8
5042 /* The array reg is 64 bits but the index reg is only 32 */
5043 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5045 index_reg = args [1]->dreg;
5047 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5049 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5050 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5051 add_reg = ins->dreg;
5052 /* Avoid a warning */
5054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5058 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5059 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5060 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5062 type_from_op (ins, NULL, NULL);
5064 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5065 int dreg = alloc_ireg (cfg);
5066 /* Decompose later to allow more optimizations */
5067 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5068 ins->type = STACK_I4;
5069 ins->flags |= MONO_INST_FAULT;
5070 cfg->cbb->has_array_access = TRUE;
5071 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5074 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5075 int mult_reg = alloc_preg (cfg);
5076 int add_reg = alloc_preg (cfg);
5078 /* The corlib functions check for oob already. */
5079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5081 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5082 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5085 } else if (cmethod->klass == mono_defaults.object_class) {
/* obj.GetType(): load type from the object's vtable */
5087 if (strcmp (cmethod->name, "GetType") == 0) {
5088 int dreg = alloc_ireg_ref (cfg);
5089 int vt_reg = alloc_preg (cfg);
5090 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5091 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5092 type_from_op (ins, NULL, NULL);
5095 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash; only valid with a non-moving GC */
5096 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5097 int dreg = alloc_ireg (cfg);
5098 int t1 = alloc_ireg (cfg);
5100 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5101 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5102 ins->type = STACK_I4;
/* Object..ctor() does nothing — emit a NOP */
5106 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5107 MONO_INST_NEW (cfg, ins, OP_NOP);
5108 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5112 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl */
5113 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5114 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5116 #ifndef MONO_BIG_ARRAYS
5118 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5121 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5122 int dreg = alloc_ireg (cfg);
5123 int bounds_reg = alloc_ireg_mp (cfg);
5124 MonoBasicBlock *end_bb, *szarray_bb;
5125 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5127 NEW_BBLOCK (cfg, end_bb);
5128 NEW_BBLOCK (cfg, szarray_bb);
/* NULL bounds means an szarray: branch to the fast path */
5130 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5131 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5132 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5133 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5134 /* Non-szarray case */
5136 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5137 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5139 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5140 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5142 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0 */
5145 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5146 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5148 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5149 MONO_START_BB (cfg, end_bb);
5151 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5152 ins->type = STACK_I4;
/* Only getters remain below */
5158 if (cmethod->name [0] != 'g')
/* array.Rank: read the rank byte from the vtable */
5161 if (strcmp (cmethod->name, "get_Rank") == 0) {
5162 int dreg = alloc_ireg (cfg);
5163 int vtable_reg = alloc_preg (cfg);
5164 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5165 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5166 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5167 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5168 type_from_op (ins, NULL, NULL);
5171 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5172 int dreg = alloc_ireg (cfg);
5174 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5175 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5176 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers intrinsics ---- */
5181 } else if (cmethod->klass == runtime_helpers_class) {
5183 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5184 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread intrinsics ---- */
5188 } else if (cmethod->klass == mono_defaults.thread_class) {
5189 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5190 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5191 MONO_ADD_INS (cfg->cbb, ins);
5193 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5194 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5196 } else if (cmethod->klass == mono_defaults.monitor_class) {
5198 /* FIXME this should be integrated to the check below once we support the trampoline version */
5199 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5200 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5201 MonoMethod *fast_method = NULL;
5203 /* Avoid infinite recursion */
5204 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5207 fast_method = mono_monitor_get_fast_path (cmethod);
5211 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5215 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Trampoline-based Enter/Exit: object passed in a fixed register */
5216 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5219 if (COMPILE_LLVM (cfg)) {
5221 * Pass the argument normally, the LLVM backend will handle the
5222 * calling convention problems.
5224 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5226 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5227 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5228 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5229 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5232 return (MonoInst*)call;
5233 } else if (strcmp (cmethod->name, "Exit") == 0) {
5236 if (COMPILE_LLVM (cfg)) {
5237 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5239 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5240 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5241 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5242 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5245 return (MonoInst*)call;
5247 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5249 MonoMethod *fast_method = NULL;
5251 /* Avoid infinite recursion */
5252 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5253 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5254 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5257 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5258 strcmp (cmethod->name, "Exit") == 0)
5259 fast_method = mono_monitor_get_fast_path (cmethod);
5263 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked intrinsics ---- */
5266 } else if (cmethod->klass->image == mono_defaults.corlib &&
5267 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5268 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5271 #if SIZEOF_REGISTER == 8
5272 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5273 /* 64 bit reads are already atomic */
5274 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5275 ins->dreg = mono_alloc_preg (cfg);
5276 ins->inst_basereg = args [0]->dreg;
5277 ins->inst_offset = 0;
5278 MONO_ADD_INS (cfg->cbb, ins);
5282 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment: atomic add of the constant 1 */
5283 if (strcmp (cmethod->name, "Increment") == 0) {
5284 MonoInst *ins_iconst;
5287 if (fsig->params [0]->type == MONO_TYPE_I4)
5288 opcode = OP_ATOMIC_ADD_NEW_I4;
5289 #if SIZEOF_REGISTER == 8
5290 else if (fsig->params [0]->type == MONO_TYPE_I8)
5291 opcode = OP_ATOMIC_ADD_NEW_I8;
5294 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5295 ins_iconst->inst_c0 = 1;
5296 ins_iconst->dreg = mono_alloc_ireg (cfg);
5297 MONO_ADD_INS (cfg->cbb, ins_iconst);
5299 MONO_INST_NEW (cfg, ins, opcode);
5300 ins->dreg = mono_alloc_ireg (cfg);
5301 ins->inst_basereg = args [0]->dreg;
5302 ins->inst_offset = 0;
5303 ins->sreg2 = ins_iconst->dreg;
5304 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5305 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1 */
5307 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5308 MonoInst *ins_iconst;
5311 if (fsig->params [0]->type == MONO_TYPE_I4)
5312 opcode = OP_ATOMIC_ADD_NEW_I4;
5313 #if SIZEOF_REGISTER == 8
5314 else if (fsig->params [0]->type == MONO_TYPE_I8)
5315 opcode = OP_ATOMIC_ADD_NEW_I8;
5318 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5319 ins_iconst->inst_c0 = -1;
5320 ins_iconst->dreg = mono_alloc_ireg (cfg);
5321 MONO_ADD_INS (cfg->cbb, ins_iconst);
5323 MONO_INST_NEW (cfg, ins, opcode);
5324 ins->dreg = mono_alloc_ireg (cfg);
5325 ins->inst_basereg = args [0]->dreg;
5326 ins->inst_offset = 0;
5327 ins->sreg2 = ins_iconst->dreg;
5328 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5329 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied value */
5331 } else if (strcmp (cmethod->name, "Add") == 0) {
5334 if (fsig->params [0]->type == MONO_TYPE_I4)
5335 opcode = OP_ATOMIC_ADD_NEW_I4;
5336 #if SIZEOF_REGISTER == 8
5337 else if (fsig->params [0]->type == MONO_TYPE_I8)
5338 opcode = OP_ATOMIC_ADD_NEW_I8;
5342 MONO_INST_NEW (cfg, ins, opcode);
5343 ins->dreg = mono_alloc_ireg (cfg);
5344 ins->inst_basereg = args [0]->dreg;
5345 ins->inst_offset = 0;
5346 ins->sreg2 = args [1]->dreg;
5347 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5348 MONO_ADD_INS (cfg->cbb, ins);
5351 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5353 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Exchange: opcode width chosen from the operand type */
5354 if (strcmp (cmethod->name, "Exchange") == 0) {
5356 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5358 if (fsig->params [0]->type == MONO_TYPE_I4)
5359 opcode = OP_ATOMIC_EXCHANGE_I4;
5360 #if SIZEOF_REGISTER == 8
5361 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5362 (fsig->params [0]->type == MONO_TYPE_I))
5363 opcode = OP_ATOMIC_EXCHANGE_I8;
5365 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5366 opcode = OP_ATOMIC_EXCHANGE_I4;
5371 MONO_INST_NEW (cfg, ins, opcode);
5372 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5373 ins->inst_basereg = args [0]->dreg;
5374 ins->inst_offset = 0;
5375 ins->sreg2 = args [1]->dreg;
5376 MONO_ADD_INS (cfg->cbb, ins);
5378 switch (fsig->params [0]->type) {
5380 ins->type = STACK_I4;
5384 ins->type = STACK_I8;
5386 case MONO_TYPE_OBJECT:
5387 ins->type = STACK_OBJ;
5390 g_assert_not_reached ();
/* GC needs to know about the new reference in *args [0] */
5393 if (cfg->gen_write_barriers && is_ref)
5394 emit_write_barrier (cfg, args [0], args [1]);
5396 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5398 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
/* CompareExchange: select 4- or 8-byte CAS from the operand type */
5399 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5401 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5402 if (fsig->params [1]->type == MONO_TYPE_I4)
5404 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5405 size = sizeof (gpointer);
5406 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5409 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5410 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5411 ins->sreg1 = args [0]->dreg;
5412 ins->sreg2 = args [1]->dreg;
5413 ins->sreg3 = args [2]->dreg;
5414 ins->type = STACK_I4;
5415 MONO_ADD_INS (cfg->cbb, ins);
5416 } else if (size == 8) {
5417 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5418 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5419 ins->sreg1 = args [0]->dreg;
5420 ins->sreg2 = args [1]->dreg;
5421 ins->sreg3 = args [2]->dreg;
5422 ins->type = STACK_I8;
5423 MONO_ADD_INS (cfg->cbb, ins);
5425 /* g_assert_not_reached (); */
5427 if (cfg->gen_write_barriers && is_ref)
5428 emit_write_barrier (cfg, args [0], args [1]);
5430 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5432 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5433 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- Misc corlib intrinsics (Debugger.Break, Environment) ---- */
5437 } else if (cmethod->klass->image == mono_defaults.corlib) {
5438 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5439 && strcmp (cmethod->klass->name, "Debugger") == 0) {
/* The break policy decides whether Debugger.Break() is honored */
5440 if (should_insert_brekpoint (cfg->method)) {
5441 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5443 MONO_INST_NEW (cfg, ins, OP_NOP);
5444 MONO_ADD_INS (cfg->cbb, ins);
/* Constant-fold the platform check at JIT time */
5448 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5449 && strcmp (cmethod->klass->name, "Environment") == 0) {
5451 EMIT_NEW_ICONST (cfg, ins, 1);
5453 EMIT_NEW_ICONST (cfg, ins, 0);
5457 } else if (cmethod->klass == mono_defaults.math_class) {
5459 * There is general branches code for Min/Max, but it does not work for
5461 * http://everything2.com/?node_id=1051618
/* Finally, try SIMD, LLVM-specific and arch-specific intrinsics */
5465 #ifdef MONO_ARCH_SIMD_INTRINSICS
5466 if (cfg->opt & MONO_OPT_SIMD) {
5467 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5473 if (COMPILE_LLVM (cfg)) {
5474 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5479 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5483 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain internal calls to faster managed equivalents.
 * Currently only String.InternalAllocateStr is redirected to the
 * GC's managed string allocator (skipped when allocation profiling
 * or shared code is enabled, or when cross-compiling).  Returns the
 * redirected call instruction, or falls through when no redirection
 * applies.
 */
5486 inline static MonoInst*
5487 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5488 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5490 if (method->klass == mono_defaults.string_class) {
5491 /* managed string allocation support */
5492 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5493 MonoInst *iargs [2];
5494 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5495 MonoMethod *managed_alloc = NULL;
5497 g_assert (vtable); /*Should not fail since it System.String*/
5498 #ifndef MONO_CROSS_COMPILE
5499 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call managed_alloc (vtable, length) in place of the icall */
5503 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5504 iargs [1] = args [0];
5505 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for each argument of SIG
 * (including the implicit 'this') and emit stores of the caller's
 * stack values SP into them, so the inlined body can reference them
 * as ordinary arguments.
 */
5512 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5514 MonoInst *store, *temp;
5517 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; take its type from the stack */
5518 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5521 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5522 * would be different than the MonoInst's used to represent arguments, and
5523 * the ldelema implementation can't deal with that.
5524 * Solution: When ldelema is used on an inline argument, create a var for
5525 * it, emit ldelema on that var, and emit the saving code below in
5526 * inline_method () if needed.
5528 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5529 cfg->args [i] = temp;
5530 /* This uses cfg->args [i] which is set by the preceeding line */
5531 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5532 store->cil_code = sp [0]->cil_code;
5537 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5538 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5540 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of CALLED_METHOD if its full
 * name starts with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * env var.  An empty/unset limit allows everything.  The env value is
 * cached in a function-local static on first use.
 */
5542 check_inline_called_method_name_limit (MonoMethod *called_method)
5545 static char *limit = NULL;
5547 if (limit == NULL) {
5548 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5550 if (limit_string != NULL)
5551 limit = limit_string;
5553 limit = (char *) "";
5556 if (limit [0] != '\0') {
5557 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match against the configured limit */
5559 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5560 g_free (called_method_name);
5562 //return (strncmp_result <= 0);
5563 return (strncmp_result == 0);
5570 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining into CALLER_METHOD if its full
 * name starts with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * env var.  Mirrors check_inline_called_method_name_limit above.
 */
5572 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5575 static char *limit = NULL;
5577 if (limit == NULL) {
5578 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5579 if (limit_string != NULL) {
5580 limit = limit_string;
5582 limit = (char *) "";
5586 if (limit [0] != '\0') {
5587 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit */
5589 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5590 g_free (caller_method_name);
5592 //return (strncmp_result <= 0);
5593 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 * Emit IR that stores the zero/default value of RTYPE into RVAR, dispatching
 * on RVAR's evaluation-stack type: integer 0, long 0, pointer NULL, double
 * 0.0 (via a shared static r8_0 constant), or VZERO for value types.
 * NOTE(review): the case labels are elided in this extract — the mapping of
 * stack types to branches is inferred from the emitted opcodes; confirm
 * against the full source.
 */
5601 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Shared storage for the 0.0 constant; OP_R8CONST references it by address. */
5603 static double r8_0 = 0.0;
5606 switch (rvar->type) {
5608 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5611 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5616 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5619 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5620 ins->type = STACK_R8;
5621 ins->inst_p0 = (void*)&r8_0;
5622 ins->dreg = rvar->dreg;
5623 MONO_ADD_INS (cfg->cbb, ins);
/* Value types: zero the whole struct. */
5626 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5629 g_assert_not_reached ();
/*
 * inline_method:
 * Try to inline CMETHOD into the current compilation at IP. Saves the parts
 * of CFG that mono_method_to_ir () mutates (locals, args, cbb, cil maps,
 * generic context, ...), recursively converts CMETHOD's IL between fresh
 * start/end bblocks, restores CFG state, and then either stitches the new
 * bblocks into the graph (inline accepted) or discards them (aborted).
 * RVAR receives the callee's return value when its signature is non-void.
 * INLINE_ALWAYS forces acceptance regardless of the cost returned by
 * mono_method_to_ir () and turns callee load errors into CFG exceptions.
 * NOTE(review): this extract elides many lines (return statements, brace
 * closures, the uninit-rvar scan details); comments describe visible code only.
 */
5634 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5635 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5637 MonoInst *ins, *rvar = NULL;
5638 MonoMethodHeader *cheader;
5639 MonoBasicBlock *ebblock, *sbblock;
5641 MonoMethod *prev_inlined_method;
5642 MonoInst **prev_locals, **prev_args;
5643 MonoType **prev_arg_types;
5644 guint prev_real_offset;
5645 GHashTable *prev_cbb_hash;
5646 MonoBasicBlock **prev_cil_offset_to_bb;
5647 MonoBasicBlock *prev_cbb;
5648 unsigned char* prev_cil_start;
5649 guint32 prev_cil_offset_to_bb_len;
5650 MonoMethod *prev_current_method;
5651 MonoGenericContext *prev_generic_context;
5652 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5654 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug name filters (see check_inline_*_method_name_limit above). */
5656 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5657 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5660 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5661 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5665 if (cfg->verbose_level > 2)
5666 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once across compilations. */
5668 if (!cmethod->inline_info) {
5669 cfg->stat_inlineable_methods++;
5670 cmethod->inline_info = 1;
5673 /* allocate local variables */
5674 cheader = mono_method_get_header (cmethod);
/* Header load failure: propagate as a CFG exception only when inlining is mandatory. */
5676 if (cheader == NULL || mono_loader_get_last_error ()) {
5677 MonoLoaderError *error = mono_loader_get_last_error ();
5680 mono_metadata_free_mh (cheader);
5681 if (inline_always && error)
5682 mono_cfg_set_exception (cfg, error->exception_type);
5684 mono_loader_clear_error ();
5688 /*Must verify before creating locals as it can cause the JIT to assert.*/
5689 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5690 mono_metadata_free_mh (cheader);
5694 /* allocate space to store the return value */
5695 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5696 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; caller's set is restored after conversion. */
5699 prev_locals = cfg->locals;
5700 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5701 for (i = 0; i < cheader->num_locals; ++i)
5702 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5704 /* allocate start and end blocks */
5705 /* This is needed so if the inline is aborted, we can clean up */
5706 NEW_BBLOCK (cfg, sbblock);
5707 sbblock->real_offset = real_offset;
5709 NEW_BBLOCK (cfg, ebblock);
5710 ebblock->block_num = cfg->num_bblocks++;
5711 ebblock->real_offset = real_offset;
/* Save every piece of per-method CFG state that the recursive
 * mono_method_to_ir () call below will overwrite. */
5713 prev_args = cfg->args;
5714 prev_arg_types = cfg->arg_types;
5715 prev_inlined_method = cfg->inlined_method;
5716 cfg->inlined_method = cmethod;
5717 cfg->ret_var_set = FALSE;
5718 cfg->inline_depth ++;
5719 prev_real_offset = cfg->real_offset;
5720 prev_cbb_hash = cfg->cbb_hash;
5721 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5722 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5723 prev_cil_start = cfg->cil_start;
5724 prev_cbb = cfg->cbb;
5725 prev_current_method = cfg->current_method;
5726 prev_generic_context = cfg->generic_context;
5727 prev_ret_var_set = cfg->ret_var_set;
/* A callvirt on an instance method needs a null check on 'this' in the callee. */
5729 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5732 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5734 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state saved above. */
5736 cfg->inlined_method = prev_inlined_method;
5737 cfg->real_offset = prev_real_offset;
5738 cfg->cbb_hash = prev_cbb_hash;
5739 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5740 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5741 cfg->cil_start = prev_cil_start;
5742 cfg->locals = prev_locals;
5743 cfg->args = prev_args;
5744 cfg->arg_types = prev_arg_types;
5745 cfg->current_method = prev_current_method;
5746 cfg->generic_context = prev_generic_context;
5747 cfg->ret_var_set = prev_ret_var_set;
5748 cfg->inline_depth --;
/* Accept the inline when the cost is below the threshold or when forced. */
5750 if ((costs >= 0 && costs < 60) || inline_always) {
5751 if (cfg->verbose_level > 2)
5752 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5754 cfg->stat_inlined_methods++;
5756 /* always add some code to avoid block split failures */
5757 MONO_INST_NEW (cfg, ins, OP_NOP);
5758 MONO_ADD_INS (prev_cbb, ins);
5760 prev_cbb->next_bb = sbblock;
5761 link_bblock (cfg, prev_cbb, sbblock);
5764 * Get rid of the begin and end bblocks if possible to aid local
5767 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5769 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5770 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5772 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5773 MonoBasicBlock *prev = ebblock->in_bb [0];
5774 mono_merge_basic_blocks (cfg, prev, ebblock);
5776 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5777 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5778 cfg->cbb = prev_cbb;
5782 * Its possible that the rvar is set in some prev bblock, but not in others.
/* Scan ebblock's predecessors; blocks ending in OP_NOT_REACHED never set rvar,
 * so give it a dummy default value there. */
5788 for (i = 0; i < ebblock->in_count; ++i) {
5789 bb = ebblock->in_bb [i];
5791 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5794 emit_init_rvar (cfg, rvar, fsig->ret);
5804 * If the inlined method contains only a throw, then the ret var is not
5805 * set, so set it to a dummy value.
5808 emit_init_rvar (cfg, rvar, fsig->ret);
/* Push the callee's return value onto the caller's evaluation stack. */
5810 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5813 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: too costly — undo and fall back to an ordinary call. */
5816 if (cfg->verbose_level > 2)
5817 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5818 cfg->exception_type = MONO_EXCEPTION_NONE;
5819 mono_loader_clear_error ();
5821 /* This gets rid of the newly added bblocks */
5822 cfg->cbb = prev_cbb;
5824 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5829 * Some of these comments may well be out-of-date.
5830 * Design decisions: we do a single pass over the IL code (and we do bblock
5831 * splitting/merging in the few cases when it's required: a back jump to an IL
5832 * address that was not already seen as bblock starting point).
5833 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5834 * Complex operations are decomposed in simpler ones right away. We need to let the
5835 * arch-specific code peek and poke inside this process somehow (except when the
5836 * optimizations can take advantage of the full semantic info of coarse opcodes).
5837 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5838 * MonoInst->opcode initially is the IL opcode or some simplification of that
5839 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5840 * opcode with value bigger than OP_LAST.
5841 * At this point the IR can be handed over to an interpreter, a dumb code generator
5842 * or to the optimizing code generator that will translate it to SSA form.
5844 * Profiling directed optimizations.
5845 * We may compile by default with few or no optimizations and instrument the code
5846 * or the user may indicate what methods to optimize the most either in a config file
5847 * or through repeated runs where the compiler applies offline the optimizations to
5848 * each method and then decides if it was worth it.
5851 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5852 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5853 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5854 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5855 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5856 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5857 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5858 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5860 /* offset from br.s -> br like opcodes */
5861 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Return whether the CIL address IP belongs to basic block BB: true when no
 * other bblock starts at IP (NULL entry in the offset->bb map) or when the
 * bblock starting there is BB itself.
 */
5864 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5866 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5868 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * First pass over the IL in [START, END): create basic blocks at every branch
 * target (short/long branches and switch cases) and at the instruction
 * following a branch, via GET_BBLOCK. Also marks the bblock containing a
 * CEE_THROW as out-of-line so it can be moved to a cold section.
 * NOTE(review): several lines (ip advancement per operand kind, error exits)
 * are elided in this extract; comments cover visible code only.
 */
5872 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5874 unsigned char *ip = start;
5875 unsigned char *target;
5878 MonoBasicBlock *bblock;
5879 const MonoOpcode *opcode;
5882 cli_addr = ip - start;
5883 i = mono_opcode_value ((const guint8 **)&ip, end);
5886 opcode = &mono_opcodes [i];
/* Advance/branch handling depends on the opcode's operand kind. */
5887 switch (opcode->argument) {
5888 case MonoInlineNone:
5891 case MonoInlineString:
5892 case MonoInlineType:
5893 case MonoInlineField:
5894 case MonoInlineMethod:
5897 case MonoShortInlineR:
5904 case MonoShortInlineVar:
5905 case MonoShortInlineI:
/* 1-byte signed branch displacement, relative to the next instruction. */
5908 case MonoShortInlineBrTarget:
5909 target = start + cli_addr + 2 + (signed char)ip [1];
5910 GET_BBLOCK (cfg, bblock, target);
/* Fall-through successor also starts a bblock. */
5913 GET_BBLOCK (cfg, bblock, ip);
/* 4-byte signed branch displacement. */
5915 case MonoInlineBrTarget:
5916 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5917 GET_BBLOCK (cfg, bblock, target);
5920 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, all relative to the end of the instruction. */
5922 case MonoInlineSwitch: {
5923 guint32 n = read32 (ip + 1);
5926 cli_addr += 5 + 4 * n;
5927 target = start + cli_addr;
5928 GET_BBLOCK (cfg, bblock, target);
5930 for (j = 0; j < n; ++j) {
5931 target = start + cli_addr + (gint32)read32 (ip);
5932 GET_BBLOCK (cfg, bblock, target);
5942 g_assert_not_reached ();
5945 if (i == CEE_THROW) {
5946 unsigned char *bb_start = ip - 1;
5948 /* Find the start of the bblock containing the throw */
5950 while ((bb_start >= start) && !bblock) {
5951 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
5955 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods the
 * token indexes the wrapper's own data (then inflated with CONTEXT when
 * generic); otherwise it is looked up in M's image metadata. "allow_open"
 * because open constructed types are not rejected here (see mini_get_method).
 */
static inline MonoMethod *
5966 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5970 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5971 method = mono_method_get_wrapper_data (m, token);
5973 method = mono_class_inflate_generic_method (method, context);
5975 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (the elided branch
 * presumably clears the result — confirm against the full source).
 */
static inline MonoMethod *
5982 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5984 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5986 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data lookup
 * (inflated with CONTEXT when generic) for wrappers, metadata lookup
 * otherwise. The class is initialized before being returned.
 */
static inline MonoClass*
5993 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5997 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5998 klass = mono_method_get_wrapper_data (method, token);
6000 klass = mono_class_inflate_generic_class (klass, context);
6002 klass = mono_class_get_full (method->klass->image, token, context);
6005 mono_class_init (klass);
/*
 * mini_get_signature:
 * Resolve TOKEN to a MonoMethodSignature: for wrappers the signature comes
 * from wrapper data and is inflated with CONTEXT (inflation errors assert);
 * for normal methods it is parsed from the image's metadata.
 * NOTE(review): the non-wrapper path as visible does not pass CONTEXT to the
 * parse — elided lines may handle inflation; confirm against the full source.
 */
static inline MonoMethodSignature*
6010 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6012 MonoMethodSignature *fsig;
6014 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6017 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6019 fsig = mono_inflate_generic_signature (fsig, context, &error);
6021 g_assert (mono_error_ok (&error));
6024 fsig = mono_metadata_parse_signature (method->klass->image, token);
6030 * Returns TRUE if the JIT should abort inlining because "callee"
6031 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS LinkDemand security between CALLER and CALLEE. For the ECMA
 * "black box" case, emit code that throws a SecurityException before the
 * call; for other failures record a SECURITY_LINKDEMAND exception on the CFG
 * (without clobbering an earlier exception). Returns TRUE when the JIT should
 * abort inlining because CALLEE carries security attributes (per the header
 * comment above this function in the full source).
 */
6034 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* When inlining (cfg->method != caller) any declarative security on the callee
 * forces the inline to be abandoned. */
6038 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6042 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6043 if (result == MONO_JIT_SECURITY_OK)
6046 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6047 /* Generate code to throw a SecurityException before the actual call/link */
6048 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6051 NEW_ICONST (cfg, args [0], 4);
6052 NEW_METHODCONST (cfg, args [1], caller);
6053 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6054 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6055 /* don't hide previous results */
6056 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6057 cfg->exception_data = result;
/*
 * throw_exception:
 * Return the (cached) SecurityManager.ThrowException(Exception) method used
 * by emit_throw_exception () below. The lookup is performed once and stored
 * in a function-static. NOTE(review): caching is not visibly synchronized —
 * presumably only reached from JIT compilation paths; confirm thread-safety
 * in the full source.
 */
6065 throw_exception (void)
6067 static MonoMethod *method = NULL;
6070 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6071 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 * Emit a call to SecurityManager.ThrowException (EX), i.e. code that throws
 * the pre-built exception object EX at runtime.
 */
6078 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6080 MonoMethod *thrower = throw_exception ();
/* Pass the exception object as a raw pointer constant argument. */
6083 EMIT_NEW_PCONST (cfg, args [0], ex);
6084 mono_emit_method_call (cfg, thrower, args, NULL);
6088 * Return the original method is a wrapper is specified. We can only access
6089 * the custom attributes from the original method.
/*
 * get_original_method:
 * Map a wrapper back to the managed method it wraps, since custom attributes
 * (needed for CoreCLR security checks) only exist on the original method.
 * Non-wrappers are returned as-is; NATIVE_TO_MANAGED wrappers are treated
 * specially (native callers may call anything — see FIXME below).
 */
6092 get_original_method (MonoMethod *method)
6094 if (method->wrapper_type == MONO_WRAPPER_NONE)
6097 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6098 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6101 /* in other cases we need to find the original method */
6102 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security check for field access: if CALLER (unwrapped via
 * get_original_method) may not access FIELD, emit code that throws the
 * returned security exception at the access site.
 */
6106 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6107 MonoBasicBlock *bblock, unsigned char *ip)
6109 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6110 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6112 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security check for method calls: if CALLER (unwrapped via
 * get_original_method) may not call CALLEE, emit code that throws the
 * returned security exception at the call site.
 */
6116 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6117 MonoBasicBlock *bblock, unsigned char *ip)
6119 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6120 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6122 emit_throw_exception (cfg, ex);
6126 * Check that the IL instructions at ip are the array initialization
6127 * sequence and return the pointer to the data and the size.
6130 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6133 * newarr[System.Int32]
6135 * ldtoken field valuetype ...
6136 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6138 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6139 guint32 token = read32 (ip + 7);
6140 guint32 field_token = read32 (ip + 2);
6141 guint32 field_index = field_token & 0xffffff;
6143 const char *data_ptr;
6145 MonoMethod *cmethod;
6146 MonoClass *dummy_class;
6147 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6153 *out_field_token = field_token;
6155 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6158 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6160 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6161 case MONO_TYPE_BOOLEAN:
6165 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6166 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6167 case MONO_TYPE_CHAR:
6177 return NULL; /* stupid ARM FP swapped format */
6187 if (size > mono_type_size (field->type, &dummy_align))
6190 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6191 if (!method->klass->image->dynamic) {
6192 field_index = read32 (ip + 2) & 0xffffff;
6193 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6194 data_ptr = mono_image_rva_map (method->klass->image, rva);
6195 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6196 /* for aot code we do the lookup on load */
6197 if (aot && data_ptr)
6198 return GUINT_TO_POINTER (rva);
6200 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6202 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP (or noting an empty body).
 * The header is queued on headers_to_free rather than freed here because the
 * caller may still be using it.
 */
6210 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6212 char *method_fname = mono_method_full_name (method, TRUE);
6214 MonoMethodHeader *header = mono_method_get_header (method);
6216 if (header->code_size == 0)
6217 method_code = g_strdup ("method body is empty.");
6219 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6220 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6221 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6222 g_free (method_fname);
6223 g_free (method_code);
6224 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 * Record a pre-built managed exception object on CFG (OBJECT_SUPPLIED kind).
 * exception_ptr is registered as a GC root so the object survives until the
 * compilation reports it.
 */
6228 set_exception_object (MonoCompile *cfg, MonoException *exception)
6230 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6231 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6232 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 * Emit a store of the stack top (*SP) into local N. When the store would be
 * a plain reg-reg move and *SP is the constant instruction just emitted into
 * the current bblock, retarget that instruction's dreg instead of emitting a
 * separate move (a peephole only safe for ICONST/I8CONST — see the comment
 * below about decomposed opcodes).
 */
6236 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6239 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6240 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6241 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6242 /* Optimize reg-reg moves away */
6244 * Can't optimize other opcodes, since sp[0] might point to
6245 * the last ins of a decomposed opcode.
6247 sp [0]->dreg = (cfg)->locals [n]->dreg;
6249 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6254 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 * Peephole for "ldloca <n>; initobj <type>": instead of taking the local's
 * address (which would inhibit later optimizations), directly emit the
 * initialization — PCONST NULL for reference types, VZERO for structs.
 * Returns the IP past the consumed sequence, or (per the elided paths)
 * NULL/unchanged when the pattern doesn't apply — confirm in the full source.
 * SIZE distinguishes the short (ldloca.s) and long encodings.
 */
static inline unsigned char *
6258 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6267 local = read16 (ip + 2);
/* Only fires when the following initobj is in the same bblock as the ldloca. */
6271 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6272 gboolean skip = FALSE;
6274 /* From the INITOBJ case */
6275 token = read32 (ip + 2);
6276 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6277 CHECK_TYPELOAD (klass);
6278 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
6279 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
6280 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
6281 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 * Walk CLASS's ancestor chain and report whether it derives from (or is)
 * System.Exception.
 */
6294 is_exception_class (MonoClass *class)
6297 if (class == mono_defaults.exception_class)
6299 class = class->parent;
6305 * is_jit_optimizer_disabled:
6307 * Determine whenever M's assembly has a DebuggableAttribute with the
6308 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 * Determine whether M's assembly carries a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set (per the header comment in the full
 * source). The answer is cached per-assembly; the memory barriers order the
 * cached value before the "inited" flag so concurrent readers never see an
 * initialized flag with a stale value.
 * NOTE(review): decoding of the two boolean ctor arguments is elided here.
 */
6311 is_jit_optimizer_disabled (MonoMethod *m)
6313 MonoAssembly *ass = m->klass->image->assembly;
6314 MonoCustomAttrInfo* attrs;
6315 static MonoClass *klass;
6317 gboolean val = FALSE;
/* Fast path: result already computed for this assembly. */
6320 if (ass->jit_optimizer_disabled_inited)
6321 return ass->jit_optimizer_disabled;
6324 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* DebuggableAttribute not found in corlib: cache "not disabled". */
6327 ass->jit_optimizer_disabled = FALSE;
6328 mono_memory_barrier ();
6329 ass->jit_optimizer_disabled_inited = TRUE;
6333 attrs = mono_custom_attrs_from_assembly (ass);
6335 for (i = 0; i < attrs->num_attrs; ++i) {
6336 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6339 MonoMethodSignature *sig;
6341 if (!attr->ctor || attr->ctor->klass != klass)
6343 /* Decode the attribute. See reflection.c */
6344 len = attr->data_size;
6345 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog (ECMA-335 II.23.3). */
6346 g_assert (read16 (p) == 0x0001);
6349 // FIXME: Support named parameters
6350 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is understood. */
6351 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6353 /* Two boolean arguments */
6357 mono_custom_attrs_free (attrs);
6360 ass->jit_optimizer_disabled = val;
6361 mono_memory_barrier ();
6362 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 * Decide whether a tail. call from METHOD to CMETHOD (signature FSIG) can be
 * compiled as a real tail call. Starts from an arch-specific or
 * signature-equality baseline, then vetoes cases where callee arguments or
 * 'this' could point into the caller's (about-to-be-freed) stack frame, and
 * other unsupported cases (pinvoke, save_lmf, most wrappers).
 */
6368 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6370 gboolean supported_tail_call;
6373 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6374 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* Generic fallback: identical signatures and no struct return. */
6376 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6379 for (i = 0; i < fsig->param_count; ++i) {
6380 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6381 /* These can point to the current method's stack */
6382 supported_tail_call = FALSE;
6384 if (fsig->hasthis && cmethod->klass->valuetype)
6385 /* this might point to the current method's stack */
6386 supported_tail_call = FALSE;
6387 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6388 supported_tail_call = FALSE;
6389 if (cfg->method->save_lmf)
6390 supported_tail_call = FALSE;
6391 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6392 supported_tail_call = FALSE;
6394 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs. */
6396 if (supported_tail_call) {
6397 if (!mono_debug_count ())
6398 supported_tail_call = FALSE;
6402 return supported_tail_call;
6405 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6406 * it to the thread local value based on the tls_offset field. Every other kind of access to
6407 * the field causes an assert.
/*
 * is_magic_tls_access:
 * Return TRUE when FIELD is corlib's ThreadLocal`1.tlsdata field — the one
 * the JIT intercepts on ldflda and redirects to thread-local storage (see
 * the comment above in the full source).
 */
6410 is_magic_tls_access (MonoClassField *field)
6412 if (strcmp (field->name, "tlsdata"))
6414 if (strcmp (field->parent->name, "ThreadLocal`1"))
6416 return field->parent->image == mono_defaults.corlib;
6419 /* emits the code needed to access a managed tls var (like ThreadStatic)
6420 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6421 * pointer for the current thread.
6422 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 * Emit the inlined address computation for a managed TLS variable (e.g. a
 * ThreadStatic field) whose encoded offset lives in OFFSET_REG, given
 * THREAD_INS pointing at the current MonoInternalThread. The encoding packs
 * a 1-based table index in the top byte and a byte offset in the low 24 bits:
 *   idx = (offset >> 24) - 1;
 *   addr = (char *) thread->static_data [idx] + (offset & 0xffffff);
 * Returns the MonoInst producing the address.
 */
6425 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6428 int static_data_reg, array_reg, dreg;
6429 int offset2_reg, idx_reg;
6430 // inlined access to the tls data
6431 // idx = (offset >> 24) - 1;
6432 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6433 static_data_reg = alloc_ireg (cfg);
6434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6435 idx_reg = alloc_ireg (cfg);
6436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by sizeof(gpointer) to index the pointer table. */
6438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6439 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6440 array_reg = alloc_ireg (cfg);
6441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6442 offset2_reg = alloc_ireg (cfg);
/* Low 24 bits of the encoded offset are the byte offset within the chunk. */
6443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6444 dreg = alloc_ireg (cfg);
6445 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6450 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6451 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 * Emit (and cache per-method in CACHED_TLS_ADDR) the address of the TLS
 * value behind a ThreadLocal<T>.tlsdata access: load the tls_offset field
 * from the ThreadLocal object (THREAD_LOCAL), obtain the current
 * MonoInternalThread (intrinsic when available, otherwise a call to
 * Thread.CurrentInternalThread_internal), compute the address with
 * emit_managed_static_data_access (), and store it in a temp so later
 * accesses in the same method reuse it.
 * NOTE(review): some control-flow lines are elided in this extract.
 */
6454 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6456 MonoInst *load, *addr, *temp, *store, *thread_ins;
6457 MonoClassField *offset_field;
/* Fast path: address already computed earlier in this method. */
6459 if (*cached_tls_addr) {
6460 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6463 thread_ins = mono_get_thread_intrinsic (cfg);
6464 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6466 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6468 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: fall back to a managed call. */
6470 MonoMethod *thread_method;
6471 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6472 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6474 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6475 addr->klass = mono_class_from_mono_type (tls_field->type);
6476 addr->type = STACK_MP;
/* Cache the computed address in a fresh temp for subsequent accesses. */
6477 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6478 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6480 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6485 * mono_method_to_ir:
6487 * Translate the .net IL into linear IR.
6490 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6491 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6492 guint inline_offset, gboolean is_virtual_call)
6495 MonoInst *ins, **sp, **stack_start;
6496 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6497 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6498 MonoMethod *cmethod, *method_definition;
6499 MonoInst **arg_array;
6500 MonoMethodHeader *header;
6502 guint32 token, ins_flag;
6504 MonoClass *constrained_call = NULL;
6505 unsigned char *ip, *end, *target, *err_pos;
6506 static double r8_0 = 0.0;
6507 MonoMethodSignature *sig;
6508 MonoGenericContext *generic_context = NULL;
6509 MonoGenericContainer *generic_container = NULL;
6510 MonoType **param_types;
6511 int i, n, start_new_bblock, dreg;
6512 int num_calls = 0, inline_costs = 0;
6513 int breakpoint_id = 0;
6515 MonoBoolean security, pinvoke;
6516 MonoSecurityManager* secman = NULL;
6517 MonoDeclSecurityActions actions;
6518 GSList *class_inits = NULL;
6519 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6521 gboolean init_locals, seq_points, skip_dead_blocks;
6522 gboolean disable_inline, sym_seq_points = FALSE;
6523 MonoInst *cached_tls_addr = NULL;
6524 MonoDebugMethodInfo *minfo;
6525 MonoBitSet *seq_point_locs = NULL;
6526 MonoBitSet *seq_point_set_locs = NULL;
6528 disable_inline = is_jit_optimizer_disabled (method);
6530 /* serialization and xdomain stuff may need access to private fields and methods */
6531 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6532 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6533 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6534 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6535 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6536 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6538 dont_verify |= mono_security_smcs_hack_enabled ();
6540 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6541 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6542 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6543 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6544 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6546 image = method->klass->image;
6547 header = mono_method_get_header (method);
6549 MonoLoaderError *error;
6551 if ((error = mono_loader_get_last_error ())) {
6552 mono_cfg_set_exception (cfg, error->exception_type);
6554 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6555 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6557 goto exception_exit;
6559 generic_container = mono_method_get_generic_container (method);
6560 sig = mono_method_signature (method);
6561 num_args = sig->hasthis + sig->param_count;
6562 ip = (unsigned char*)header->code;
6563 cfg->cil_start = ip;
6564 end = ip + header->code_size;
6565 cfg->stat_cil_code_size += header->code_size;
6566 init_locals = header->init_locals;
6568 seq_points = cfg->gen_seq_points && cfg->method == method;
6569 #ifdef PLATFORM_ANDROID
6570 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6573 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6574 /* We could hit a seq point before attaching to the JIT (#8338) */
6578 if (cfg->gen_seq_points && cfg->method == method) {
6579 minfo = mono_debug_lookup_method (method);
6581 int i, n_il_offsets;
6585 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6586 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6587 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6588 sym_seq_points = TRUE;
6589 for (i = 0; i < n_il_offsets; ++i) {
6590 if (il_offsets [i] < header->code_size)
6591 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6597 * Methods without init_locals set could cause asserts in various passes
6602 method_definition = method;
6603 while (method_definition->is_inflated) {
6604 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6605 method_definition = imethod->declaring;
6608 /* SkipVerification is not allowed if core-clr is enabled */
6609 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6611 dont_verify_stloc = TRUE;
6614 if (mono_debug_using_mono_debugger ())
6615 cfg->keep_cil_nops = TRUE;
6617 if (sig->is_inflated)
6618 generic_context = mono_method_get_context (method);
6619 else if (generic_container)
6620 generic_context = &generic_container->context;
6621 cfg->generic_context = generic_context;
6623 if (!cfg->generic_sharing_context)
6624 g_assert (!sig->has_type_parameters);
6626 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6627 g_assert (method->is_inflated);
6628 g_assert (mono_method_get_context (method)->method_inst);
6630 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6631 g_assert (sig->generic_param_count);
6633 if (cfg->method == method) {
6634 cfg->real_offset = 0;
6636 cfg->real_offset = inline_offset;
6639 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6640 cfg->cil_offset_to_bb_len = header->code_size;
6642 cfg->current_method = method;
6644 if (cfg->verbose_level > 2)
6645 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6647 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6649 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6650 for (n = 0; n < sig->param_count; ++n)
6651 param_types [n + sig->hasthis] = sig->params [n];
6652 cfg->arg_types = param_types;
6654 dont_inline = g_list_prepend (dont_inline, method);
6655 if (cfg->method == method) {
6657 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6658 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6661 NEW_BBLOCK (cfg, start_bblock);
6662 cfg->bb_entry = start_bblock;
6663 start_bblock->cil_code = NULL;
6664 start_bblock->cil_length = 0;
6665 #if defined(__native_client_codegen__)
6666 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6667 ins->dreg = alloc_dreg (cfg, STACK_I4);
6668 MONO_ADD_INS (start_bblock, ins);
6672 NEW_BBLOCK (cfg, end_bblock);
6673 cfg->bb_exit = end_bblock;
6674 end_bblock->cil_code = NULL;
6675 end_bblock->cil_length = 0;
6676 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6677 g_assert (cfg->num_bblocks == 2);
6679 arg_array = cfg->args;
6681 if (header->num_clauses) {
6682 cfg->spvars = g_hash_table_new (NULL, NULL);
6683 cfg->exvars = g_hash_table_new (NULL, NULL);
6685 /* handle exception clauses */
6686 for (i = 0; i < header->num_clauses; ++i) {
6687 MonoBasicBlock *try_bb;
6688 MonoExceptionClause *clause = &header->clauses [i];
6689 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6690 try_bb->real_offset = clause->try_offset;
6691 try_bb->try_start = TRUE;
6692 try_bb->region = ((i + 1) << 8) | clause->flags;
6693 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6694 tblock->real_offset = clause->handler_offset;
6695 tblock->flags |= BB_EXCEPTION_HANDLER;
6697 link_bblock (cfg, try_bb, tblock);
6699 if (*(ip + clause->handler_offset) == CEE_POP)
6700 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6702 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6703 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6704 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6705 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6706 MONO_ADD_INS (tblock, ins);
6708 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6709 /* finally clauses already have a seq point */
6710 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6711 MONO_ADD_INS (tblock, ins);
6714 /* todo: is a fault block unsafe to optimize? */
6715 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6716 tblock->flags |= BB_EXCEPTION_UNSAFE;
6720 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6722 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6724 /* catch and filter blocks get the exception object on the stack */
6725 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6726 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6727 MonoInst *dummy_use;
6729 /* mostly like handle_stack_args (), but just sets the input args */
6730 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6731 tblock->in_scount = 1;
6732 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6733 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6736 * Add a dummy use for the exvar so its liveness info will be
6740 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6742 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6743 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6744 tblock->flags |= BB_EXCEPTION_HANDLER;
6745 tblock->real_offset = clause->data.filter_offset;
6746 tblock->in_scount = 1;
6747 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6748 /* The filter block shares the exvar with the handler block */
6749 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6750 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6751 MONO_ADD_INS (tblock, ins);
6755 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6756 clause->data.catch_class &&
6757 cfg->generic_sharing_context &&
6758 mono_class_check_context_used (clause->data.catch_class)) {
6760 * In shared generic code with catch
6761 * clauses containing type variables
6762 * the exception handling code has to
6763 * be able to get to the rgctx.
6764 * Therefore we have to make sure that
6765 * the vtable/mrgctx argument (for
6766 * static or generic methods) or the
6767 * "this" argument (for non-static
6768 * methods) are live.
6770 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6771 mini_method_get_context (method)->method_inst ||
6772 method->klass->valuetype) {
6773 mono_get_vtable_var (cfg);
6775 MonoInst *dummy_use;
6777 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6782 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6783 cfg->cbb = start_bblock;
6784 cfg->args = arg_array;
6785 mono_save_args (cfg, sig, inline_args);
6788 /* FIRST CODE BLOCK */
6789 NEW_BBLOCK (cfg, bblock);
6790 bblock->cil_code = ip;
6794 ADD_BBLOCK (cfg, bblock);
6796 if (cfg->method == method) {
6797 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6798 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6799 MONO_INST_NEW (cfg, ins, OP_BREAK);
6800 MONO_ADD_INS (bblock, ins);
6804 if (mono_security_cas_enabled ())
6805 secman = mono_security_manager_get_methods ();
6807 security = (secman && mono_security_method_has_declsec (method));
6808 /* at this point having security doesn't mean we have any code to generate */
6809 if (security && (cfg->method == method)) {
6810 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6811 * And we do not want to enter the next section (with allocation) if we
6812 * have nothing to generate */
6813 security = mono_declsec_get_demands (method, &actions);
6816 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6817 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6819 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6820 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6821 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6823 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6824 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6828 mono_custom_attrs_free (custom);
6831 custom = mono_custom_attrs_from_class (wrapped->klass);
6832 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6836 mono_custom_attrs_free (custom);
6839 /* not a P/Invoke after all */
6844 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6845 /* we use a separate basic block for the initialization code */
6846 NEW_BBLOCK (cfg, init_localsbb);
6847 cfg->bb_init = init_localsbb;
6848 init_localsbb->real_offset = cfg->real_offset;
6849 start_bblock->next_bb = init_localsbb;
6850 init_localsbb->next_bb = bblock;
6851 link_bblock (cfg, start_bblock, init_localsbb);
6852 link_bblock (cfg, init_localsbb, bblock);
6854 cfg->cbb = init_localsbb;
6856 start_bblock->next_bb = bblock;
6857 link_bblock (cfg, start_bblock, bblock);
6860 if (cfg->gsharedvt && cfg->method == method) {
6861 MonoGSharedVtMethodInfo *info;
6862 MonoInst *var, *locals_var;
6865 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6866 info->method = cfg->method;
6868 info->entries = g_ptr_array_new ();
6869 cfg->gsharedvt_info = info;
6871 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6872 /* prevent it from being register allocated */
6873 //var->flags |= MONO_INST_INDIRECT;
6874 cfg->gsharedvt_info_var = var;
6876 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6877 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6879 /* Allocate locals */
6880 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6881 /* prevent it from being register allocated */
6882 //locals_var->flags |= MONO_INST_INDIRECT;
6883 cfg->gsharedvt_locals_var = locals_var;
6885 dreg = alloc_ireg (cfg);
6886 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6888 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6889 ins->dreg = locals_var->dreg;
6891 MONO_ADD_INS (cfg->cbb, ins);
6892 cfg->gsharedvt_locals_var_ins = ins;
6894 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6897 ins->flags |= MONO_INST_INIT;
6901 /* at this point we know, if security is TRUE, that some code needs to be generated */
6902 if (security && (cfg->method == method)) {
6905 cfg->stat_cas_demand_generation++;
6907 if (actions.demand.blob) {
6908 /* Add code for SecurityAction.Demand */
6909 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6910 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6911 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6912 mono_emit_method_call (cfg, secman->demand, args, NULL);
6914 if (actions.noncasdemand.blob) {
6915 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6916 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6917 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6918 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6919 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6920 mono_emit_method_call (cfg, secman->demand, args, NULL);
6922 if (actions.demandchoice.blob) {
6923 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6924 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6925 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6926 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6927 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6931 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6933 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6936 if (mono_security_core_clr_enabled ()) {
6937 /* check if this is native code, e.g. an icall or a p/invoke */
6938 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6939 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6941 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6942 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6944 /* if this is a native call then it can only be JITted from platform code */
6945 if ((icall || pinvk) && method->klass && method->klass->image) {
6946 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6947 MonoException *ex = icall ? mono_get_exception_security () :
6948 mono_get_exception_method_access ();
6949 emit_throw_exception (cfg, ex);
6956 if (header->code_size == 0)
6959 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6964 if (cfg->method == method)
6965 mono_debug_init_method (cfg, bblock, breakpoint_id);
6967 for (n = 0; n < header->num_locals; ++n) {
6968 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6973 /* We force the vtable variable here for all shared methods
6974 for the possibility that they might show up in a stack
6975 trace where their exact instantiation is needed. */
6976 if (cfg->generic_sharing_context && method == cfg->method) {
6977 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6978 mini_method_get_context (method)->method_inst ||
6979 method->klass->valuetype) {
6980 mono_get_vtable_var (cfg);
6982 /* FIXME: Is there a better way to do this?
6983 We need the variable live for the duration
6984 of the whole method. */
6985 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6989 /* add a check for this != NULL to inlined methods */
6990 if (is_virtual_call) {
6993 NEW_ARGLOAD (cfg, arg_ins, 0);
6994 MONO_ADD_INS (cfg->cbb, arg_ins);
6995 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6998 skip_dead_blocks = !dont_verify;
6999 if (skip_dead_blocks) {
7000 original_bb = bb = mono_basic_block_split (method, &error);
7001 if (!mono_error_ok (&error)) {
7002 mono_error_cleanup (&error);
7008 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7009 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7012 start_new_bblock = 0;
7015 if (cfg->method == method)
7016 cfg->real_offset = ip - header->code;
7018 cfg->real_offset = inline_offset;
7023 if (start_new_bblock) {
7024 bblock->cil_length = ip - bblock->cil_code;
7025 if (start_new_bblock == 2) {
7026 g_assert (ip == tblock->cil_code);
7028 GET_BBLOCK (cfg, tblock, ip);
7030 bblock->next_bb = tblock;
7033 start_new_bblock = 0;
7034 for (i = 0; i < bblock->in_scount; ++i) {
7035 if (cfg->verbose_level > 3)
7036 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7037 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7041 g_slist_free (class_inits);
7044 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7045 link_bblock (cfg, bblock, tblock);
7046 if (sp != stack_start) {
7047 handle_stack_args (cfg, stack_start, sp - stack_start);
7049 CHECK_UNVERIFIABLE (cfg);
7051 bblock->next_bb = tblock;
7054 for (i = 0; i < bblock->in_scount; ++i) {
7055 if (cfg->verbose_level > 3)
7056 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7057 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7060 g_slist_free (class_inits);
7065 if (skip_dead_blocks) {
7066 int ip_offset = ip - header->code;
7068 if (ip_offset == bb->end)
7072 int op_size = mono_opcode_size (ip, end);
7073 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7075 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7077 if (ip_offset + op_size == bb->end) {
7078 MONO_INST_NEW (cfg, ins, OP_NOP);
7079 MONO_ADD_INS (bblock, ins);
7080 start_new_bblock = 1;
7088 * Sequence points are points where the debugger can place a breakpoint.
7089 * Currently, we generate these automatically at points where the IL
7092 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7094 * Make methods interruptable at the beginning, and at the targets of
7095 * backward branches.
7096 * Also, do this at the start of every bblock in methods with clauses too,
7097 * to be able to handle instructions with imprecise control flow like
7099 * Backward branches are handled at the end of method-to-ir ().
7101 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7103 /* Avoid sequence points on empty IL like .volatile */
7104 // FIXME: Enable this
7105 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7106 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7107 MONO_ADD_INS (cfg->cbb, ins);
7110 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7113 bblock->real_offset = cfg->real_offset;
7115 if ((cfg->method == method) && cfg->coverage_info) {
7116 guint32 cil_offset = ip - header->code;
7117 cfg->coverage_info->data [cil_offset].cil_code = ip;
7119 /* TODO: Use an increment here */
7120 #if defined(TARGET_X86)
7121 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7122 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7124 MONO_ADD_INS (cfg->cbb, ins);
7126 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7127 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7131 if (cfg->verbose_level > 3)
7132 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7136 if (seq_points && !sym_seq_points && sp != stack_start) {
7138 * The C# compiler uses these nops to notify the JIT that it should
7139 * insert seq points.
7141 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7142 MONO_ADD_INS (cfg->cbb, ins);
7144 if (cfg->keep_cil_nops)
7145 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7147 MONO_INST_NEW (cfg, ins, OP_NOP);
7149 MONO_ADD_INS (bblock, ins);
7152 if (should_insert_brekpoint (cfg->method)) {
7153 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7155 MONO_INST_NEW (cfg, ins, OP_NOP);
7158 MONO_ADD_INS (bblock, ins);
7164 CHECK_STACK_OVF (1);
7165 n = (*ip)-CEE_LDARG_0;
7167 EMIT_NEW_ARGLOAD (cfg, ins, n);
7175 CHECK_STACK_OVF (1);
7176 n = (*ip)-CEE_LDLOC_0;
7178 EMIT_NEW_LOCLOAD (cfg, ins, n);
7187 n = (*ip)-CEE_STLOC_0;
7190 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7192 emit_stloc_ir (cfg, sp, header, n);
7199 CHECK_STACK_OVF (1);
7202 EMIT_NEW_ARGLOAD (cfg, ins, n);
7208 CHECK_STACK_OVF (1);
7211 NEW_ARGLOADA (cfg, ins, n);
7212 MONO_ADD_INS (cfg->cbb, ins);
7222 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7224 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7229 CHECK_STACK_OVF (1);
7232 EMIT_NEW_LOCLOAD (cfg, ins, n);
7236 case CEE_LDLOCA_S: {
7237 unsigned char *tmp_ip;
7239 CHECK_STACK_OVF (1);
7240 CHECK_LOCAL (ip [1]);
7242 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7248 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7257 CHECK_LOCAL (ip [1]);
7258 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7260 emit_stloc_ir (cfg, sp, header, ip [1]);
7265 CHECK_STACK_OVF (1);
7266 EMIT_NEW_PCONST (cfg, ins, NULL);
7267 ins->type = STACK_OBJ;
7272 CHECK_STACK_OVF (1);
7273 EMIT_NEW_ICONST (cfg, ins, -1);
7286 CHECK_STACK_OVF (1);
7287 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7293 CHECK_STACK_OVF (1);
7295 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7301 CHECK_STACK_OVF (1);
7302 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7308 CHECK_STACK_OVF (1);
7309 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7310 ins->type = STACK_I8;
7311 ins->dreg = alloc_dreg (cfg, STACK_I8);
7313 ins->inst_l = (gint64)read64 (ip);
7314 MONO_ADD_INS (bblock, ins);
7320 gboolean use_aotconst = FALSE;
7322 #ifdef TARGET_POWERPC
7323 /* FIXME: Clean this up */
7324 if (cfg->compile_aot)
7325 use_aotconst = TRUE;
7328 /* FIXME: we should really allocate this only late in the compilation process */
7329 f = mono_domain_alloc (cfg->domain, sizeof (float));
7331 CHECK_STACK_OVF (1);
7337 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7339 dreg = alloc_freg (cfg);
7340 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7341 ins->type = STACK_R8;
7343 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7344 ins->type = STACK_R8;
7345 ins->dreg = alloc_dreg (cfg, STACK_R8);
7347 MONO_ADD_INS (bblock, ins);
7357 gboolean use_aotconst = FALSE;
7359 #ifdef TARGET_POWERPC
7360 /* FIXME: Clean this up */
7361 if (cfg->compile_aot)
7362 use_aotconst = TRUE;
7365 /* FIXME: we should really allocate this only late in the compilation process */
7366 d = mono_domain_alloc (cfg->domain, sizeof (double));
7368 CHECK_STACK_OVF (1);
7374 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7376 dreg = alloc_freg (cfg);
7377 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7378 ins->type = STACK_R8;
7380 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7381 ins->type = STACK_R8;
7382 ins->dreg = alloc_dreg (cfg, STACK_R8);
7384 MONO_ADD_INS (bblock, ins);
7393 MonoInst *temp, *store;
7395 CHECK_STACK_OVF (1);
7399 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7400 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7402 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7405 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7418 if (sp [0]->type == STACK_R8)
7419 /* we need to pop the value from the x86 FP stack */
7420 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7426 INLINE_FAILURE ("jmp");
7427 GSHAREDVT_FAILURE (*ip);
7430 if (stack_start != sp)
7432 token = read32 (ip + 1);
7433 /* FIXME: check the signature matches */
7434 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7436 if (!cmethod || mono_loader_get_last_error ())
7439 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7440 GENERIC_SHARING_FAILURE (CEE_JMP);
7442 if (mono_security_cas_enabled ())
7443 CHECK_CFG_EXCEPTION;
7445 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7447 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7450 /* Handle tail calls similarly to calls */
7451 n = fsig->param_count + fsig->hasthis;
7453 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7454 call->method = cmethod;
7455 call->tail_call = TRUE;
7456 call->signature = mono_method_signature (cmethod);
7457 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7458 call->inst.inst_p0 = cmethod;
7459 for (i = 0; i < n; ++i)
7460 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7462 mono_arch_emit_call (cfg, call);
7463 MONO_ADD_INS (bblock, (MonoInst*)call);
7466 for (i = 0; i < num_args; ++i)
7467 /* Prevent arguments from being optimized away */
7468 arg_array [i]->flags |= MONO_INST_VOLATILE;
7470 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7471 ins = (MonoInst*)call;
7472 ins->inst_p0 = cmethod;
7473 MONO_ADD_INS (bblock, ins);
7477 start_new_bblock = 1;
7482 case CEE_CALLVIRT: {
7483 MonoInst *addr = NULL;
7484 MonoMethodSignature *fsig = NULL;
7486 int virtual = *ip == CEE_CALLVIRT;
7487 int calli = *ip == CEE_CALLI;
7488 gboolean pass_imt_from_rgctx = FALSE;
7489 MonoInst *imt_arg = NULL;
7490 MonoInst *keep_this_alive = NULL;
7491 gboolean pass_vtable = FALSE;
7492 gboolean pass_mrgctx = FALSE;
7493 MonoInst *vtable_arg = NULL;
7494 gboolean check_this = FALSE;
7495 gboolean supported_tail_call = FALSE;
7496 gboolean tail_call = FALSE;
7497 gboolean need_seq_point = FALSE;
7498 guint32 call_opcode = *ip;
7499 gboolean emit_widen = TRUE;
7500 gboolean push_res = TRUE;
7501 gboolean skip_ret = FALSE;
7502 gboolean delegate_invoke = FALSE;
7505 token = read32 (ip + 1);
7510 //GSHAREDVT_FAILURE (*ip);
7515 fsig = mini_get_signature (method, token, generic_context);
7516 n = fsig->param_count + fsig->hasthis;
7518 if (method->dynamic && fsig->pinvoke) {
7522 * This is a call through a function pointer using a pinvoke
7523 * signature. Have to create a wrapper and call that instead.
7524 * FIXME: This is very slow, need to create a wrapper at JIT time
7525 * instead based on the signature.
7527 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7528 EMIT_NEW_PCONST (cfg, args [1], fsig);
7530 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7533 MonoMethod *cil_method;
7535 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7536 cil_method = cmethod;
7538 if (constrained_call) {
7539 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7540 if (cfg->verbose_level > 2)
7541 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7542 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7543 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7544 cfg->generic_sharing_context)) {
7545 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7548 if (cfg->verbose_level > 2)
7549 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7551 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7553 * This is needed since get_method_constrained can't find
7554 * the method in klass representing a type var.
7555 * The type var is guaranteed to be a reference type in this
7558 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7559 g_assert (!cmethod->klass->valuetype);
7561 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7566 if (!cmethod || mono_loader_get_last_error ())
7568 if (!dont_verify && !cfg->skip_visibility) {
7569 MonoMethod *target_method = cil_method;
7570 if (method->is_inflated) {
7571 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7573 if (!mono_method_can_access_method (method_definition, target_method) &&
7574 !mono_method_can_access_method (method, cil_method))
7575 METHOD_ACCESS_FAILURE;
7578 if (mono_security_core_clr_enabled ())
7579 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7581 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7582 /* MS.NET seems to silently convert this to a callvirt */
7587 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7588 * converts to a callvirt.
7590 * tests/bug-515884.il is an example of this behavior
7592 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7593 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7594 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7598 if (!cmethod->klass->inited)
7599 if (!mono_class_init (cmethod->klass))
7600 TYPE_LOAD_ERROR (cmethod->klass);
7602 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7603 mini_class_is_system_array (cmethod->klass)) {
7604 array_rank = cmethod->klass->rank;
7605 fsig = mono_method_signature (cmethod);
7607 fsig = mono_method_signature (cmethod);
7612 if (fsig->pinvoke) {
7613 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7614 check_for_pending_exc, FALSE);
7615 fsig = mono_method_signature (wrapper);
7616 } else if (constrained_call) {
7617 fsig = mono_method_signature (cmethod);
7619 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7623 mono_save_token_info (cfg, image, token, cil_method);
7625 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7627 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7628 * foo (bar (), baz ())
7629 * works correctly. MS does this also:
7630 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7631 * The problem with this approach is that the debugger will stop after all calls returning a value,
7632 * even for simple cases, like:
7635 /* Special case a few common successor opcodes */
7636 if (!(ip + 5 < end && ip [5] == CEE_POP))
7637 need_seq_point = TRUE;
7640 n = fsig->param_count + fsig->hasthis;
7642 /* Don't support calls made using type arguments for now */
7644 if (cfg->gsharedvt) {
7645 if (mini_is_gsharedvt_signature (cfg, fsig))
7646 GSHAREDVT_FAILURE (*ip);
7650 if (mono_security_cas_enabled ()) {
7651 if (check_linkdemand (cfg, method, cmethod))
7652 INLINE_FAILURE ("linkdemand");
7653 CHECK_CFG_EXCEPTION;
7656 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7657 g_assert_not_reached ();
7660 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7663 if (!cfg->generic_sharing_context && cmethod)
7664 g_assert (!mono_method_check_context_used (cmethod));
7668 //g_assert (!virtual || fsig->hasthis);
7672 if (constrained_call) {
7673 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7675 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7677 /* Special case Object methods as they are easy to implement */
7678 if (cmethod->klass == mono_defaults.object_class) {
7679 MonoInst *args [16];
7682 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7683 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7685 if (!strcmp (cmethod->name, "ToString")) {
7686 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7687 } else if (!strcmp (cmethod->name, "Equals")) {
7689 ins = mono_emit_jit_icall (cfg, mono_object_equals_gsharedvt, args);
7690 } else if (!strcmp (cmethod->name, "GetHashCode")) {
7691 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7693 GSHAREDVT_FAILURE (*ip);
7696 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7697 /* The 'Own method' case below */
7698 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && MONO_TYPE_IS_VOID (fsig->ret) && (fsig->param_count == 0 || (fsig->param_count == 1 && MONO_TYPE_IS_REFERENCE (fsig->params [0])))) {
7699 MonoInst *args [16];
7702 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7703 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7705 if (fsig->param_count) {
7706 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7707 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7708 ins->dreg = alloc_preg (cfg);
7709 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7710 MONO_ADD_INS (cfg->cbb, ins);
7713 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [3]->dreg, 0, sp [1]->dreg);
7715 EMIT_NEW_ICONST (cfg, args [3], 0);
7718 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7721 GSHAREDVT_FAILURE (*ip);
7725 * We have the `constrained.' prefix opcode.
7727 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7729 * The type parameter is instantiated as a valuetype,
7730 * but that type doesn't override the method we're
7731 * calling, so we need to box `this'.
7733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7734 ins->klass = constrained_call;
7735 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7736 CHECK_CFG_EXCEPTION;
7737 } else if (!constrained_call->valuetype) {
7738 int dreg = alloc_ireg_ref (cfg);
7741 * The type parameter is instantiated as a reference
7742 * type. We have a managed pointer on the stack, so
7743 * we need to dereference it here.
7745 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7746 ins->type = STACK_OBJ;
7749 if (cmethod->klass->valuetype) {
7752 /* Interface method */
7755 mono_class_setup_vtable (constrained_call);
7756 CHECK_TYPELOAD (constrained_call);
7757 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7759 TYPE_LOAD_ERROR (constrained_call);
7760 slot = mono_method_get_vtable_slot (cmethod);
7762 TYPE_LOAD_ERROR (cmethod->klass);
7763 cmethod = constrained_call->vtable [ioffset + slot];
7765 if (cmethod->klass == mono_defaults.enum_class) {
7766 /* Enum implements some interfaces, so treat this as the first case */
7767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7768 ins->klass = constrained_call;
7769 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7770 CHECK_CFG_EXCEPTION;
7775 constrained_call = NULL;
7778 if (!calli && check_call_signature (cfg, fsig, sp))
7781 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7782 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7783 delegate_invoke = TRUE;
7786 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7788 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7789 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7797 * If the callee is a shared method, then its static cctor
7798 * might not get called after the call was patched.
7800 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7801 emit_generic_class_init (cfg, cmethod->klass);
7802 CHECK_TYPELOAD (cmethod->klass);
7806 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7808 if (cfg->generic_sharing_context && cmethod) {
7809 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7811 context_used = mini_method_check_context_used (cfg, cmethod);
7813 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7814 /* Generic method interface
7815 calls are resolved via a
7816 helper function and don't
7818 if (!cmethod_context || !cmethod_context->method_inst)
7819 pass_imt_from_rgctx = TRUE;
7823 * If a shared method calls another
7824 * shared method then the caller must
7825 * have a generic sharing context
7826 * because the magic trampoline
7827 * requires it. FIXME: We shouldn't
7828 * have to force the vtable/mrgctx
7829 * variable here. Instead there
7830 * should be a flag in the cfg to
7831 * request a generic sharing context.
7834 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7835 mono_get_vtable_var (cfg);
7840 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7842 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7844 CHECK_TYPELOAD (cmethod->klass);
7845 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7850 g_assert (!vtable_arg);
7852 if (!cfg->compile_aot) {
7854 * emit_get_rgctx_method () calls mono_class_vtable () so check
7855 * for type load errors before.
7857 mono_class_setup_vtable (cmethod->klass);
7858 CHECK_TYPELOAD (cmethod->klass);
7861 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7863 /* !marshalbyref is needed to properly handle generic methods + remoting */
7864 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7865 MONO_METHOD_IS_FINAL (cmethod)) &&
7866 !mono_class_is_marshalbyref (cmethod->klass)) {
7873 if (pass_imt_from_rgctx) {
7874 g_assert (!pass_vtable);
7877 imt_arg = emit_get_rgctx_method (cfg, context_used,
7878 cmethod, MONO_RGCTX_INFO_METHOD);
7882 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7884 /* Calling virtual generic methods */
7885 if (cmethod && virtual &&
7886 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7887 !(MONO_METHOD_IS_FINAL (cmethod) &&
7888 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7889 fsig->generic_param_count &&
7890 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7891 MonoInst *this_temp, *this_arg_temp, *store;
7892 MonoInst *iargs [4];
7893 gboolean use_imt = FALSE;
7895 g_assert (fsig->is_inflated);
7897 /* Prevent inlining of methods that contain indirect calls */
7898 INLINE_FAILURE ("virtual generic call");
7900 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7901 GSHAREDVT_FAILURE (*ip);
7903 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7904 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7909 g_assert (!imt_arg);
7911 g_assert (cmethod->is_inflated);
7912 imt_arg = emit_get_rgctx_method (cfg, context_used,
7913 cmethod, MONO_RGCTX_INFO_METHOD);
7914 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
7916 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7917 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7918 MONO_ADD_INS (bblock, store);
7920 /* FIXME: This should be a managed pointer */
7921 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7923 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7924 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7925 cmethod, MONO_RGCTX_INFO_METHOD);
7926 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7927 addr = mono_emit_jit_icall (cfg,
7928 mono_helper_compile_generic_method, iargs);
7930 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7932 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7939 * Implement a workaround for the inherent races involved in locking:
7945 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7946 * try block, the Exit () won't be executed, see:
7947 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7948 * To work around this, we extend such try blocks to include the last x bytes
7949 * of the Monitor.Enter () call.
7951 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7952 MonoBasicBlock *tbb;
7954 GET_BBLOCK (cfg, tbb, ip + 5);
7956 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7957 * from Monitor.Enter like ArgumentNullException.
7959 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7960 /* Mark this bblock as needing to be extended */
7961 tbb->extend_try_block = TRUE;
7965 /* Conversion to a JIT intrinsic */
7966 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7968 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7969 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7976 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7977 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7978 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7979 !g_list_find (dont_inline, cmethod)) {
7981 gboolean always = FALSE;
7983 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7984 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7985 /* Prevent inlining of methods that call wrappers */
7986 INLINE_FAILURE ("wrapper call");
7987 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7991 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7993 cfg->real_offset += 5;
7996 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7997 /* *sp is already set by inline_method */
8002 inline_costs += costs;
8008 /* Tail recursion elimination */
8009 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8010 gboolean has_vtargs = FALSE;
8013 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8014 INLINE_FAILURE ("tail call");
8016 /* keep it simple */
8017 for (i = fsig->param_count - 1; i >= 0; i--) {
8018 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8023 for (i = 0; i < n; ++i)
8024 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8025 MONO_INST_NEW (cfg, ins, OP_BR);
8026 MONO_ADD_INS (bblock, ins);
8027 tblock = start_bblock->out_bb [0];
8028 link_bblock (cfg, bblock, tblock);
8029 ins->inst_target_bb = tblock;
8030 start_new_bblock = 1;
8032 /* skip the CEE_RET, too */
8033 if (ip_in_bb (cfg, bblock, ip + 5))
8040 inline_costs += 10 * num_calls++;
8043 * Making generic calls out of gsharedvt methods.
8045 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8046 MonoRgctxInfoType info_type;
8049 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8050 //GSHAREDVT_FAILURE (*ip);
8051 // disable for possible remoting calls
8052 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8053 GSHAREDVT_FAILURE (*ip);
8054 if (fsig->generic_param_count) {
8055 /* virtual generic call */
8056 g_assert (mono_use_imt);
8057 g_assert (!imt_arg);
8058 /* Same as the virtual generic case above */
8059 imt_arg = emit_get_rgctx_method (cfg, context_used,
8060 cmethod, MONO_RGCTX_INFO_METHOD);
8061 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8066 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8067 /* test_0_multi_dim_arrays () in gshared.cs */
8068 GSHAREDVT_FAILURE (*ip);
8070 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8071 keep_this_alive = sp [0];
8073 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8074 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8076 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8077 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8079 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8081 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8083 * We pass the address to the gsharedvt trampoline in the rgctx reg
8085 MonoInst *callee = addr;
8087 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8089 GSHAREDVT_FAILURE (*ip);
8091 addr = emit_get_rgctx_sig (cfg, context_used,
8092 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8093 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8097 /* Generic sharing */
8098 /* FIXME: only do this for generic methods if
8099 they are not shared! */
8100 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8101 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8102 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8103 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8104 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8105 INLINE_FAILURE ("gshared");
8107 g_assert (cfg->generic_sharing_context && cmethod);
8111 * We are compiling a call to a
8112 * generic method from shared code,
8113 * which means that we have to look up
8114 * the method in the rgctx and do an
8118 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8120 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8121 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8125 /* Indirect calls */
8127 if (call_opcode == CEE_CALL)
8128 g_assert (context_used);
8129 else if (call_opcode == CEE_CALLI)
8130 g_assert (!vtable_arg);
8132 /* FIXME: what the hell is this??? */
8133 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8134 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8136 /* Prevent inlining of methods with indirect calls */
8137 INLINE_FAILURE ("indirect call");
8139 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8144 * Instead of emitting an indirect call, emit a direct call
8145 * with the contents of the aotconst as the patch info.
8147 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8148 info_type = addr->inst_c1;
8149 info_data = addr->inst_p0;
8151 info_type = addr->inst_right->inst_c1;
8152 info_data = addr->inst_right->inst_left;
8155 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8156 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8161 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8169 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8170 MonoInst *val = sp [fsig->param_count];
8172 if (val->type == STACK_OBJ) {
8173 MonoInst *iargs [2];
8178 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8181 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8182 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8183 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8184 emit_write_barrier (cfg, addr, val);
8185 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8186 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8189 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8190 if (!cmethod->klass->element_class->valuetype && !readonly)
8191 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8192 CHECK_TYPELOAD (cmethod->klass);
8195 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8198 g_assert_not_reached ();
8205 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8209 /* Tail prefix / tail call optimization */
8211 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8212 /* FIXME: runtime generic context pointer for jumps? */
8213 /* FIXME: handle this for generic sharing eventually */
8214 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8215 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
8216 supported_tail_call = TRUE;
8217 if (supported_tail_call) {
8218 if (call_opcode != CEE_CALL)
8219 supported_tail_call = FALSE;
8222 if (supported_tail_call) {
8225 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8226 INLINE_FAILURE ("tail call");
8228 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8230 if (ARCH_USE_OP_TAIL_CALL) {
8231 /* Handle tail calls similarly to normal calls */
8234 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8235 call->tail_call = TRUE;
8236 call->method = cmethod;
8237 call->signature = mono_method_signature (cmethod);
8240 * We implement tail calls by storing the actual arguments into the
8241 * argument variables, then emitting a CEE_JMP.
8243 for (i = 0; i < n; ++i) {
8244 /* Prevent argument from being register allocated */
8245 arg_array [i]->flags |= MONO_INST_VOLATILE;
8246 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8248 ins = (MonoInst*)call;
8249 ins->inst_p0 = cmethod;
8250 ins->inst_p1 = arg_array [0];
8251 MONO_ADD_INS (bblock, ins);
8252 link_bblock (cfg, bblock, end_bblock);
8253 start_new_bblock = 1;
8255 // FIXME: Eliminate unreachable epilogs
8258 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8259 * only reachable from this call.
8261 GET_BBLOCK (cfg, tblock, ip + 5);
8262 if (tblock == bblock || tblock->in_count == 0)
8271 * Synchronized wrappers.
8272 * It's hard to determine where to replace a method with its synchronized
8273 * wrapper without causing an infinite recursion. The current solution is
8274 * to add the synchronized wrapper in the trampolines, and to
8275 * change the called method to a dummy wrapper, and resolve that wrapper
8276 * to the real method in mono_jit_compile_method ().
8278 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8279 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8280 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8281 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8285 INLINE_FAILURE ("call");
8286 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8287 imt_arg, vtable_arg);
8290 link_bblock (cfg, bblock, end_bblock);
8291 start_new_bblock = 1;
8293 // FIXME: Eliminate unreachable epilogs
8296 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8297 * only reachable from this call.
8299 GET_BBLOCK (cfg, tblock, ip + 5);
8300 if (tblock == bblock || tblock->in_count == 0)
8307 /* End of call, INS should contain the result of the call, if any */
8309 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8312 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8317 if (keep_this_alive) {
8318 MonoInst *dummy_use;
8320 /* See mono_emit_method_call_full () */
8321 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8324 CHECK_CFG_EXCEPTION;
8328 g_assert (*ip == CEE_RET);
8332 constrained_call = NULL;
8334 emit_seq_point (cfg, method, ip, FALSE);
8338 if (cfg->method != method) {
8339 /* return from inlined method */
8341 * If in_count == 0, that means the ret is unreachable due to
8342 * being preceded by a throw. In that case, inline_method () will
8343 * handle setting the return value
8344 * (test case: test_0_inline_throw ()).
8346 if (return_var && cfg->cbb->in_count) {
8347 MonoType *ret_type = mono_method_signature (method)->ret;
8353 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8356 //g_assert (returnvar != -1);
8357 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8358 cfg->ret_var_set = TRUE;
8361 if (cfg->lmf_var && cfg->cbb->in_count)
8365 MonoType *ret_type = mono_method_signature (method)->ret;
8367 if (seq_points && !sym_seq_points) {
8369 * Place a seq point here too even through the IL stack is not
8370 * empty, so a step over on
8373 * will work correctly.
8375 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8376 MONO_ADD_INS (cfg->cbb, ins);
8379 g_assert (!return_var);
8383 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8386 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8389 if (!cfg->vret_addr) {
8392 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8394 EMIT_NEW_RETLOADA (cfg, ret_addr);
8396 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8397 ins->klass = mono_class_from_mono_type (ret_type);
8400 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8401 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8402 MonoInst *iargs [1];
8406 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8407 mono_arch_emit_setret (cfg, method, conv);
8409 mono_arch_emit_setret (cfg, method, *sp);
8412 mono_arch_emit_setret (cfg, method, *sp);
8417 if (sp != stack_start)
8419 MONO_INST_NEW (cfg, ins, OP_BR);
8421 ins->inst_target_bb = end_bblock;
8422 MONO_ADD_INS (bblock, ins);
8423 link_bblock (cfg, bblock, end_bblock);
8424 start_new_bblock = 1;
8428 MONO_INST_NEW (cfg, ins, OP_BR);
8430 target = ip + 1 + (signed char)(*ip);
8432 GET_BBLOCK (cfg, tblock, target);
8433 link_bblock (cfg, bblock, tblock);
8434 ins->inst_target_bb = tblock;
8435 if (sp != stack_start) {
8436 handle_stack_args (cfg, stack_start, sp - stack_start);
8438 CHECK_UNVERIFIABLE (cfg);
8440 MONO_ADD_INS (bblock, ins);
8441 start_new_bblock = 1;
8442 inline_costs += BRANCH_COST;
8456 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8458 target = ip + 1 + *(signed char*)ip;
8464 inline_costs += BRANCH_COST;
8468 MONO_INST_NEW (cfg, ins, OP_BR);
8471 target = ip + 4 + (gint32)read32(ip);
8473 GET_BBLOCK (cfg, tblock, target);
8474 link_bblock (cfg, bblock, tblock);
8475 ins->inst_target_bb = tblock;
8476 if (sp != stack_start) {
8477 handle_stack_args (cfg, stack_start, sp - stack_start);
8479 CHECK_UNVERIFIABLE (cfg);
8482 MONO_ADD_INS (bblock, ins);
8484 start_new_bblock = 1;
8485 inline_costs += BRANCH_COST;
8492 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8493 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8494 guint32 opsize = is_short ? 1 : 4;
8496 CHECK_OPSIZE (opsize);
8498 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8501 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8506 GET_BBLOCK (cfg, tblock, target);
8507 link_bblock (cfg, bblock, tblock);
8508 GET_BBLOCK (cfg, tblock, ip);
8509 link_bblock (cfg, bblock, tblock);
8511 if (sp != stack_start) {
8512 handle_stack_args (cfg, stack_start, sp - stack_start);
8513 CHECK_UNVERIFIABLE (cfg);
8516 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8517 cmp->sreg1 = sp [0]->dreg;
8518 type_from_op (cmp, sp [0], NULL);
8521 #if SIZEOF_REGISTER == 4
8522 if (cmp->opcode == OP_LCOMPARE_IMM) {
8523 /* Convert it to OP_LCOMPARE */
8524 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8525 ins->type = STACK_I8;
8526 ins->dreg = alloc_dreg (cfg, STACK_I8);
8528 MONO_ADD_INS (bblock, ins);
8529 cmp->opcode = OP_LCOMPARE;
8530 cmp->sreg2 = ins->dreg;
8533 MONO_ADD_INS (bblock, cmp);
8535 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8536 type_from_op (ins, sp [0], NULL);
8537 MONO_ADD_INS (bblock, ins);
8538 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8539 GET_BBLOCK (cfg, tblock, target);
8540 ins->inst_true_bb = tblock;
8541 GET_BBLOCK (cfg, tblock, ip);
8542 ins->inst_false_bb = tblock;
8543 start_new_bblock = 2;
8546 inline_costs += BRANCH_COST;
8561 MONO_INST_NEW (cfg, ins, *ip);
8563 target = ip + 4 + (gint32)read32(ip);
8569 inline_costs += BRANCH_COST;
8573 MonoBasicBlock **targets;
8574 MonoBasicBlock *default_bblock;
8575 MonoJumpInfoBBTable *table;
8576 int offset_reg = alloc_preg (cfg);
8577 int target_reg = alloc_preg (cfg);
8578 int table_reg = alloc_preg (cfg);
8579 int sum_reg = alloc_preg (cfg);
8580 gboolean use_op_switch;
8584 n = read32 (ip + 1);
8587 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8591 CHECK_OPSIZE (n * sizeof (guint32));
8592 target = ip + n * sizeof (guint32);
8594 GET_BBLOCK (cfg, default_bblock, target);
8595 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8597 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8598 for (i = 0; i < n; ++i) {
8599 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8600 targets [i] = tblock;
8601 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8605 if (sp != stack_start) {
8607 * Link the current bb with the targets as well, so handle_stack_args
8608 * will set their in_stack correctly.
8610 link_bblock (cfg, bblock, default_bblock);
8611 for (i = 0; i < n; ++i)
8612 link_bblock (cfg, bblock, targets [i]);
8614 handle_stack_args (cfg, stack_start, sp - stack_start);
8616 CHECK_UNVERIFIABLE (cfg);
8619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8620 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8623 for (i = 0; i < n; ++i)
8624 link_bblock (cfg, bblock, targets [i]);
8626 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8627 table->table = targets;
8628 table->table_size = n;
8630 use_op_switch = FALSE;
8632 /* ARM implements SWITCH statements differently */
8633 /* FIXME: Make it use the generic implementation */
8634 if (!cfg->compile_aot)
8635 use_op_switch = TRUE;
8638 if (COMPILE_LLVM (cfg))
8639 use_op_switch = TRUE;
8641 cfg->cbb->has_jump_table = 1;
8643 if (use_op_switch) {
8644 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8645 ins->sreg1 = src1->dreg;
8646 ins->inst_p0 = table;
8647 ins->inst_many_bb = targets;
8648 ins->klass = GUINT_TO_POINTER (n);
8649 MONO_ADD_INS (cfg->cbb, ins);
8651 if (sizeof (gpointer) == 8)
8652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8656 #if SIZEOF_REGISTER == 8
8657 /* The upper word might not be zero, and we add it to a 64 bit address later */
8658 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8661 if (cfg->compile_aot) {
8662 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8664 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8665 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8666 ins->inst_p0 = table;
8667 ins->dreg = table_reg;
8668 MONO_ADD_INS (cfg->cbb, ins);
8671 /* FIXME: Use load_memindex */
8672 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8674 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8676 start_new_bblock = 1;
8677 inline_costs += (BRANCH_COST * 2);
8697 dreg = alloc_freg (cfg);
8700 dreg = alloc_lreg (cfg);
8703 dreg = alloc_ireg_ref (cfg);
8706 dreg = alloc_preg (cfg);
8709 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8710 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8711 ins->flags |= ins_flag;
8713 MONO_ADD_INS (bblock, ins);
8715 if (ins->flags & MONO_INST_VOLATILE) {
8716 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8717 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8718 emit_memory_barrier (cfg, FullBarrier);
8733 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8734 ins->flags |= ins_flag;
8737 if (ins->flags & MONO_INST_VOLATILE) {
8738 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8739 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8740 emit_memory_barrier (cfg, FullBarrier);
8743 MONO_ADD_INS (bblock, ins);
8745 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8746 emit_write_barrier (cfg, sp [0], sp [1]);
8755 MONO_INST_NEW (cfg, ins, (*ip));
8757 ins->sreg1 = sp [0]->dreg;
8758 ins->sreg2 = sp [1]->dreg;
8759 type_from_op (ins, sp [0], sp [1]);
8761 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8763 /* Use the immediate opcodes if possible */
8764 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8765 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8766 if (imm_opcode != -1) {
8767 ins->opcode = imm_opcode;
8768 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8771 sp [1]->opcode = OP_NOP;
8775 MONO_ADD_INS ((cfg)->cbb, (ins));
8777 *sp++ = mono_decompose_opcode (cfg, ins);
8794 MONO_INST_NEW (cfg, ins, (*ip));
8796 ins->sreg1 = sp [0]->dreg;
8797 ins->sreg2 = sp [1]->dreg;
8798 type_from_op (ins, sp [0], sp [1]);
8800 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8801 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8803 /* FIXME: Pass opcode to is_inst_imm */
8805 /* Use the immediate opcodes if possible */
8806 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8809 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8810 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8811 /* Keep emulated opcodes which are optimized away later */
8812 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8813 imm_opcode = mono_op_to_op_imm (ins->opcode);
8816 if (imm_opcode != -1) {
8817 ins->opcode = imm_opcode;
8818 if (sp [1]->opcode == OP_I8CONST) {
8819 #if SIZEOF_REGISTER == 8
8820 ins->inst_imm = sp [1]->inst_l;
8822 ins->inst_ls_word = sp [1]->inst_ls_word;
8823 ins->inst_ms_word = sp [1]->inst_ms_word;
8827 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8830 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8831 if (sp [1]->next == NULL)
8832 sp [1]->opcode = OP_NOP;
8835 MONO_ADD_INS ((cfg)->cbb, (ins));
8837 *sp++ = mono_decompose_opcode (cfg, ins);
8850 case CEE_CONV_OVF_I8:
8851 case CEE_CONV_OVF_U8:
8855 /* Special case this earlier so we have long constants in the IR */
8856 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8857 int data = sp [-1]->inst_c0;
8858 sp [-1]->opcode = OP_I8CONST;
8859 sp [-1]->type = STACK_I8;
8860 #if SIZEOF_REGISTER == 8
8861 if ((*ip) == CEE_CONV_U8)
8862 sp [-1]->inst_c0 = (guint32)data;
8864 sp [-1]->inst_c0 = data;
8866 sp [-1]->inst_ls_word = data;
8867 if ((*ip) == CEE_CONV_U8)
8868 sp [-1]->inst_ms_word = 0;
8870 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8872 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8879 case CEE_CONV_OVF_I4:
8880 case CEE_CONV_OVF_I1:
8881 case CEE_CONV_OVF_I2:
8882 case CEE_CONV_OVF_I:
8883 case CEE_CONV_OVF_U:
8886 if (sp [-1]->type == STACK_R8) {
8887 ADD_UNOP (CEE_CONV_OVF_I8);
8894 case CEE_CONV_OVF_U1:
8895 case CEE_CONV_OVF_U2:
8896 case CEE_CONV_OVF_U4:
8899 if (sp [-1]->type == STACK_R8) {
8900 ADD_UNOP (CEE_CONV_OVF_U8);
8907 case CEE_CONV_OVF_I1_UN:
8908 case CEE_CONV_OVF_I2_UN:
8909 case CEE_CONV_OVF_I4_UN:
8910 case CEE_CONV_OVF_I8_UN:
8911 case CEE_CONV_OVF_U1_UN:
8912 case CEE_CONV_OVF_U2_UN:
8913 case CEE_CONV_OVF_U4_UN:
8914 case CEE_CONV_OVF_U8_UN:
8915 case CEE_CONV_OVF_I_UN:
8916 case CEE_CONV_OVF_U_UN:
8923 CHECK_CFG_EXCEPTION;
8927 case CEE_ADD_OVF_UN:
8929 case CEE_MUL_OVF_UN:
8931 case CEE_SUB_OVF_UN:
8937 GSHAREDVT_FAILURE (*ip);
8940 token = read32 (ip + 1);
8941 klass = mini_get_class (method, token, generic_context);
8942 CHECK_TYPELOAD (klass);
8944 if (generic_class_is_reference_type (cfg, klass)) {
8945 MonoInst *store, *load;
8946 int dreg = alloc_ireg_ref (cfg);
8948 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8949 load->flags |= ins_flag;
8950 MONO_ADD_INS (cfg->cbb, load);
8952 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8953 store->flags |= ins_flag;
8954 MONO_ADD_INS (cfg->cbb, store);
8956 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8957 emit_write_barrier (cfg, sp [0], sp [1]);
8959 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8971 token = read32 (ip + 1);
8972 klass = mini_get_class (method, token, generic_context);
8973 CHECK_TYPELOAD (klass);
8975 /* Optimize the common ldobj+stloc combination */
8985 loc_index = ip [5] - CEE_STLOC_0;
8992 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8993 CHECK_LOCAL (loc_index);
8995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8996 ins->dreg = cfg->locals [loc_index]->dreg;
9002 /* Optimize the ldobj+stobj combination */
9003 /* The reference case ends up being a load+store anyway */
9004 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9009 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9016 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9025 CHECK_STACK_OVF (1);
9027 n = read32 (ip + 1);
9029 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9030 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9031 ins->type = STACK_OBJ;
9034 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9035 MonoInst *iargs [1];
9037 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9038 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9040 if (cfg->opt & MONO_OPT_SHARED) {
9041 MonoInst *iargs [3];
9043 if (cfg->compile_aot) {
9044 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9046 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9047 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9048 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9049 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9050 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9052 if (bblock->out_of_line) {
9053 MonoInst *iargs [2];
9055 if (image == mono_defaults.corlib) {
9057 * Avoid relocations in AOT and save some space by using a
9058 * version of helper_ldstr specialized to mscorlib.
9060 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9061 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9063 /* Avoid creating the string object */
9064 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9065 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9066 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9070 if (cfg->compile_aot) {
9071 NEW_LDSTRCONST (cfg, ins, image, n);
9073 MONO_ADD_INS (bblock, ins);
9076 NEW_PCONST (cfg, ins, NULL);
9077 ins->type = STACK_OBJ;
9078 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9080 OUT_OF_MEMORY_FAILURE;
9083 MONO_ADD_INS (bblock, ins);
9092 MonoInst *iargs [2];
9093 MonoMethodSignature *fsig;
9096 MonoInst *vtable_arg = NULL;
9099 token = read32 (ip + 1);
9100 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9101 if (!cmethod || mono_loader_get_last_error ())
9103 fsig = mono_method_get_signature (cmethod, image, token);
9107 mono_save_token_info (cfg, image, token, cmethod);
9109 if (!mono_class_init (cmethod->klass))
9110 TYPE_LOAD_ERROR (cmethod->klass);
9112 context_used = mini_method_check_context_used (cfg, cmethod);
9114 if (mono_security_cas_enabled ()) {
9115 if (check_linkdemand (cfg, method, cmethod))
9116 INLINE_FAILURE ("linkdemand");
9117 CHECK_CFG_EXCEPTION;
9118 } else if (mono_security_core_clr_enabled ()) {
9119 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9122 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9123 emit_generic_class_init (cfg, cmethod->klass);
9124 CHECK_TYPELOAD (cmethod->klass);
9128 if (cfg->gsharedvt) {
9129 if (mini_is_gsharedvt_variable_signature (sig))
9130 GSHAREDVT_FAILURE (*ip);
9134 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9135 mono_method_is_generic_sharable (cmethod, TRUE)) {
9136 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9137 mono_class_vtable (cfg->domain, cmethod->klass);
9138 CHECK_TYPELOAD (cmethod->klass);
9140 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9141 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9144 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9145 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9147 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9149 CHECK_TYPELOAD (cmethod->klass);
9150 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9155 n = fsig->param_count;
9159 * Generate smaller code for the common newobj <exception> instruction in
9160 * argument checking code.
9162 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9163 is_exception_class (cmethod->klass) && n <= 2 &&
9164 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9165 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9166 MonoInst *iargs [3];
9168 g_assert (!vtable_arg);
9172 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9175 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9179 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9184 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9187 g_assert_not_reached ();
9195 /* move the args to allow room for 'this' in the first position */
9201 /* check_call_signature () requires sp[0] to be set */
9202 this_ins.type = STACK_OBJ;
9204 if (check_call_signature (cfg, fsig, sp))
9209 if (mini_class_is_system_array (cmethod->klass)) {
9210 g_assert (!vtable_arg);
9212 *sp = emit_get_rgctx_method (cfg, context_used,
9213 cmethod, MONO_RGCTX_INFO_METHOD);
9215 /* Avoid varargs in the common case */
9216 if (fsig->param_count == 1)
9217 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9218 else if (fsig->param_count == 2)
9219 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9220 else if (fsig->param_count == 3)
9221 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9222 else if (fsig->param_count == 4)
9223 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9225 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9226 } else if (cmethod->string_ctor) {
9227 g_assert (!context_used);
9228 g_assert (!vtable_arg);
9229 /* we simply pass a null pointer */
9230 EMIT_NEW_PCONST (cfg, *sp, NULL);
9231 /* now call the string ctor */
9232 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9234 MonoInst* callvirt_this_arg = NULL;
9236 if (cmethod->klass->valuetype) {
9237 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9238 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
9239 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9244 * The code generated by mini_emit_virtual_call () expects
9245 * iargs [0] to be a boxed instance, but luckily the vcall
9246 * will be transformed into a normal call there.
9248 } else if (context_used) {
9249 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9252 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9254 CHECK_TYPELOAD (cmethod->klass);
9257 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9258 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9259 * As a workaround, we call class cctors before allocating objects.
9261 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
9262 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9263 if (cfg->verbose_level > 2)
9264 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9265 class_inits = g_slist_prepend (class_inits, vtable);
9268 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9271 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9274 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9276 /* Now call the actual ctor */
9277 /* Avoid virtual calls to ctors if possible */
9278 if (mono_class_is_marshalbyref (cmethod->klass))
9279 callvirt_this_arg = sp [0];
9282 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9283 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9284 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9289 CHECK_CFG_EXCEPTION;
9290 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9291 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9292 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9293 !g_list_find (dont_inline, cmethod)) {
9296 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9297 cfg->real_offset += 5;
9300 inline_costs += costs - 5;
9302 INLINE_FAILURE ("inline failure");
9303 // FIXME-VT: Clean this up
9304 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9305 GSHAREDVT_FAILURE(*ip);
9306 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9308 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9311 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9312 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9313 } else if (context_used &&
9314 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9315 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9316 MonoInst *cmethod_addr;
9318 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9319 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9321 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9323 INLINE_FAILURE ("ctor call");
9324 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9325 callvirt_this_arg, NULL, vtable_arg);
9329 if (alloc == NULL) {
9331 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9332 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9346 token = read32 (ip + 1);
9347 klass = mini_get_class (method, token, generic_context);
9348 CHECK_TYPELOAD (klass);
9349 if (sp [0]->type != STACK_OBJ)
9352 context_used = mini_class_check_context_used (cfg, klass);
9354 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9355 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9362 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9365 if (cfg->compile_aot)
9366 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9368 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9370 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9371 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9374 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9375 MonoMethod *mono_castclass;
9376 MonoInst *iargs [1];
9379 mono_castclass = mono_marshal_get_castclass (klass);
9382 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9383 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9384 CHECK_CFG_EXCEPTION;
9385 g_assert (costs > 0);
9388 cfg->real_offset += 5;
9393 inline_costs += costs;
9396 ins = handle_castclass (cfg, klass, *sp, context_used);
9397 CHECK_CFG_EXCEPTION;
9407 token = read32 (ip + 1);
9408 klass = mini_get_class (method, token, generic_context);
9409 CHECK_TYPELOAD (klass);
9410 if (sp [0]->type != STACK_OBJ)
9413 context_used = mini_class_check_context_used (cfg, klass);
9415 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9416 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9423 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9426 if (cfg->compile_aot)
9427 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9429 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9431 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9434 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9435 MonoMethod *mono_isinst;
9436 MonoInst *iargs [1];
9439 mono_isinst = mono_marshal_get_isinst (klass);
9442 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9443 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9444 CHECK_CFG_EXCEPTION;
9445 g_assert (costs > 0);
9448 cfg->real_offset += 5;
9453 inline_costs += costs;
9456 ins = handle_isinst (cfg, klass, *sp, context_used);
9457 CHECK_CFG_EXCEPTION;
9464 case CEE_UNBOX_ANY: {
9468 token = read32 (ip + 1);
9469 klass = mini_get_class (method, token, generic_context);
9470 CHECK_TYPELOAD (klass);
9472 mono_save_token_info (cfg, image, token, klass);
9474 context_used = mini_class_check_context_used (cfg, klass);
9476 if (mini_is_gsharedvt_klass (cfg, klass)) {
9477 *sp = handle_unbox_gsharedvt (cfg, context_used, klass, *sp, &bblock);
9485 if (generic_class_is_reference_type (cfg, klass)) {
9486 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9487 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9488 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9495 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9498 /*FIXME AOT support*/
9499 if (cfg->compile_aot)
9500 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9502 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9504 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9505 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9508 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9509 MonoMethod *mono_castclass;
9510 MonoInst *iargs [1];
9513 mono_castclass = mono_marshal_get_castclass (klass);
9516 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9517 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9518 CHECK_CFG_EXCEPTION;
9519 g_assert (costs > 0);
9522 cfg->real_offset += 5;
9526 inline_costs += costs;
9528 ins = handle_castclass (cfg, klass, *sp, context_used);
9529 CHECK_CFG_EXCEPTION;
9537 if (mono_class_is_nullable (klass)) {
9538 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9545 ins = handle_unbox (cfg, klass, sp, context_used);
9551 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9564 token = read32 (ip + 1);
9565 klass = mini_get_class (method, token, generic_context);
9566 CHECK_TYPELOAD (klass);
9568 mono_save_token_info (cfg, image, token, klass);
9570 context_used = mini_class_check_context_used (cfg, klass);
9572 if (generic_class_is_reference_type (cfg, klass)) {
9578 if (klass == mono_defaults.void_class)
9580 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9582 /* frequent check in generic code: box (struct), brtrue */
9584 // FIXME: LLVM can't handle the inconsistent bb linking
9585 if (!mono_class_is_nullable (klass) &&
9586 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9587 (ip [5] == CEE_BRTRUE ||
9588 ip [5] == CEE_BRTRUE_S ||
9589 ip [5] == CEE_BRFALSE ||
9590 ip [5] == CEE_BRFALSE_S)) {
9591 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9593 MonoBasicBlock *true_bb, *false_bb;
9597 if (cfg->verbose_level > 3) {
9598 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9599 printf ("<box+brtrue opt>\n");
9607 target = ip + 1 + (signed char)(*ip);
9614 target = ip + 4 + (gint)(read32 (ip));
9618 g_assert_not_reached ();
9622 * We need to link both bblocks, since it is needed for handling stack
9623 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9624 * Branching to only one of them would lead to inconsistencies, so
9625 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9627 GET_BBLOCK (cfg, true_bb, target);
9628 GET_BBLOCK (cfg, false_bb, ip);
9630 mono_link_bblock (cfg, cfg->cbb, true_bb);
9631 mono_link_bblock (cfg, cfg->cbb, false_bb);
9633 if (sp != stack_start) {
9634 handle_stack_args (cfg, stack_start, sp - stack_start);
9636 CHECK_UNVERIFIABLE (cfg);
9639 if (COMPILE_LLVM (cfg)) {
9640 dreg = alloc_ireg (cfg);
9641 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9644 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9646 /* The JIT can't eliminate the iconst+compare */
9647 MONO_INST_NEW (cfg, ins, OP_BR);
9648 ins->inst_target_bb = is_true ? true_bb : false_bb;
9649 MONO_ADD_INS (cfg->cbb, ins);
9652 start_new_bblock = 1;
9656 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9658 CHECK_CFG_EXCEPTION;
9667 token = read32 (ip + 1);
9668 klass = mini_get_class (method, token, generic_context);
9669 CHECK_TYPELOAD (klass);
9671 mono_save_token_info (cfg, image, token, klass);
9673 context_used = mini_class_check_context_used (cfg, klass);
9675 if (mono_class_is_nullable (klass)) {
9678 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9679 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9683 ins = handle_unbox (cfg, klass, sp, context_used);
9696 MonoClassField *field;
9697 #ifndef DISABLE_REMOTING
9701 gboolean is_instance;
9703 gpointer addr = NULL;
9704 gboolean is_special_static;
9706 MonoInst *store_val = NULL;
9709 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9711 if (op == CEE_STFLD) {
9719 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9721 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9724 if (op == CEE_STSFLD) {
9732 token = read32 (ip + 1);
9733 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9734 field = mono_method_get_wrapper_data (method, token);
9735 klass = field->parent;
9738 field = mono_field_from_token (image, token, &klass, generic_context);
9742 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9743 FIELD_ACCESS_FAILURE;
9744 mono_class_init (klass);
9746 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9749 /* if the class is Critical then transparent code cannot access its fields */
9750 if (!is_instance && mono_security_core_clr_enabled ())
9751 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9753 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9754 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9755 if (mono_security_core_clr_enabled ())
9756 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9760 * LDFLD etc. is usable on static fields as well, so convert those cases to
9763 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9775 g_assert_not_reached ();
9777 is_instance = FALSE;
9780 context_used = mini_class_check_context_used (cfg, klass);
9784 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9785 if (op == CEE_STFLD) {
9786 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9788 #ifndef DISABLE_REMOTING
9789 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9790 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9791 MonoInst *iargs [5];
9793 GSHAREDVT_FAILURE (op);
9796 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9797 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9798 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9802 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9803 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9804 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9805 CHECK_CFG_EXCEPTION;
9806 g_assert (costs > 0);
9808 cfg->real_offset += 5;
9811 inline_costs += costs;
9813 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9820 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9822 if (mini_is_gsharedvt_klass (cfg, klass)) {
9823 MonoInst *offset_ins;
9825 context_used = mini_class_check_context_used (cfg, klass);
9827 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9828 dreg = alloc_ireg_mp (cfg);
9829 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9830 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9831 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9833 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9835 if (sp [0]->opcode != OP_LDADDR)
9836 store->flags |= MONO_INST_FAULT;
9838 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9839 /* insert call to write barrier */
9843 dreg = alloc_ireg_mp (cfg);
9844 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9845 emit_write_barrier (cfg, ptr, sp [1]);
9848 store->flags |= ins_flag;
9855 #ifndef DISABLE_REMOTING
9856 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9857 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9858 MonoInst *iargs [4];
9860 GSHAREDVT_FAILURE (op);
9863 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9864 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9865 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9866 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9867 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9868 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9869 CHECK_CFG_EXCEPTION;
9871 g_assert (costs > 0);
9873 cfg->real_offset += 5;
9877 inline_costs += costs;
9879 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9885 if (sp [0]->type == STACK_VTYPE) {
9888 /* Have to compute the address of the variable */
9890 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9892 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9894 g_assert (var->klass == klass);
9896 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9900 if (op == CEE_LDFLDA) {
9901 if (is_magic_tls_access (field)) {
9902 GSHAREDVT_FAILURE (*ip);
9904 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9906 if (sp [0]->type == STACK_OBJ) {
9907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9908 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9911 dreg = alloc_ireg_mp (cfg);
9913 if (mini_is_gsharedvt_klass (cfg, klass)) {
9914 MonoInst *offset_ins;
9916 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9917 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9919 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9921 ins->klass = mono_class_from_mono_type (field->type);
9922 ins->type = STACK_MP;
9928 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9930 if (mini_is_gsharedvt_klass (cfg, klass)) {
9931 MonoInst *offset_ins;
9933 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9934 dreg = alloc_ireg_mp (cfg);
9935 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9936 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9940 load->flags |= ins_flag;
9941 if (sp [0]->opcode != OP_LDADDR)
9942 load->flags |= MONO_INST_FAULT;
9956 * We can only support shared generic static
9957 * field access on architectures where the
9958 * trampoline code has been extended to handle
9959 * the generic class init.
9961 #ifndef MONO_ARCH_VTABLE_REG
9962 GENERIC_SHARING_FAILURE (op);
9965 context_used = mini_class_check_context_used (cfg, klass);
9967 ftype = mono_field_get_type (field);
9969 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9972 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9973 * to be called here.
9975 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9976 mono_class_vtable (cfg->domain, klass);
9977 CHECK_TYPELOAD (klass);
9979 mono_domain_lock (cfg->domain);
9980 if (cfg->domain->special_static_fields)
9981 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9982 mono_domain_unlock (cfg->domain);
9984 is_special_static = mono_class_field_is_special_static (field);
9986 /* Generate IR to compute the field address */
9987 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9989 * Fast access to TLS data
9990 * Inline version of get_thread_static_data () in
9994 int idx, static_data_reg, array_reg, dreg;
9995 MonoInst *thread_ins;
9997 GSHAREDVT_FAILURE (op);
9999 // offset &= 0x7fffffff;
10000 // idx = (offset >> 24) - 1;
10001 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10003 thread_ins = mono_get_thread_intrinsic (cfg);
10004 MONO_ADD_INS (cfg->cbb, thread_ins);
10005 static_data_reg = alloc_ireg (cfg);
10006 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10008 if (cfg->compile_aot) {
10009 int offset_reg, offset2_reg, idx_reg;
10011 /* For TLS variables, this will return the TLS offset */
10012 EMIT_NEW_SFLDACONST (cfg, ins, field);
10013 offset_reg = ins->dreg;
10014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10015 idx_reg = alloc_ireg (cfg);
10016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10017 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10019 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10020 array_reg = alloc_ireg (cfg);
10021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10022 offset2_reg = alloc_ireg (cfg);
10023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10024 dreg = alloc_ireg (cfg);
10025 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10027 offset = (gsize)addr & 0x7fffffff;
10028 idx = (offset >> 24) - 1;
10030 array_reg = alloc_ireg (cfg);
10031 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10032 dreg = alloc_ireg (cfg);
10033 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10035 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10036 (cfg->compile_aot && is_special_static) ||
10037 (context_used && is_special_static)) {
10038 MonoInst *iargs [2];
10040 g_assert (field->parent);
10041 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10042 if (context_used) {
10043 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10044 field, MONO_RGCTX_INFO_CLASS_FIELD);
10046 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10048 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10049 } else if (context_used) {
10050 MonoInst *static_data;
10053 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10054 method->klass->name_space, method->klass->name, method->name,
10055 depth, field->offset);
10058 if (mono_class_needs_cctor_run (klass, method))
10059 emit_generic_class_init (cfg, klass);
10062 * The pointer we're computing here is
10064 * super_info.static_data + field->offset
10066 static_data = emit_get_rgctx_klass (cfg, context_used,
10067 klass, MONO_RGCTX_INFO_STATIC_DATA);
10069 if (mini_is_gsharedvt_klass (cfg, klass)) {
10070 MonoInst *offset_ins;
10072 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10073 dreg = alloc_ireg_mp (cfg);
10074 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10075 } else if (field->offset == 0) {
10078 int addr_reg = mono_alloc_preg (cfg);
10079 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10081 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10082 MonoInst *iargs [2];
10084 g_assert (field->parent);
10085 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10086 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10087 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10089 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
10091 CHECK_TYPELOAD (klass);
10093 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
10094 if (!(g_slist_find (class_inits, vtable))) {
10095 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
10096 if (cfg->verbose_level > 2)
10097 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10098 class_inits = g_slist_prepend (class_inits, vtable);
10101 if (cfg->run_cctors) {
10103 /* This makes so that inline cannot trigger */
10104 /* .cctors: too many apps depend on them */
10105 /* running with a specific order... */
10106 if (! vtable->initialized)
10107 INLINE_FAILURE ("class init");
10108 ex = mono_runtime_class_init_full (vtable, FALSE);
10110 set_exception_object (cfg, ex);
10111 goto exception_exit;
10115 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10117 if (cfg->compile_aot)
10118 EMIT_NEW_SFLDACONST (cfg, ins, field);
10120 EMIT_NEW_PCONST (cfg, ins, addr);
10122 MonoInst *iargs [1];
10123 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10124 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10128 /* Generate IR to do the actual load/store operation */
10130 if (op == CEE_LDSFLDA) {
10131 ins->klass = mono_class_from_mono_type (ftype);
10132 ins->type = STACK_PTR;
10134 } else if (op == CEE_STSFLD) {
10137 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10138 store->flags |= ins_flag;
10140 gboolean is_const = FALSE;
10141 MonoVTable *vtable = NULL;
10142 gpointer addr = NULL;
10144 if (!context_used) {
10145 vtable = mono_class_vtable (cfg->domain, klass);
10146 CHECK_TYPELOAD (klass);
10148 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10149 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10150 int ro_type = ftype->type;
10152 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10153 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10154 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10157 GSHAREDVT_FAILURE (op);
10159 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10162 case MONO_TYPE_BOOLEAN:
10164 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10168 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10171 case MONO_TYPE_CHAR:
10173 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10177 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10182 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10186 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10191 case MONO_TYPE_PTR:
10192 case MONO_TYPE_FNPTR:
10193 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10194 type_to_eval_stack_type ((cfg), field->type, *sp);
10197 case MONO_TYPE_STRING:
10198 case MONO_TYPE_OBJECT:
10199 case MONO_TYPE_CLASS:
10200 case MONO_TYPE_SZARRAY:
10201 case MONO_TYPE_ARRAY:
10202 if (!mono_gc_is_moving ()) {
10203 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10204 type_to_eval_stack_type ((cfg), field->type, *sp);
10212 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10217 case MONO_TYPE_VALUETYPE:
10227 CHECK_STACK_OVF (1);
10229 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10230 load->flags |= ins_flag;
10243 token = read32 (ip + 1);
10244 klass = mini_get_class (method, token, generic_context);
10245 CHECK_TYPELOAD (klass);
10246 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10247 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10248 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10249 generic_class_is_reference_type (cfg, klass)) {
10250 /* insert call to write barrier */
10251 emit_write_barrier (cfg, sp [0], sp [1]);
10263 const char *data_ptr;
10265 guint32 field_token;
10271 token = read32 (ip + 1);
/*
 * CEE_NEWARR (continuation): 'token' has already been read; resolve the
 * element class and emit the one-dimensional array allocation.
 * NOTE(review): this extract is non-contiguous (original line numbers show
 * gaps), so some closing braces and else-arms are not visible here.
 */
10273 klass = mini_get_class (method, token, generic_context);
10274 CHECK_TYPELOAD (klass);
10276 context_used = mini_class_check_context_used (cfg, klass);
/* The array-length operand on the stack may be int64 (or native int on
 * 64-bit); narrow it to I4 before passing it to the allocators. */
10278 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10279 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
10280 ins->sreg1 = sp [0]->dreg;
10281 ins->type = STACK_I4;
10282 ins->dreg = alloc_ireg (cfg);
10283 MONO_ADD_INS (cfg->cbb, ins);
10284 *sp = mono_decompose_opcode (cfg, ins);
/* Generic sharing path: the array vtable is not known at compile time, so
 * fetch it through the runtime generic context; allocate via the managed
 * allocator when the GC provides one, else via the icall. */
10287 if (context_used) {
10288 MonoInst *args [3];
10289 MonoClass *array_class = mono_array_class_get (klass, 1);
10290 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10292 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10295 args [0] = emit_get_rgctx_klass (cfg, context_used,
10296 array_class, MONO_RGCTX_INFO_VTABLE);
10301 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10303 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
/* Shared (domain-neutral) code path: decompose to a (domain, class, len)
 * icall right away. */
10305 if (cfg->opt & MONO_OPT_SHARED) {
10306 /* Decompose now to avoid problems with references to the domainvar */
10307 MonoInst *iargs [3];
10309 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10310 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10311 iargs [2] = sp [0];
10313 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
/* Normal path: keep a high-level OP_NEWARR so array-bounds-check removal
 * (abcrem) can reason about it; it is decomposed later. */
10315 /* Decompose later since it is needed by abcrem */
10316 MonoClass *array_type = mono_array_class_get (klass, 1);
10317 mono_class_vtable (cfg->domain, array_type);
10318 CHECK_TYPELOAD (array_type);
10320 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10321 ins->dreg = alloc_ireg_ref (cfg);
10322 ins->sreg1 = sp [0]->dreg;
10323 ins->inst_newa_class = klass;
10324 ins->type = STACK_OBJ;
10325 ins->klass = array_type;
10326 MONO_ADD_INS (cfg->cbb, ins);
10327 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10328 cfg->cbb->has_array_access = TRUE;
10330 /* Needed so mono_emit_load_get_addr () gets called */
10331 mono_get_got_var (cfg);
/*
 * InitializeArray intrinsic: when the newarr with a constant length is
 * immediately followed by the RuntimeHelpers.InitializeArray pattern,
 * open-code the initialization as a memcpy from the RVA field data
 * instead of calling into the runtime.
 */
10341 * we inline/optimize the initialization sequence if possible.
10342 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10343 * for small sizes open code the memcpy
10344 * ensure the rva field is big enough
10346 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10347 MonoMethod *memcpy_method = get_memcpy_method ();
10348 MonoInst *iargs [3];
10349 int add_reg = alloc_ireg_mp (cfg);
/* iargs [0] = &array->vector (destination of the copy) */
10351 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10352 if (cfg->compile_aot) {
/* AOT code cannot embed the raw data pointer; emit an RVA patch instead. */
10353 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10355 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10357 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10358 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN: push the length of the array on top of the stack.
 * The operand must be an object reference. */
10367 if (sp [0]->type != STACK_OBJ)
10370 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10371 ins->dreg = alloc_preg (cfg);
10372 ins->sreg1 = sp [0]->dreg;
10373 ins->type = STACK_I4;
/* OP_LDLEN can fault on a null array; the flag survives decomposition so
 * the null check is kept. */
10374 /* This flag will be inherited by the decomposition */
10375 ins->flags |= MONO_INST_FAULT;
10376 MONO_ADD_INS (cfg->cbb, ins);
10377 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10378 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA: push the address of the element at sp[1] in array sp[0]. */
10386 if (sp [0]->type != STACK_OBJ)
10389 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10391 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10392 CHECK_TYPELOAD (klass);
10393 /* we need to make sure that this array is exactly the type it needs
10394 * to be for correctness. the wrappers are lax with their usage
10395 * so we need to ignore them here
/* For reference element types an array-covariance type check is required,
 * unless the 'readonly.' prefix was seen or we are inside a wrapper. */
10397 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10398 MonoClass *array_class = mono_array_class_get (klass, 1);
10399 mini_emit_check_array_type (cfg, sp [0], array_class);
10400 CHECK_TYPELOAD (array_class);
10404 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/*
 * CEE_LDELEM / CEE_LDELEM_*: load an array element onto the stack.
 * The typed variants encode the element type in the opcode; plain
 * CEE_LDELEM carries a type token.
 */
10409 case CEE_LDELEM_I1:
10410 case CEE_LDELEM_U1:
10411 case CEE_LDELEM_I2:
10412 case CEE_LDELEM_U2:
10413 case CEE_LDELEM_I4:
10414 case CEE_LDELEM_U4:
10415 case CEE_LDELEM_I8:
10417 case CEE_LDELEM_R4:
10418 case CEE_LDELEM_R8:
10419 case CEE_LDELEM_REF: {
10425 if (*ip == CEE_LDELEM) {
10427 token = read32 (ip + 1);
10428 klass = mini_get_class (method, token, generic_context);
10429 CHECK_TYPELOAD (klass);
10430 mono_class_init (klass);
/* Typed variant: derive the element class from the opcode itself. */
10433 klass = array_access_to_klass (*ip);
10435 if (sp [0]->type != STACK_OBJ)
10438 cfg->flags |= MONO_CFG_HAS_LDELEMA;
/* gsharedvt element types have unknown size: compute the element address
 * and do a variable-size load. */
10440 if (mini_is_gsharedvt_klass (cfg, klass)) {
10441 // FIXME-VT: OP_ICONST optimization
10442 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10443 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10444 ins->opcode = OP_LOADV_MEMBASE;
/* Constant index: fold the element offset and emit just a bounds check
 * plus a direct load, skipping the address computation. */
10445 } else if (sp [1]->opcode == OP_ICONST) {
10446 int array_reg = sp [0]->dreg;
10447 int index_reg = sp [1]->dreg;
10448 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10450 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
/* General case: ldelema (which bounds-checks) then load through it. */
10453 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10454 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10457 if (*ip == CEE_LDELEM)
/*
 * CEE_STELEM / CEE_STELEM_*: store a value into an array element;
 * emit_array_store() handles bounds/type checks and write barriers.
 */
10464 case CEE_STELEM_I1:
10465 case CEE_STELEM_I2:
10466 case CEE_STELEM_I4:
10467 case CEE_STELEM_I8:
10468 case CEE_STELEM_R4:
10469 case CEE_STELEM_R8:
10470 case CEE_STELEM_REF:
10475 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10477 if (*ip == CEE_STELEM) {
10479 token = read32 (ip + 1);
10480 klass = mini_get_class (method, token, generic_context);
10481 CHECK_TYPELOAD (klass);
10482 mono_class_init (klass);
10485 klass = array_access_to_klass (*ip);
10487 if (sp [0]->type != STACK_OBJ)
10490 emit_array_store (cfg, klass, sp, TRUE);
10492 if (*ip == CEE_STELEM)
/* CEE_CKFINITE: throw ArithmeticException if the R8 on the stack is
 * NaN or infinite; otherwise leave the value unchanged. */
10499 case CEE_CKFINITE: {
10503 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10504 ins->sreg1 = sp [0]->dreg;
10505 ins->dreg = alloc_freg (cfg);
10506 ins->type = STACK_R8;
10507 MONO_ADD_INS (bblock, ins);
10509 *sp++ = mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the managed pointer from a typed reference
 * (MonoTypedRef), after checking that its klass matches the token.
 */
10514 case CEE_REFANYVAL: {
10515 MonoInst *src_var, *src;
10517 int klass_reg = alloc_preg (cfg);
10518 int dreg = alloc_preg (cfg);
/* Not supported under gsharedvt. */
10520 GSHAREDVT_FAILURE (*ip);
10523 MONO_INST_NEW (cfg, ins, *ip);
10526 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10527 CHECK_TYPELOAD (klass);
10528 mono_class_init (klass);
10530 context_used = mini_class_check_context_used (cfg, klass);
/* Take the address of the TypedReference vtype on the stack so its
 * fields can be loaded from memory. */
10533 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10535 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10536 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
/* Type check: compare the stored klass against the expected one (fetched
 * via rgctx under generic sharing) and throw InvalidCastException on
 * mismatch. */
10539 if (context_used) {
10540 MonoInst *klass_ins;
10542 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10543 klass, MONO_RGCTX_INFO_KLASS);
10546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10547 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10549 mini_emit_class_check (cfg, klass_reg, klass);
10551 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10552 ins->type = STACK_MP;
/*
 * CEE_MKREFANY: build a MonoTypedRef { klass, type, value } in a local
 * from the managed pointer on the stack and push it as a vtype.
 */
10557 case CEE_MKREFANY: {
10558 MonoInst *loc, *addr;
10560 GSHAREDVT_FAILURE (*ip);
10563 MONO_INST_NEW (cfg, ins, *ip);
10566 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10567 CHECK_TYPELOAD (klass);
10568 mono_class_init (klass);
10570 context_used = mini_class_check_context_used (cfg, klass);
10572 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10573 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
/* Three ways to materialize the klass/type fields:
 * 1) generic sharing: load the class through the rgctx;
 * 2) AOT: emit a class-constant patch;
 * 3) JIT: embed the raw pointers directly as immediates.
 * Note &klass->byval_arg == (char*)klass + offsetof(MonoClass, byval_arg),
 * which is what the ADD_IMM computes in cases 1 and 2. */
10575 if (context_used) {
10576 MonoInst *const_ins;
10577 int type_reg = alloc_preg (cfg);
10579 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10580 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10582 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10583 } else if (cfg->compile_aot) {
10584 int const_reg = alloc_preg (cfg);
10585 int type_reg = alloc_preg (cfg);
10587 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10588 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10590 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10592 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10593 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10595 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
/* Reload the filled-in TypedReference and push it as a vtype. */
10597 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10598 ins->type = STACK_VTYPE;
10599 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a Runtime{Type,Method,Field}Handle for the token.
 * Handles the wrapper case (token indexes wrapper data instead of
 * metadata), generic sharing (handle fetched through the rgctx), AOT
 * (patched constants) and the common ldtoken+GetTypeFromHandle fusion.
 */
10604 case CEE_LDTOKEN: {
10606 MonoClass *handle_class;
10608 CHECK_STACK_OVF (1);
10611 n = read32 (ip + 1);
/* In dynamic-method / synchronized wrappers the "token" is an index into
 * the wrapper's data blob, not a metadata token. */
10613 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10614 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10615 handle = mono_method_get_wrapper_data (method, n);
10616 handle_class = mono_method_get_wrapper_data (method, n + 1);
10617 if (handle_class == mono_defaults.typehandle_class)
10618 handle = &((MonoClass*)handle)->byval_arg;
10621 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10625 mono_class_init (handle_class);
/* Under generic sharing, figure out whether the handle depends on the
 * method's type/method context. */
10626 if (cfg->generic_sharing_context) {
10627 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10628 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10629 /* This case handles ldtoken
10630 of an open type, like for
10633 } else if (handle_class == mono_defaults.typehandle_class) {
10634 /* If we get a MONO_TYPE_CLASS
10635 then we need to provide the
10637 instantiation of it. */
10638 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10641 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10642 } else if (handle_class == mono_defaults.fieldhandle_class)
10643 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10644 else if (handle_class == mono_defaults.methodhandle_class)
10645 context_used = mini_method_check_context_used (cfg, handle);
10647 g_assert_not_reached ();
/* Domain-neutral (shared) code: resolve the token at run time through
 * the mono_ldtoken_wrapper icall and store the result into a vtype
 * temporary. */
10650 if ((cfg->opt & MONO_OPT_SHARED) &&
10651 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10652 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10653 MonoInst *addr, *vtvar, *iargs [3];
10654 int method_context_used;
10656 method_context_used = mini_method_check_context_used (cfg, method);
10658 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10660 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10661 EMIT_NEW_ICONST (cfg, iargs [1], n);
10662 if (method_context_used) {
10663 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10664 method, MONO_RGCTX_INFO_METHOD);
10665 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10667 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10668 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10670 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10674 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* Peephole: ldtoken immediately followed by a call to
 * Type.GetTypeFromHandle collapses to pushing the System.Type object
 * directly, skipping the handle round-trip. */
10676 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10677 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10678 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10679 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10680 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10681 MonoClass *tclass = mono_class_from_mono_type (handle);
10683 mono_class_init (tclass);
10684 if (context_used) {
10685 ins = emit_get_rgctx_klass (cfg, context_used,
10686 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10687 } else if (cfg->compile_aot) {
10688 if (method->wrapper_type) {
10689 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10690 /* Special case for static synchronized wrappers */
10691 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10693 /* FIXME: n is not a normal token */
10694 cfg->disable_aot = TRUE;
10695 EMIT_NEW_PCONST (cfg, ins, NULL);
10698 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
/* JIT: the reflection object can be created now and embedded. */
10701 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10703 ins->type = STACK_OBJ;
10704 ins->klass = cmethod->klass;
/* General case: store the raw handle into a vtype temporary of the
 * handle's class and push that. */
10707 MonoInst *addr, *vtvar;
10709 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10711 if (context_used) {
10712 if (handle_class == mono_defaults.typehandle_class) {
10713 ins = emit_get_rgctx_klass (cfg, context_used,
10714 mono_class_from_mono_type (handle),
10715 MONO_RGCTX_INFO_TYPE);
10716 } else if (handle_class == mono_defaults.methodhandle_class) {
10717 ins = emit_get_rgctx_method (cfg, context_used,
10718 handle, MONO_RGCTX_INFO_METHOD);
10719 } else if (handle_class == mono_defaults.fieldhandle_class) {
10720 ins = emit_get_rgctx_field (cfg, context_used,
10721 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10723 g_assert_not_reached ();
10725 } else if (cfg->compile_aot) {
10726 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10728 EMIT_NEW_PCONST (cfg, ins, handle);
10730 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10732 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* CEE_THROW (body; the case label is outside this extract): throw the
 * exception object on top of the stack and terminate the basic block. */
10742 MONO_INST_NEW (cfg, ins, OP_THROW);
10744 ins->sreg1 = sp [0]->dreg;
/* Throwing code is cold; hint the block layout accordingly. */
10746 bblock->out_of_line = TRUE;
10747 MONO_ADD_INS (bblock, ins);
10748 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10749 MONO_ADD_INS (bblock, ins);
10752 link_bblock (cfg, bblock, end_bblock);
10753 start_new_bblock = 1;
/* CEE_ENDFINALLY: terminate a finally handler. */
10755 case CEE_ENDFINALLY:
10756 /* mono_save_seq_point_info () depends on this */
10757 if (sp != stack_start)
10758 emit_seq_point (cfg, method, ip, FALSE);
10759 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10760 MONO_ADD_INS (bblock, ins);
10762 start_new_bblock = 1;
10765 * Control will leave the method so empty the stack, otherwise
10766 * the next basic block will start with a nonempty stack.
10768 while (sp != stack_start) {
/*
 * CEE_LEAVE / CEE_LEAVE_S: branch out of a protected region. Empties the
 * evaluation stack, rethrows pending thread-abort exceptions when leaving
 * a catch block, calls any finally handlers on the way out, then branches
 * to the target.
 */
10773 case CEE_LEAVE_S: {
10776 if (*ip == CEE_LEAVE) {
10778 target = ip + 5 + (gint32)read32(ip + 1);
10781 target = ip + 2 + (signed char)(ip [1]);
10784 /* empty the stack */
10785 while (sp != stack_start) {
10790 * If this leave statement is in a catch block, check for a
10791 * pending exception, and rethrow it if necessary.
10792 * We avoid doing this in runtime invoke wrappers, since those are called
10793 * by native code which excepts the wrapper to catch all exceptions.
10795 for (i = 0; i < header->num_clauses; ++i) {
10796 MonoExceptionClause *clause = &header->clauses [i];
10799 * Use <= in the final comparison to handle clauses with multiple
10800 * leave statements, like in bug #78024.
10801 * The ordering of the exception clauses guarantees that we find the
10802 * innermost clause.
10804 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10806 MonoBasicBlock *dont_throw;
10811 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
/* Ask the runtime whether a ThreadAbortException is pending. */
10814 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10816 NEW_BBLOCK (cfg, dont_throw);
10819 * Currently, we always rethrow the abort exception, despite the
10820 * fact that this is not correct. See thread6.cs for an example.
10821 * But propagating the abort exception is more important than
10822 * getting the sematics right.
10824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10825 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10826 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10828 MONO_START_BB (cfg, dont_throw);
/* Emit OP_CALL_HANDLER for every finally clause crossed by this leave. */
10833 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10835 MonoExceptionClause *clause;
10837 for (tmp = handlers; tmp; tmp = tmp->next) {
10838 clause = tmp->data;
10839 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10841 link_bblock (cfg, bblock, tblock);
10842 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10843 ins->inst_target_bb = tblock;
10844 ins->inst_eh_block = clause;
10845 MONO_ADD_INS (bblock, ins);
10846 bblock->has_call_handler = 1;
10847 if (COMPILE_LLVM (cfg)) {
10848 MonoBasicBlock *target_bb;
10851 * Link the finally bblock with the target, since it will
10852 * conceptually branch there.
10853 * FIXME: Have to link the bblock containing the endfinally.
10855 GET_BBLOCK (cfg, target_bb, target);
10856 link_bblock (cfg, tblock, target_bb);
10859 g_list_free (handlers);
/* Finally, the unconditional branch to the leave target. */
10862 MONO_INST_NEW (cfg, ins, OP_BR);
10863 MONO_ADD_INS (bblock, ins);
10864 GET_BBLOCK (cfg, tblock, target);
10865 link_bblock (cfg, bblock, tblock);
10866 ins->inst_target_bb = tblock;
10867 start_new_bblock = 1;
10869 if (*ip == CEE_LEAVE)
/*
 * Mono-specific opcodes (MONO_CUSTOM_PREFIX + sub-opcode). These are only
 * ever emitted by the runtime's own wrapper generators, never by user IL,
 * hence the wrapper-type assertion below.
 */
10878 * Mono specific opcodes
10880 case MONO_CUSTOM_PREFIX: {
10882 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
/* CEE_MONO_ICALL: direct call to a registered JIT icall; the wrapper data
 * slot holds the native function address. */
10886 case CEE_MONO_ICALL: {
10888 MonoJitICallInfo *info;
10890 token = read32 (ip + 2);
10891 func = mono_method_get_wrapper_data (method, token);
10892 info = mono_find_jit_icall_by_addr (func);
10894 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10897 CHECK_STACK (info->sig->param_count);
10898 sp -= info->sig->param_count;
10900 ins = mono_emit_jit_icall (cfg, info->func, sp);
10901 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10905 inline_costs += 10 * num_calls++;
/* CEE_MONO_LDPTR: push a raw pointer stored in the wrapper data. */
10909 case CEE_MONO_LDPTR: {
10912 CHECK_STACK_OVF (1);
10914 token = read32 (ip + 2);
10916 ptr = mono_method_get_wrapper_data (method, token);
10917 /* FIXME: Generalize this */
/* The interruption-request flag is the one pointer that has a dedicated
 * AOT patch type, so it stays AOT-compatible. */
10918 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10919 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10924 EMIT_NEW_PCONST (cfg, ins, ptr);
10927 inline_costs += 10 * num_calls++;
10928 /* Can't embed random pointers into AOT code */
10929 cfg->disable_aot = 1;
/* CEE_MONO_JIT_ICALL_ADDR: push the address of a JIT icall, resolved by
 * name so it is AOT-friendly. */
10932 case CEE_MONO_JIT_ICALL_ADDR: {
10933 MonoJitICallInfo *callinfo;
10936 CHECK_STACK_OVF (1);
10938 token = read32 (ip + 2);
10940 ptr = mono_method_get_wrapper_data (method, token);
10941 callinfo = mono_find_jit_icall_by_addr (ptr);
10942 g_assert (callinfo);
10943 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10946 inline_costs += 10 * num_calls++;
/* CEE_MONO_ICALL_ADDR: push the native address of an [InternalCall]
 * method; goes through a patch under AOT. */
10949 case CEE_MONO_ICALL_ADDR: {
10950 MonoMethod *cmethod;
10953 CHECK_STACK_OVF (1);
10955 token = read32 (ip + 2);
10957 cmethod = mono_method_get_wrapper_data (method, token);
10959 if (cfg->compile_aot) {
10960 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10962 ptr = mono_lookup_internal_call (cmethod);
10964 EMIT_NEW_PCONST (cfg, ins, ptr);
/* CEE_MONO_VTADDR: push the address of the vtype on top of the stack. */
10970 case CEE_MONO_VTADDR: {
10971 MonoInst *src_var, *src;
10977 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10978 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
/* CEE_MONO_NEWOBJ: allocate an (uninitialized) object of the class in the
 * wrapper data via the mono_object_new icall. */
10983 case CEE_MONO_NEWOBJ: {
10984 MonoInst *iargs [2];
10986 CHECK_STACK_OVF (1);
10988 token = read32 (ip + 2);
10989 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10990 mono_class_init (klass);
10991 NEW_DOMAINCONST (cfg, iargs [0]);
10992 MONO_ADD_INS (cfg->cbb, iargs [0]);
10993 NEW_CLASSCONST (cfg, iargs [1], klass);
10994 MONO_ADD_INS (cfg->cbb, iargs [1]);
10995 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10997 inline_costs += 10 * num_calls++;
/* CEE_MONO_OBJADDR: reinterpret the object reference on the stack as a
 * managed pointer (simple move, type changed to STACK_MP). */
11000 case CEE_MONO_OBJADDR:
11003 MONO_INST_NEW (cfg, ins, OP_MOVE);
11004 ins->dreg = alloc_ireg_mp (cfg);
11005 ins->sreg1 = sp [0]->dreg;
11006 ins->type = STACK_MP;
11007 MONO_ADD_INS (cfg->cbb, ins);
11011 case CEE_MONO_LDNATIVEOBJ:
11013 * Similar to LDOBJ, but instead load the unmanaged
11014 * representation of the vtype to the stack.
11019 token = read32 (ip + 2);
11020 klass = mono_method_get_wrapper_data (method, token);
11021 g_assert (klass->valuetype);
11022 mono_class_init (klass);
/* Marshal through a pinvoke-laid-out temporary. */
11025 MonoInst *src, *dest, *temp;
11028 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11029 temp->backend.is_pinvoke = 1;
11030 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11031 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11033 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11034 dest->type = STACK_VTYPE;
11035 dest->klass = klass;
/* CEE_MONO_RETOBJ: return a vtype in its native (pinvoke) representation;
 * stores into the return buffer and branches to the method end. */
11041 case CEE_MONO_RETOBJ: {
11043 * Same as RET, but return the native representation of a vtype
11046 g_assert (cfg->ret);
11047 g_assert (mono_method_signature (method)->pinvoke);
11052 token = read32 (ip + 2);
11053 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11055 if (!cfg->vret_addr) {
11056 g_assert (cfg->ret_var_is_local);
11058 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11060 EMIT_NEW_RETLOADA (cfg, ins);
11062 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11064 if (sp != stack_start)
11067 MONO_INST_NEW (cfg, ins, OP_BR);
11068 ins->inst_target_bb = end_bblock;
11069 MONO_ADD_INS (bblock, ins);
11070 link_bblock (cfg, bblock, end_bblock);
11071 start_new_bblock = 1;
/* CEE_MONO_CISINST / CEE_MONO_CCASTCLASS: isinst/castclass variants used
 * by wrappers, with the class taken from the wrapper data. */
11075 case CEE_MONO_CISINST:
11076 case CEE_MONO_CCASTCLASS: {
11081 token = read32 (ip + 2);
11082 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11083 if (ip [1] == CEE_MONO_CISINST)
11084 ins = handle_cisinst (cfg, klass, sp [0]);
11086 ins = handle_ccastclass (cfg, klass, sp [0]);
/* CEE_MONO_SAVE_LMF / RESTORE_LMF: save/restore the Last Managed Frame;
 * only emitted inline on architectures that support LMF opcodes. */
11092 case CEE_MONO_SAVE_LMF:
11093 case CEE_MONO_RESTORE_LMF:
11094 #ifdef MONO_ARCH_HAVE_LMF_OPS
11095 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11096 MONO_ADD_INS (bblock, ins);
11097 cfg->need_lmf_area = TRUE;
/* CEE_MONO_CLASSCONST: push a MonoClass* constant from the wrapper data. */
11101 case CEE_MONO_CLASSCONST:
11102 CHECK_STACK_OVF (1);
11104 token = read32 (ip + 2);
11105 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11108 inline_costs += 10 * num_calls++;
/* CEE_MONO_NOT_TAKEN: mark the current block as cold. */
11110 case CEE_MONO_NOT_TAKEN:
11111 bblock->out_of_line = TRUE;
/* CEE_MONO_TLS (body): load a value from thread-local storage at the
 * encoded offset. */
11115 CHECK_STACK_OVF (1);
11117 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11118 ins->dreg = alloc_preg (cfg);
11119 ins->inst_offset = (gint32)read32 (ip + 2);
11120 ins->type = STACK_PTR;
11121 MONO_ADD_INS (bblock, ins);
/* CEE_MONO_DYN_CALL: dynamic call used by runtime-invoke; implemented as
 * a special call opcode instead of a trampoline to avoid an extra frame. */
11125 case CEE_MONO_DYN_CALL: {
11126 MonoCallInst *call;
11128 /* It would be easier to call a trampoline, but that would put an
11129 * extra frame on the stack, confusing exception handling. So
11130 * implement it inline using an opcode for now.
/* Scratch variable shared by all dyn-calls in the method; marked
 * MONO_INST_INDIRECT so the register allocator leaves it in memory. */
11133 if (!cfg->dyn_call_var) {
11134 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11135 /* prevent it from being register allocated */
11136 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
11139 /* Has to use a call inst since it local regalloc expects it */
11140 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11141 ins = (MonoInst*)call;
11143 ins->sreg1 = sp [0]->dreg;
11144 ins->sreg2 = sp [1]->dreg;
11145 MONO_ADD_INS (bblock, ins);
11147 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
11148 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11152 inline_costs += 10 * num_calls++;
/* CEE_MONO_MEMORY_BARRIER: emit a fence of the encoded kind. */
11156 case CEE_MONO_MEMORY_BARRIER: {
11158 emit_memory_barrier (cfg, (int)read32 (ip + 1));
/* CEE_MONO_JIT_ATTACH: attach the current native thread to the runtime.
 * Fast path: if both the domain and LMF TLS slots are already set, skip
 * the mono_jit_thread_attach icall. The previous domain is kept in
 * orig_domain_var so JIT_DETACH can restore it. */
11162 case CEE_MONO_JIT_ATTACH: {
11163 MonoInst *args [16];
11164 MonoInst *ad_ins, *lmf_ins;
11165 MonoBasicBlock *next_bb = NULL;
11167 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11169 EMIT_NEW_PCONST (cfg, ins, NULL);
11170 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11176 ad_ins = mono_get_domain_intrinsic (cfg);
11177 lmf_ins = mono_get_lmf_intrinsic (cfg);
11180 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11181 NEW_BBLOCK (cfg, next_bb);
11183 MONO_ADD_INS (cfg->cbb, ad_ins);
11184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11187 MONO_ADD_INS (cfg->cbb, lmf_ins);
11188 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11192 if (cfg->compile_aot) {
11193 /* AOT code is only used in the root domain */
11194 EMIT_NEW_PCONST (cfg, args [0], NULL);
11196 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11198 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11199 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11202 MONO_START_BB (cfg, next_bb);
/* CEE_MONO_JIT_DETACH: restore the domain saved by JIT_ATTACH. */
11208 case CEE_MONO_JIT_DETACH: {
11209 MonoInst *args [16];
11211 /* Restore the original domain */
11212 dreg = alloc_ireg (cfg);
11213 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11214 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
/* Any other mono-prefixed sub-opcode is a hard bug. */
11219 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
/* Two-byte opcodes (0xFE prefix). */
11225 case CEE_PREFIX1: {
/* CEE_ARGLIST: push a RuntimeArgumentHandle for vararg methods, built in
 * a vtype temporary the same way LDTOKEN builds its handle. */
11228 case CEE_ARGLIST: {
11229 /* somewhat similar to LDTOKEN */
11230 MonoInst *addr, *vtvar;
11231 CHECK_STACK_OVF (1);
11232 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11234 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11235 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11237 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11238 ins->type = STACK_VTYPE;
11239 ins->klass = mono_defaults.argumenthandle_class;
/*
 * CEQ/CGT/CGT_UN/CLT/CLT_UN: lowered to a compare instruction followed by
 * a set-on-condition instruction; the compare opcode is chosen by operand
 * width (I/L/F).
 */
11252 * The following transforms:
11253 * CEE_CEQ into OP_CEQ
11254 * CEE_CGT into OP_CGT
11255 * CEE_CGT_UN into OP_CGT_UN
11256 * CEE_CLT into OP_CLT
11257 * CEE_CLT_UN into OP_CLT_UN
/* Opcode arithmetic: the OP_C* range parallels the CEE_C* range. */
11259 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11261 MONO_INST_NEW (cfg, ins, cmp->opcode);
11263 cmp->sreg1 = sp [0]->dreg;
11264 cmp->sreg2 = sp [1]->dreg;
11265 type_from_op (cmp, sp [0], sp [1]);
11267 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11268 cmp->opcode = OP_LCOMPARE;
11269 else if (sp [0]->type == STACK_R8)
11270 cmp->opcode = OP_FCOMPARE;
11272 cmp->opcode = OP_ICOMPARE;
11273 MONO_ADD_INS (bblock, cmp);
11274 ins->type = STACK_I4;
11275 ins->dreg = alloc_dreg (cfg, ins->type);
11276 type_from_op (ins, sp [0], sp [1]);
/* FP backends expect the fceq-style opcodes to carry the operands
 * themselves, so the separate compare is dropped. */
11278 if (cmp->opcode == OP_FCOMPARE) {
11280 * The backends expect the fceq opcodes to do the
11283 cmp->opcode = OP_NOP;
11284 ins->sreg1 = cmp->sreg1;
11285 ins->sreg2 = cmp->sreg2;
11287 MONO_ADD_INS (bblock, ins);
/* CEE_LDFTN (body): push the native code address of a method. */
11293 MonoInst *argconst;
11294 MonoMethod *cil_method;
11296 CHECK_STACK_OVF (1);
11298 n = read32 (ip + 2);
11299 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11300 if (!cmethod || mono_loader_get_last_error ())
11302 mono_class_init (cmethod->klass);
11304 mono_save_token_info (cfg, image, n, cmethod);
11306 context_used = mini_method_check_context_used (cfg, cmethod);
11308 cil_method = cmethod;
11309 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11310 METHOD_ACCESS_FAILURE;
/* CAS / CoreCLR security checks on the referenced method. */
11312 if (mono_security_cas_enabled ()) {
11313 if (check_linkdemand (cfg, method, cmethod))
11314 INLINE_FAILURE ("linkdemand");
11315 CHECK_CFG_EXCEPTION;
11316 } else if (mono_security_core_clr_enabled ()) {
11317 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* Peephole: ldftn immediately followed by newobj of a delegate ctor is
 * turned into a direct delegate creation, skipping mono_ldftn. */
11321 * Optimize the common case of ldftn+delegate creation
11323 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11324 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11325 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11326 MonoInst *target_ins;
11327 MonoMethod *invoke;
11328 int invoke_context_used;
11330 invoke = mono_get_delegate_invoke (ctor_method->klass);
11331 if (!invoke || !mono_method_signature (invoke))
11334 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11336 target_ins = sp [-1];
11338 if (mono_security_core_clr_enabled ())
11339 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11341 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11342 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11343 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11345 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11349 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11350 /* FIXME: SGEN support */
11351 if (invoke_context_used == 0) {
11353 if (cfg->verbose_level > 3)
11354 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11356 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11357 CHECK_CFG_EXCEPTION;
/* Generic case: call the mono_ldftn icall with the method constant. */
11366 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11367 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11371 inline_costs += 10 * num_calls++;
/* CEE_LDVIRTFTN: push the address of the virtual-method implementation
 * for the object on the stack. */
11374 case CEE_LDVIRTFTN: {
11375 MonoInst *args [2];
11379 n = read32 (ip + 2);
11380 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11381 if (!cmethod || mono_loader_get_last_error ())
11383 mono_class_init (cmethod->klass);
11385 context_used = mini_method_check_context_used (cfg, cmethod);
11387 if (mono_security_cas_enabled ()) {
11388 if (check_linkdemand (cfg, method, cmethod))
11389 INLINE_FAILURE ("linkdemand");
11390 CHECK_CFG_EXCEPTION;
11391 } else if (mono_security_core_clr_enabled ()) {
11392 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11398 args [1] = emit_get_rgctx_method (cfg, context_used,
11399 cmethod, MONO_RGCTX_INFO_METHOD);
11402 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11404 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11407 inline_costs += 10 * num_calls++;
/* CEE_LDARG (wide form, body): load argument n. */
11411 CHECK_STACK_OVF (1);
11413 n = read16 (ip + 2);
11415 EMIT_NEW_ARGLOAD (cfg, ins, n);
/* CEE_LDARGA (wide form, body): push the address of argument n. */
11420 CHECK_STACK_OVF (1);
11422 n = read16 (ip + 2);
11424 NEW_ARGLOADA (cfg, ins, n);
11425 MONO_ADD_INS (cfg->cbb, ins);
/* CEE_STARG (wide form, body): store into argument n after a stack-type
 * compatibility check. */
11433 n = read16 (ip + 2);
11435 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11437 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
/* CEE_LDLOC (wide form, body): load local n. */
11441 CHECK_STACK_OVF (1);
11443 n = read16 (ip + 2);
11445 EMIT_NEW_LOCLOAD (cfg, ins, n);
/* CEE_LDLOCA (wide form, body): push the address of local n; first try
 * the ldloca+initobj peephole. */
11450 unsigned char *tmp_ip;
11451 CHECK_STACK_OVF (1);
11453 n = read16 (ip + 2);
11456 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11462 EMIT_NEW_LOCLOADA (cfg, ins, n);
/* CEE_STLOC (wide form, body): store into local n. */
11471 n = read16 (ip + 2);
11473 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11475 emit_stloc_ir (cfg, sp, header, n);
/* CEE_LOCALLOC (body): stack allocation. Disallowed during inlining,
 * because an alloca in an inlined loop could overflow the stack where the
 * non-inlined call would not. */
11482 if (sp != stack_start)
11484 if (cfg->method != method)
11486 * Inlining this into a loop in a parent could lead to
11487 * stack overflows which is different behavior than the
11488 * non-inlined case, thus disable inlining in this case.
11490 goto inline_failure;
11492 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11493 ins->dreg = alloc_preg (cfg);
11494 ins->sreg1 = sp [0]->dreg;
11495 ins->type = STACK_PTR;
11496 MONO_ADD_INS (cfg->cbb, ins);
11498 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11500 ins->flags |= MONO_INST_INIT;
/* CEE_ENDFILTER: end a filter clause; the I4 on the stack is the filter
 * result. Must be the last instruction of the innermost enclosing filter. */
11505 case CEE_ENDFILTER: {
11506 MonoExceptionClause *clause, *nearest;
11507 int cc, nearest_num;
11511 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11513 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11514 ins->sreg1 = (*sp)->dreg;
11515 MONO_ADD_INS (bblock, ins);
11516 start_new_bblock = 1;
/* Find the innermost filter clause containing this offset and verify the
 * endfilter sits exactly at its handler start. */
11521 for (cc = 0; cc < header->num_clauses; ++cc) {
11522 clause = &header->clauses [cc];
11523 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11524 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11525 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11530 g_assert (nearest);
11531 if ((ip - header->code) != nearest->handler_offset)
/* Prefix opcodes: accumulate flags consumed by the following instruction. */
11536 case CEE_UNALIGNED_:
11537 ins_flag |= MONO_INST_UNALIGNED;
11538 /* FIXME: record alignment? we can assume 1 for now */
11542 case CEE_VOLATILE_:
11543 ins_flag |= MONO_INST_VOLATILE;
/* CEE_TAILCALL_ prefix (body). */
11547 ins_flag |= MONO_INST_TAILCALL;
11548 cfg->flags |= MONO_CFG_HAS_TAIL;
11549 /* Can't inline tail calls at this time */
11550 inline_costs += 100000;
/* CEE_INITOBJ (body): zero-initialize the location sp[0] points at; for
 * reference types a single null store suffices. */
11557 token = read32 (ip + 2);
11558 klass = mini_get_class (method, token, generic_context);
11559 CHECK_TYPELOAD (klass);
11560 if (generic_class_is_reference_type (cfg, klass))
11561 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11563 mini_emit_initobj (cfg, *sp, NULL, klass);
/* CEE_CONSTRAINED_ prefix: remember the constraint class for the
 * following callvirt. */
11567 case CEE_CONSTRAINED_:
11569 token = read32 (ip + 2);
11570 constrained_call = mini_get_class (method, token, generic_context);
11571 CHECK_TYPELOAD (constrained_call);
/* CEE_CPBLK / CEE_INITBLK: block copy / block init. Small constant sizes
 * are open-coded; otherwise call the managed memcpy/memset helpers. */
11575 case CEE_INITBLK: {
11576 MonoInst *iargs [3];
11580 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11581 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11582 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11583 /* emit_memset only works when val == 0 */
11584 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11586 iargs [0] = sp [0];
11587 iargs [1] = sp [1];
11588 iargs [2] = sp [2];
11589 if (ip [1] == CEE_CPBLK) {
11590 MonoMethod *memcpy_method = get_memcpy_method ();
11591 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11593 MonoMethod *memset_method = get_memset_method ();
11594 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/* CEE_NO_ prefix (body): suppression flags for the next instruction. */
11604 ins_flag |= MONO_INST_NOTYPECHECK;
11606 ins_flag |= MONO_INST_NORANGECHECK;
11607 /* we ignore the no-nullcheck for now since we
11608 * really do it explicitly only when doing callvirt->call
/* CEE_RETHROW: rethrow the exception of the innermost enclosing catch
 * clause, loaded from the exception variable of that handler. */
11612 case CEE_RETHROW: {
11614 int handler_offset = -1;
11616 for (i = 0; i < header->num_clauses; ++i) {
11617 MonoExceptionClause *clause = &header->clauses [i];
11618 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11619 handler_offset = clause->handler_offset;
11624 bblock->flags |= BB_EXCEPTION_UNSAFE;
11626 g_assert (handler_offset != -1);
11628 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11629 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11630 ins->sreg1 = load->dreg;
11631 MONO_ADD_INS (bblock, ins);
11633 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11634 MONO_ADD_INS (bblock, ins);
11637 link_bblock (cfg, bblock, end_bblock);
11638 start_new_bblock = 1;
11646 GSHAREDVT_FAILURE (*ip);
11648 CHECK_STACK_OVF (1);
11650 token = read32 (ip + 2);
11651 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11652 MonoType *type = mono_type_create_from_typespec (image, token);
11653 val = mono_type_size (type, &ialign);
11655 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11656 CHECK_TYPELOAD (klass);
11657 mono_class_init (klass);
11658 val = mono_type_size (&klass->byval_arg, &ialign);
11660 EMIT_NEW_ICONST (cfg, ins, val);
11665 case CEE_REFANYTYPE: {
11666 MonoInst *src_var, *src;
11668 GSHAREDVT_FAILURE (*ip);
11674 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11676 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11677 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11678 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11683 case CEE_READONLY_:
11696 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11706 g_warning ("opcode 0x%02x not handled", *ip);
11710 if (start_new_bblock != 1)
11713 bblock->cil_length = ip - bblock->cil_code;
11714 if (bblock->next_bb) {
11715 /* This could already be set because of inlining, #693905 */
11716 MonoBasicBlock *bb = bblock;
11718 while (bb->next_bb)
11720 bb->next_bb = end_bblock;
11722 bblock->next_bb = end_bblock;
11725 if (cfg->lmf_var) {
11726 cfg->cbb = init_localsbb;
11727 emit_push_lmf (cfg);
11730 if (cfg->method == method && cfg->domainvar) {
11732 MonoInst *get_domain;
11734 cfg->cbb = init_localsbb;
11736 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11737 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11740 get_domain->dreg = alloc_preg (cfg);
11741 MONO_ADD_INS (cfg->cbb, get_domain);
11743 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11744 MONO_ADD_INS (cfg->cbb, store);
11747 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11748 if (cfg->compile_aot)
11749 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11750 mono_get_got_var (cfg);
11753 if (cfg->method == method && cfg->got_var)
11754 mono_emit_load_got_addr (cfg);
11759 cfg->cbb = init_localsbb;
11761 for (i = 0; i < header->num_locals; ++i) {
11762 MonoType *ptype = header->locals [i];
11763 int t = ptype->type;
11764 dreg = cfg->locals [i]->dreg;
11766 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11767 t = mono_class_enum_basetype (ptype->data.klass)->type;
11768 if (ptype->byref) {
11769 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11770 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11771 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11772 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11773 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11774 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11775 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11776 ins->type = STACK_R8;
11777 ins->inst_p0 = (void*)&r8_0;
11778 ins->dreg = alloc_dreg (cfg, STACK_R8);
11779 MONO_ADD_INS (init_localsbb, ins);
11780 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11781 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11782 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11783 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11784 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11785 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11787 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11792 if (cfg->init_ref_vars && cfg->method == method) {
11793 /* Emit initialization for ref vars */
11794 // FIXME: Avoid duplication initialization for IL locals.
11795 for (i = 0; i < cfg->num_varinfo; ++i) {
11796 MonoInst *ins = cfg->varinfo [i];
11798 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11799 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11804 MonoBasicBlock *bb;
11807 * Make seq points at backward branch targets interruptable.
11809 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11810 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11811 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11814 /* Add a sequence point for method entry/exit events */
11816 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11817 MONO_ADD_INS (init_localsbb, ins);
11818 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11819 MONO_ADD_INS (cfg->bb_exit, ins);
11823 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11824 * the code they refer to was dead (#11880).
11826 if (sym_seq_points) {
11827 for (i = 0; i < header->code_size; ++i) {
11828 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11831 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11832 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11839 if (cfg->method == method) {
11840 MonoBasicBlock *bb;
11841 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11842 bb->region = mono_find_block_region (cfg, bb->real_offset);
11844 mono_create_spvar_for_region (cfg, bb->region);
11845 if (cfg->verbose_level > 2)
11846 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11850 g_slist_free (class_inits);
11851 dont_inline = g_list_remove (dont_inline, method);
11853 if (inline_costs < 0) {
11856 /* Method is too large */
11857 mname = mono_method_full_name (method, TRUE);
11858 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11859 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11861 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11862 mono_basic_block_free (original_bb);
11866 if ((cfg->verbose_level > 2) && (cfg->method == method))
11867 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11869 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11870 mono_basic_block_free (original_bb);
11871 return inline_costs;
11874 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11881 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11885 set_exception_type_from_invalid_il (cfg, method, ip);
11889 g_slist_free (class_inits);
11890 mono_basic_block_free (original_bb);
11891 dont_inline = g_list_remove (dont_inline, method);
11892 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to the same-width
 * OP_STORE*_MEMBASE_IMM opcode, so a store of a known constant can be
 * emitted without first loading it into a register.  Any opcode outside
 * the visible set is a caller bug (g_assert_not_reached).
 */
11897 store_membase_reg_to_store_membase_imm (int opcode)
11900 case OP_STORE_MEMBASE_REG:
11901 return OP_STORE_MEMBASE_IMM;
11902 case OP_STOREI1_MEMBASE_REG:
11903 return OP_STOREI1_MEMBASE_IMM;
11904 case OP_STOREI2_MEMBASE_REG:
11905 return OP_STOREI2_MEMBASE_IMM;
11906 case OP_STOREI4_MEMBASE_REG:
11907 return OP_STOREI4_MEMBASE_IMM;
11908 case OP_STOREI8_MEMBASE_REG:
11909 return OP_STOREI8_MEMBASE_IMM;
/* Unknown store opcode: abort rather than silently miscompile. */
11911 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-operand opcode to its variant taking an immediate second
 * operand (e.g. an integer add becomes OP_IADD_IMM), used when one source
 * is a known constant.  NOTE(review): the case labels and the default
 * branch are not visible in this excerpt; presumably it returns -1 when
 * no immediate form exists -- confirm against the full source.
 */
11918 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
11922 return OP_IADD_IMM;
11924 return OP_ISUB_IMM;
11926 return OP_IDIV_IMM;
11928 return OP_IDIV_UN_IMM;
11930 return OP_IREM_IMM;
11932 return OP_IREM_UN_IMM;
11934 return OP_IMUL_IMM;
11936 return OP_IAND_IMM;
11940 return OP_IXOR_IMM;
11942 return OP_ISHL_IMM;
11944 return OP_ISHR_IMM;
11946 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU ops */
11949 return OP_LADD_IMM;
11951 return OP_LSUB_IMM;
11953 return OP_LAND_IMM;
11957 return OP_LXOR_IMM;
11959 return OP_LSHL_IMM;
11961 return OP_LSHR_IMM;
11963 return OP_LSHR_UN_IMM;
/* Compares */
11966 return OP_COMPARE_IMM;
11968 return OP_ICOMPARE_IMM;
11970 return OP_LCOMPARE_IMM;
/* Stores: store an immediate instead of a register */
11972 case OP_STORE_MEMBASE_REG:
11973 return OP_STORE_MEMBASE_IMM;
11974 case OP_STOREI1_MEMBASE_REG:
11975 return OP_STOREI1_MEMBASE_IMM;
11976 case OP_STOREI2_MEMBASE_REG:
11977 return OP_STOREI2_MEMBASE_IMM;
11978 case OP_STOREI4_MEMBASE_REG:
11979 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes */
11981 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11983 return OP_X86_PUSH_IMM;
11984 case OP_X86_COMPARE_MEMBASE_REG:
11985 return OP_X86_COMPARE_MEMBASE_IMM;
11987 #if defined(TARGET_AMD64)
11988 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11989 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a register with a constant target becomes direct */
11991 case OP_VOIDCALL_REG:
11992 return OP_VOIDCALL;
11994 return OP_LOCALLOC_IMM;
12000 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load-indirect) opcode to the corresponding
 * typed OP_LOAD*_MEMBASE IR opcode.  CEE_LDIND_REF (object references)
 * uses the pointer-sized OP_LOAD_MEMBASE.  Aborts on any other opcode.
 */
12007 ldind_to_load_membase (int opcode)
12011 return OP_LOADI1_MEMBASE;
12013 return OP_LOADU1_MEMBASE;
12015 return OP_LOADI2_MEMBASE;
12017 return OP_LOADU2_MEMBASE;
12019 return OP_LOADI4_MEMBASE;
12021 return OP_LOADU4_MEMBASE;
12023 return OP_LOAD_MEMBASE;
12024 case CEE_LDIND_REF:
12025 return OP_LOAD_MEMBASE;
12027 return OP_LOADI8_MEMBASE;
12029 return OP_LOADR4_MEMBASE;
12031 return OP_LOADR8_MEMBASE;
/* Unknown ldind opcode: abort */
12033 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store-indirect) opcode to the corresponding
 * typed OP_STORE*_MEMBASE_REG IR opcode.  CEE_STIND_REF (object
 * references) uses the pointer-sized OP_STORE_MEMBASE_REG.  Aborts on
 * any other opcode.
 */
12040 stind_to_store_membase (int opcode)
12044 return OP_STOREI1_MEMBASE_REG;
12046 return OP_STOREI2_MEMBASE_REG;
12048 return OP_STOREI4_MEMBASE_REG;
12050 case CEE_STIND_REF:
12051 return OP_STORE_MEMBASE_REG;
12053 return OP_STOREI8_MEMBASE_REG;
12055 return OP_STORER4_MEMBASE_REG;
12057 return OP_STORER8_MEMBASE_REG;
/* Unknown stind opcode: abort */
12059 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM) on targets that support it
 * (x86/amd64 only; the 64-bit load additionally requires a 64-bit
 * register, see SIZEOF_REGISTER == 8 below).  NOTE(review): the
 * fall-through/default return is not visible in this excerpt;
 * presumably -1 when no conversion exists -- confirm.
 */
12066 mono_load_membase_to_load_mem (int opcode)
12068 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12069 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12071 case OP_LOAD_MEMBASE:
12072 return OP_LOAD_MEM;
12073 case OP_LOADU1_MEMBASE:
12074 return OP_LOADU1_MEM;
12075 case OP_LOADU2_MEMBASE:
12076 return OP_LOADU2_MEM;
12077 case OP_LOADI4_MEMBASE:
12078 return OP_LOADI4_MEM;
12079 case OP_LOADU4_MEMBASE:
12080 return OP_LOADU4_MEM;
12081 #if SIZEOF_REGISTER == 8
12082 case OP_LOADI8_MEMBASE:
12083 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back with
 * STORE_OPCODE, return a fused read-modify-write opcode that operates
 * directly on the membase destination (x86/amd64 "op [mem], reg/imm"
 * forms), avoiding the separate load+op+store.  Used by the spill pass.
 * NOTE(review): the source-opcode case labels and the default return
 * (presumably -1) are not visible in this excerpt -- confirm.
 */
12092 op_to_op_dest_membase (int store_opcode, int opcode)
12094 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 32-bit stores can be fused. */
12095 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register second operand */
12100 return OP_X86_ADD_MEMBASE_REG;
12102 return OP_X86_SUB_MEMBASE_REG;
12104 return OP_X86_AND_MEMBASE_REG;
12106 return OP_X86_OR_MEMBASE_REG;
12108 return OP_X86_XOR_MEMBASE_REG;
/* Immediate second operand */
12111 return OP_X86_ADD_MEMBASE_IMM;
12114 return OP_X86_SUB_MEMBASE_IMM;
12117 return OP_X86_AND_MEMBASE_IMM;
12120 return OP_X86_OR_MEMBASE_IMM;
12123 return OP_X86_XOR_MEMBASE_IMM;
12129 #if defined(TARGET_AMD64)
/* On amd64 8-byte stores can be fused as well. */
12130 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit forms (reuse the X86_* opcodes) */
12135 return OP_X86_ADD_MEMBASE_REG;
12137 return OP_X86_SUB_MEMBASE_REG;
12139 return OP_X86_AND_MEMBASE_REG;
12141 return OP_X86_OR_MEMBASE_REG;
12143 return OP_X86_XOR_MEMBASE_REG;
12145 return OP_X86_ADD_MEMBASE_IMM;
12147 return OP_X86_SUB_MEMBASE_IMM;
12149 return OP_X86_AND_MEMBASE_IMM;
12151 return OP_X86_OR_MEMBASE_IMM;
12153 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit forms */
12155 return OP_AMD64_ADD_MEMBASE_REG;
12157 return OP_AMD64_SUB_MEMBASE_REG;
12159 return OP_AMD64_AND_MEMBASE_REG;
12161 return OP_AMD64_OR_MEMBASE_REG;
12163 return OP_AMD64_XOR_MEMBASE_REG;
12166 return OP_AMD64_ADD_MEMBASE_IMM;
12169 return OP_AMD64_SUB_MEMBASE_IMM;
12172 return OP_AMD64_AND_MEMBASE_IMM;
12175 return OP_AMD64_OR_MEMBASE_IMM;
12178 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with a following 1-byte store:
 * on x86/amd64 a setcc can write its 0/1 result straight to memory
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).  Only valid when the
 * store is OP_STOREI1_MEMBASE_REG.  NOTE(review): the condition case
 * labels and the default return are not visible in this excerpt.
 */
12188 op_to_op_store_membase (int store_opcode, int opcode)
12190 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12193 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12194 return OP_X86_SETEQ_MEMBASE;
12196 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12197 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding OPCODE's first source register (loaded via
 * LOAD_OPCODE) into the instruction itself, producing an x86/amd64
 * "op <membase>" form (push/compare with a memory first operand).
 * Returns the fused opcode; the not-convertible path (presumably -1)
 * is not visible in this excerpt -- NOTE(review): confirm.
 */
12205 op_to_op_src1_membase (int load_opcode, int opcode)
12208 /* FIXME: This has sign extension issues */
/* Special case: byte-wide unsigned load + 32-bit immediate compare */
12210 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12211 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer/32-bit loads can be folded on x86 */
12214 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12219 return OP_X86_PUSH_MEMBASE;
12220 case OP_COMPARE_IMM:
12221 case OP_ICOMPARE_IMM:
12222 return OP_X86_COMPARE_MEMBASE_IMM;
12225 return OP_X86_COMPARE_MEMBASE_REG;
12229 #ifdef TARGET_AMD64
12230 /* FIXME: This has sign extension issues */
12232 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12233 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE 8 */
12238 #ifdef __mono_ilp32__
12239 if (load_opcode == OP_LOADI8_MEMBASE)
12241 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12243 return OP_X86_PUSH_MEMBASE;
12245 /* FIXME: This only works for 32 bit immediates
12246 case OP_COMPARE_IMM:
12247 case OP_LCOMPARE_IMM:
12248 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12249 return OP_AMD64_COMPARE_MEMBASE_IMM;
12251 case OP_ICOMPARE_IMM:
12252 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12253 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12257 #ifdef __mono_ilp32__
12258 if (load_opcode == OP_LOAD_MEMBASE)
12259 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12260 if (load_opcode == OP_LOADI8_MEMBASE)
12262 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12264 return OP_AMD64_COMPARE_MEMBASE_REG;
12267 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12268 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding OPCODE's second source register (loaded via
 * LOAD_OPCODE) into the instruction, producing an x86/amd64
 * "op reg, <membase>" form.  Width selection mirrors
 * op_to_op_src1_membase; the not-convertible path (presumably -1) is
 * not visible in this excerpt -- NOTE(review): confirm.
 */
12277 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/32-bit loads can be folded */
12280 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12286 return OP_X86_COMPARE_REG_MEMBASE;
12288 return OP_X86_ADD_REG_MEMBASE;
12290 return OP_X86_SUB_REG_MEMBASE;
12292 return OP_X86_AND_REG_MEMBASE;
12294 return OP_X86_OR_REG_MEMBASE;
12296 return OP_X86_XOR_REG_MEMBASE;
12300 #ifdef TARGET_AMD64
/* Under ILP32 (x32) OP_LOAD_MEMBASE counts as a 32-bit load */
12301 #ifdef __mono_ilp32__
12302 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12304 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit forms (reuse the X86_* opcodes) */
12308 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12310 return OP_X86_ADD_REG_MEMBASE;
12312 return OP_X86_SUB_REG_MEMBASE;
12314 return OP_X86_AND_REG_MEMBASE;
12316 return OP_X86_OR_REG_MEMBASE;
12318 return OP_X86_XOR_REG_MEMBASE;
12320 #ifdef __mono_ilp32__
12321 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12323 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit forms */
12328 return OP_AMD64_COMPARE_REG_MEMBASE;
12330 return OP_AMD64_ADD_REG_MEMBASE;
12332 return OP_AMD64_SUB_REG_MEMBASE;
12334 return OP_AMD64_AND_REG_MEMBASE;
12336 return OP_AMD64_OR_REG_MEMBASE;
12338 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that this configuration emulates in software (long shifts on 32-bit
 * targets without native long-shift support, and mul/div/rem when
 * MONO_ARCH_EMULATE_* is defined), since their _IMM variants would have
 * no emulation path.  NOTE(review): the refused cases' bodies are not
 * visible in this excerpt; presumably they return -1 -- confirm.
 */
12347 mono_op_to_op_imm_noemul (int opcode)
12350 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12356 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12363 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can use the normal immediate mapping. */
12368 return mono_op_to_op_imm (opcode);
12373 * mono_handle_global_vregs:
12375 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12379 mono_handle_global_vregs (MonoCompile *cfg)
12381 gint32 *vreg_to_bb;
12382 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] is 0 if unseen, (block_num + 1) if seen in exactly one
 * bblock, and -1 once the vreg has been seen in two different bblocks.
 * NOTE(review): the element size here is sizeof (gint32*) but the array is
 * declared gint32* -- should presumably be sizeof (gint32); also the "+ 1"
 * is outside the multiplication, so it adds one byte, not one element.
 * Harmless over/under-sizing aside, this looks like a copy-paste slip --
 * confirm and fix upstream.
 */
12385 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12387 #ifdef MONO_ARCH_SIMD_INTRINSICS
12388 if (cfg->uses_simd_intrinsics)
12389 mono_simd_simplify_indirection (cfg);
12392 /* Find local vregs used in more than one bb */
12393 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12394 MonoInst *ins = bb->code;
12395 int block_num = bb->block_num;
12397 if (cfg->verbose_level > 2)
12398 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12401 for (; ins; ins = ins->next) {
12402 const char *spec = INS_INFO (ins->opcode);
12403 int regtype = 0, regindex;
12406 if (G_UNLIKELY (cfg->verbose_level > 2))
12407 mono_print_ins (ins);
/* By this point only low-level IR opcodes should remain. */
12409 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn (regindex 0..3). */
12411 for (regindex = 0; regindex < 4; regindex ++) {
12414 if (regindex == 0) {
12415 regtype = spec [MONO_INST_DEST];
12416 if (regtype == ' ')
12419 } else if (regindex == 1) {
12420 regtype = spec [MONO_INST_SRC1];
12421 if (regtype == ' ')
12424 } else if (regindex == 2) {
12425 regtype = spec [MONO_INST_SRC2];
12426 if (regtype == ' ')
12429 } else if (regindex == 3) {
12430 regtype = spec [MONO_INST_SRC3];
12431 if (regtype == ' ')
12436 #if SIZEOF_REGISTER == 4
12437 /* In the LLVM case, the long opcodes are not decomposed */
12438 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12440 * Since some instructions reference the original long vreg,
12441 * and some reference the two component vregs, it is quite hard
12442 * to determine when it needs to be global. So be conservative.
12444 if (!get_vreg_to_inst (cfg, vreg)) {
12445 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12447 if (cfg->verbose_level > 2)
12448 printf ("LONG VREG R%d made global.\n", vreg);
12452 * Make the component vregs volatile since the optimizations can
12453 * get confused otherwise.
12455 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12456 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12460 g_assert (vreg != -1);
12462 prev_bb = vreg_to_bb [vreg];
12463 if (prev_bb == 0) {
12464 /* 0 is a valid block num */
12465 vreg_to_bb [vreg] = block_num + 1;
12466 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the register allocator, skip. */
12467 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12470 if (!get_vreg_to_inst (cfg, vreg)) {
12471 if (G_UNLIKELY (cfg->verbose_level > 2))
12472 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num)...
12476 if (vreg_is_ref (cfg, vreg))
12477 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12479 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12482 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12485 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12488 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12491 g_assert_not_reached ();
12495 /* Flag as having been used in more than one bb */
12496 vreg_to_bb [vreg] = -1;
12502 /* If a variable is used in only one bblock, convert it into a local vreg */
12503 for (i = 0; i < cfg->num_varinfo; i++) {
12504 MonoInst *var = cfg->varinfo [i];
12505 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12507 switch (var->type) {
12513 #if SIZEOF_REGISTER == 8
12516 #if !defined(TARGET_X86)
12517 /* Enabling this screws up the fp stack on x86 */
12520 if (mono_arch_is_soft_float ())
12523 /* Arguments are implicitly global */
12524 /* Putting R4 vars into registers doesn't work currently */
12525 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12526 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var) {
12528 * Make that the variable's liveness interval doesn't contain a call, since
12529 * that would cause the lvreg to be spilled, making the whole optimization
12532 /* This is too slow for JIT compilation */
12534 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12536 int def_index, call_index, ins_index;
12537 gboolean spilled = FALSE;
/* Scan the single bblock the var lives in, tracking the last
 * definition and the last call to detect a def..use span that
 * crosses a call (which would force a spill). */
12542 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12543 const char *spec = INS_INFO (ins->opcode);
12545 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12546 def_index = ins_index;
/*
 * NOTE(review): the second disjunct below repeats the SRC1/sreg1
 * test verbatim -- almost certainly a copy-paste error for
 * SRC2/sreg2, so uses of the variable via the second source
 * register never trigger the spilled check.  Confirm against the
 * upstream fix before changing; cannot be safely patched from this
 * excerpt alone.
 */
12548 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12549 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12550 if (call_index > def_index) {
12556 if (MONO_IS_CALL (ins))
12557 call_index = ins_index;
12567 if (G_UNLIKELY (cfg->verbose_level > 2))
12568 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and unlink it from its vreg. */
12569 var->flags |= MONO_INST_IS_DEAD;
12570 cfg->vreg_to_inst [var->dreg] = NULL;
12577 * Compress the varinfo and vars tables so the liveness computation is faster and
12578 * takes up less space.
12581 for (i = 0; i < cfg->num_varinfo; ++i) {
12582 MonoInst *var = cfg->varinfo [i];
12583 if (pos < i && cfg->locals_start == i)
12584 cfg->locals_start = pos;
12585 if (!(var->flags & MONO_INST_IS_DEAD)) {
12587 cfg->varinfo [pos] = cfg->varinfo [i];
12588 cfg->varinfo [pos]->inst_c0 = pos;
12589 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12590 cfg->vars [pos].idx = pos;
12591 #if SIZEOF_REGISTER == 4
12592 if (cfg->varinfo [pos]->type == STACK_I8) {
12593 /* Modify the two component vars too */
12596 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12597 var1->inst_c0 = pos;
12598 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12599 var1->inst_c0 = pos;
12606 cfg->num_varinfo = pos;
12607 if (cfg->locals_start > cfg->num_varinfo)
12608 cfg->locals_start = cfg->num_varinfo;
12612 * mono_spill_global_vars:
12614 * Generate spill code for variables which are not allocated to registers,
12615 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12616 * code is generated which could be optimized by the local optimization passes.
12619 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12621 MonoBasicBlock *bb;
12623 int orig_next_vreg;
12624 guint32 *vreg_to_lvreg;
12626 guint32 i, lvregs_len;
12627 gboolean dest_has_lvreg = FALSE;
12628 guint32 stacktypes [128];
12629 MonoInst **live_range_start, **live_range_end;
12630 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12631 int *gsharedvt_vreg_to_idx = NULL;
12633 *need_local_opts = FALSE;
12635 memset (spec2, 0, sizeof (spec2));
12637 /* FIXME: Move this function to mini.c */
12638 stacktypes ['i'] = STACK_PTR;
12639 stacktypes ['l'] = STACK_I8;
12640 stacktypes ['f'] = STACK_R8;
12641 #ifdef MONO_ARCH_SIMD_INTRINSICS
12642 stacktypes ['x'] = STACK_VTYPE;
12645 #if SIZEOF_REGISTER == 4
12646 /* Create MonoInsts for longs */
12647 for (i = 0; i < cfg->num_varinfo; i++) {
12648 MonoInst *ins = cfg->varinfo [i];
12650 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12651 switch (ins->type) {
12656 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12659 g_assert (ins->opcode == OP_REGOFFSET);
12661 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12663 tree->opcode = OP_REGOFFSET;
12664 tree->inst_basereg = ins->inst_basereg;
12665 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12667 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12669 tree->opcode = OP_REGOFFSET;
12670 tree->inst_basereg = ins->inst_basereg;
12671 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12681 if (cfg->compute_gc_maps) {
12682 /* registers need liveness info even for !non refs */
12683 for (i = 0; i < cfg->num_varinfo; i++) {
12684 MonoInst *ins = cfg->varinfo [i];
12686 if (ins->opcode == OP_REGVAR)
12687 ins->flags |= MONO_INST_GC_TRACK;
12691 if (cfg->gsharedvt) {
12692 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12694 for (i = 0; i < cfg->num_varinfo; ++i) {
12695 MonoInst *ins = cfg->varinfo [i];
12698 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12699 if (i >= cfg->locals_start) {
12701 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12702 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12703 ins->opcode = OP_GSHAREDVT_LOCAL;
12704 ins->inst_imm = idx;
12707 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12708 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12714 /* FIXME: widening and truncation */
12717 * As an optimization, when a variable allocated to the stack is first loaded into
12718 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12719 * the variable again.
12721 orig_next_vreg = cfg->next_vreg;
12722 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12723 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12727 * These arrays contain the first and last instructions accessing a given
12729 * Since we emit bblocks in the same order we process them here, and we
12730 * don't split live ranges, these will precisely describe the live range of
12731 * the variable, i.e. the instruction range where a valid value can be found
12732 * in the variables location.
12733 * The live range is computed using the liveness info computed by the liveness pass.
12734 * We can't use vmv->range, since that is an abstract live range, and we need
12735 * one which is instruction precise.
12736 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12738 /* FIXME: Only do this if debugging info is requested */
12739 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12740 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12741 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12742 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12744 /* Add spill loads/stores */
12745 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12748 if (cfg->verbose_level > 2)
12749 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12751 /* Clear vreg_to_lvreg array */
12752 for (i = 0; i < lvregs_len; i++)
12753 vreg_to_lvreg [lvregs [i]] = 0;
12757 MONO_BB_FOR_EACH_INS (bb, ins) {
12758 const char *spec = INS_INFO (ins->opcode);
12759 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12760 gboolean store, no_lvreg;
12761 int sregs [MONO_MAX_SRC_REGS];
12763 if (G_UNLIKELY (cfg->verbose_level > 2))
12764 mono_print_ins (ins);
12766 if (ins->opcode == OP_NOP)
12770 * We handle LDADDR here as well, since it can only be decomposed
12771 * when variable addresses are known.
12773 if (ins->opcode == OP_LDADDR) {
12774 MonoInst *var = ins->inst_p0;
12776 if (var->opcode == OP_VTARG_ADDR) {
12777 /* Happens on SPARC/S390 where vtypes are passed by reference */
12778 MonoInst *vtaddr = var->inst_left;
12779 if (vtaddr->opcode == OP_REGVAR) {
12780 ins->opcode = OP_MOVE;
12781 ins->sreg1 = vtaddr->dreg;
12783 else if (var->inst_left->opcode == OP_REGOFFSET) {
12784 ins->opcode = OP_LOAD_MEMBASE;
12785 ins->inst_basereg = vtaddr->inst_basereg;
12786 ins->inst_offset = vtaddr->inst_offset;
12789 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12790 /* gsharedvt arg passed by ref */
12791 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12793 ins->opcode = OP_LOAD_MEMBASE;
12794 ins->inst_basereg = var->inst_basereg;
12795 ins->inst_offset = var->inst_offset;
12796 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12797 MonoInst *load, *load2, *load3;
12798 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12799 int reg1, reg2, reg3;
12800 MonoInst *info_var = cfg->gsharedvt_info_var;
12801 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12805 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12808 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12810 g_assert (info_var);
12811 g_assert (locals_var);
12813 /* Mark the instruction used to compute the locals var as used */
12814 cfg->gsharedvt_locals_var_ins = NULL;
12816 /* Load the offset */
12817 if (info_var->opcode == OP_REGOFFSET) {
12818 reg1 = alloc_ireg (cfg);
12819 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12820 } else if (info_var->opcode == OP_REGVAR) {
12822 reg1 = info_var->dreg;
12824 g_assert_not_reached ();
12826 reg2 = alloc_ireg (cfg);
12827 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12828 /* Load the locals area address */
12829 reg3 = alloc_ireg (cfg);
12830 if (locals_var->opcode == OP_REGOFFSET) {
12831 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12832 } else if (locals_var->opcode == OP_REGVAR) {
12833 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12835 g_assert_not_reached ();
12837 /* Compute the address */
12838 ins->opcode = OP_PADD;
12842 mono_bblock_insert_before_ins (bb, ins, load3);
12843 mono_bblock_insert_before_ins (bb, load3, load2);
12845 mono_bblock_insert_before_ins (bb, load2, load);
12847 g_assert (var->opcode == OP_REGOFFSET);
12849 ins->opcode = OP_ADD_IMM;
12850 ins->sreg1 = var->inst_basereg;
12851 ins->inst_imm = var->inst_offset;
12854 *need_local_opts = TRUE;
12855 spec = INS_INFO (ins->opcode);
12858 if (ins->opcode < MONO_CEE_LAST) {
12859 mono_print_ins (ins);
12860 g_assert_not_reached ();
12864 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12868 if (MONO_IS_STORE_MEMBASE (ins)) {
12869 tmp_reg = ins->dreg;
12870 ins->dreg = ins->sreg2;
12871 ins->sreg2 = tmp_reg;
12874 spec2 [MONO_INST_DEST] = ' ';
12875 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12876 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12877 spec2 [MONO_INST_SRC3] = ' ';
12879 } else if (MONO_IS_STORE_MEMINDEX (ins))
12880 g_assert_not_reached ();
12885 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12886 printf ("\t %.3s %d", spec, ins->dreg);
12887 num_sregs = mono_inst_get_src_registers (ins, sregs);
12888 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12889 printf (" %d", sregs [srcindex]);
12896 regtype = spec [MONO_INST_DEST];
12897 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12900 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12901 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12902 MonoInst *store_ins;
12904 MonoInst *def_ins = ins;
12905 int dreg = ins->dreg; /* The original vreg */
12907 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12909 if (var->opcode == OP_REGVAR) {
12910 ins->dreg = var->dreg;
12911 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12913 * Instead of emitting a load+store, use a _membase opcode.
12915 g_assert (var->opcode == OP_REGOFFSET);
12916 if (ins->opcode == OP_MOVE) {
12920 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12921 ins->inst_basereg = var->inst_basereg;
12922 ins->inst_offset = var->inst_offset;
12925 spec = INS_INFO (ins->opcode);
12929 g_assert (var->opcode == OP_REGOFFSET);
12931 prev_dreg = ins->dreg;
12933 /* Invalidate any previous lvreg for this vreg */
12934 vreg_to_lvreg [ins->dreg] = 0;
12938 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12940 store_opcode = OP_STOREI8_MEMBASE_REG;
12943 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12945 #if SIZEOF_REGISTER != 8
12946 if (regtype == 'l') {
12947 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12948 mono_bblock_insert_after_ins (bb, ins, store_ins);
12949 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12950 mono_bblock_insert_after_ins (bb, ins, store_ins);
12951 def_ins = store_ins;
12956 g_assert (store_opcode != OP_STOREV_MEMBASE);
12958 /* Try to fuse the store into the instruction itself */
12959 /* FIXME: Add more instructions */
12960 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12961 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12962 ins->inst_imm = ins->inst_c0;
12963 ins->inst_destbasereg = var->inst_basereg;
12964 ins->inst_offset = var->inst_offset;
12965 spec = INS_INFO (ins->opcode);
12966 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12967 ins->opcode = store_opcode;
12968 ins->inst_destbasereg = var->inst_basereg;
12969 ins->inst_offset = var->inst_offset;
12973 tmp_reg = ins->dreg;
12974 ins->dreg = ins->sreg2;
12975 ins->sreg2 = tmp_reg;
12978 spec2 [MONO_INST_DEST] = ' ';
12979 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12980 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12981 spec2 [MONO_INST_SRC3] = ' ';
12983 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12984 // FIXME: The backends expect the base reg to be in inst_basereg
12985 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12987 ins->inst_basereg = var->inst_basereg;
12988 ins->inst_offset = var->inst_offset;
12989 spec = INS_INFO (ins->opcode);
12991 /* printf ("INS: "); mono_print_ins (ins); */
12992 /* Create a store instruction */
12993 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12995 /* Insert it after the instruction */
12996 mono_bblock_insert_after_ins (bb, ins, store_ins);
12998 def_ins = store_ins;
13001 * We can't assign ins->dreg to var->dreg here, since the
13002 * sregs could use it. So set a flag, and do it after
13005 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13006 dest_has_lvreg = TRUE;
13011 if (def_ins && !live_range_start [dreg]) {
13012 live_range_start [dreg] = def_ins;
13013 live_range_start_bb [dreg] = bb;
13016 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13019 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13020 tmp->inst_c1 = dreg;
13021 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13028 num_sregs = mono_inst_get_src_registers (ins, sregs);
13029 for (srcindex = 0; srcindex < 3; ++srcindex) {
13030 regtype = spec [MONO_INST_SRC1 + srcindex];
13031 sreg = sregs [srcindex];
13033 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13034 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13035 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13036 MonoInst *use_ins = ins;
13037 MonoInst *load_ins;
13038 guint32 load_opcode;
13040 if (var->opcode == OP_REGVAR) {
13041 sregs [srcindex] = var->dreg;
13042 //mono_inst_set_src_registers (ins, sregs);
13043 live_range_end [sreg] = use_ins;
13044 live_range_end_bb [sreg] = bb;
13046 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13049 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13050 /* var->dreg is a hreg */
13051 tmp->inst_c1 = sreg;
13052 mono_bblock_insert_after_ins (bb, ins, tmp);
13058 g_assert (var->opcode == OP_REGOFFSET);
13060 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13062 g_assert (load_opcode != OP_LOADV_MEMBASE);
13064 if (vreg_to_lvreg [sreg]) {
13065 g_assert (vreg_to_lvreg [sreg] != -1);
13067 /* The variable is already loaded to an lvreg */
13068 if (G_UNLIKELY (cfg->verbose_level > 2))
13069 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13070 sregs [srcindex] = vreg_to_lvreg [sreg];
13071 //mono_inst_set_src_registers (ins, sregs);
13075 /* Try to fuse the load into the instruction */
13076 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13077 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13078 sregs [0] = var->inst_basereg;
13079 //mono_inst_set_src_registers (ins, sregs);
13080 ins->inst_offset = var->inst_offset;
13081 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13082 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13083 sregs [1] = var->inst_basereg;
13084 //mono_inst_set_src_registers (ins, sregs);
13085 ins->inst_offset = var->inst_offset;
13087 if (MONO_IS_REAL_MOVE (ins)) {
13088 ins->opcode = OP_NOP;
13091 //printf ("%d ", srcindex); mono_print_ins (ins);
13093 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13095 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13096 if (var->dreg == prev_dreg) {
13098 * sreg refers to the value loaded by the load
13099 * emitted below, but we need to use ins->dreg
13100 * since it refers to the store emitted earlier.
13104 g_assert (sreg != -1);
13105 vreg_to_lvreg [var->dreg] = sreg;
13106 g_assert (lvregs_len < 1024);
13107 lvregs [lvregs_len ++] = var->dreg;
13111 sregs [srcindex] = sreg;
13112 //mono_inst_set_src_registers (ins, sregs);
13114 #if SIZEOF_REGISTER != 8
13115 if (regtype == 'l') {
13116 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13117 mono_bblock_insert_before_ins (bb, ins, load_ins);
13118 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13119 mono_bblock_insert_before_ins (bb, ins, load_ins);
13120 use_ins = load_ins;
13125 #if SIZEOF_REGISTER == 4
13126 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13128 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13129 mono_bblock_insert_before_ins (bb, ins, load_ins);
13130 use_ins = load_ins;
13134 if (var->dreg < orig_next_vreg) {
13135 live_range_end [var->dreg] = use_ins;
13136 live_range_end_bb [var->dreg] = bb;
13139 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13142 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13143 tmp->inst_c1 = var->dreg;
13144 mono_bblock_insert_after_ins (bb, ins, tmp);
13148 mono_inst_set_src_registers (ins, sregs);
13150 if (dest_has_lvreg) {
13151 g_assert (ins->dreg != -1);
13152 vreg_to_lvreg [prev_dreg] = ins->dreg;
13153 g_assert (lvregs_len < 1024);
13154 lvregs [lvregs_len ++] = prev_dreg;
13155 dest_has_lvreg = FALSE;
13159 tmp_reg = ins->dreg;
13160 ins->dreg = ins->sreg2;
13161 ins->sreg2 = tmp_reg;
13164 if (MONO_IS_CALL (ins)) {
13165 /* Clear vreg_to_lvreg array */
13166 for (i = 0; i < lvregs_len; i++)
13167 vreg_to_lvreg [lvregs [i]] = 0;
13169 } else if (ins->opcode == OP_NOP) {
13171 MONO_INST_NULLIFY_SREGS (ins);
13174 if (cfg->verbose_level > 2)
13175 mono_print_ins_index (1, ins);
13178 /* Extend the live range based on the liveness info */
13179 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13180 for (i = 0; i < cfg->num_varinfo; i ++) {
13181 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13183 if (vreg_is_volatile (cfg, vi->vreg))
13184 /* The liveness info is incomplete */
13187 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13188 /* Live from at least the first ins of this bb */
13189 live_range_start [vi->vreg] = bb->code;
13190 live_range_start_bb [vi->vreg] = bb;
13193 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13194 /* Live at least until the last ins of this bb */
13195 live_range_end [vi->vreg] = bb->last_ins;
13196 live_range_end_bb [vi->vreg] = bb;
13202 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13204 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13205 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13207 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13208 for (i = 0; i < cfg->num_varinfo; ++i) {
13209 int vreg = MONO_VARINFO (cfg, i)->vreg;
13212 if (live_range_start [vreg]) {
13213 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13215 ins->inst_c1 = vreg;
13216 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13218 if (live_range_end [vreg]) {
13219 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13221 ins->inst_c1 = vreg;
13222 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13223 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13225 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13231 if (cfg->gsharedvt_locals_var_ins) {
13232 /* Nullify if unused */
13233 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13234 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13237 g_free (live_range_start);
13238 g_free (live_range_end);
13239 g_free (live_range_start_bb);
13240 g_free (live_range_end_bb);
13245 * - use 'iadd' instead of 'int_add'
13246 * - handling ovf opcodes: decompose in method_to_ir.
13247 * - unify iregs/fregs
13248 * -> partly done, the missing parts are:
13249 * - a more complete unification would involve unifying the hregs as well, so
13250 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13251 * would no longer map to the machine hregs, so the code generators would need to
13252 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13253 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13254 * fp/non-fp branches speeds it up by about 15%.
13255 * - use sext/zext opcodes instead of shifts
13257 * - get rid of TEMPLOADs if possible and use vregs instead
13258 * - clean up usage of OP_P/OP_ opcodes
13259 * - cleanup usage of DUMMY_USE
13260 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13262 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13263 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13264 * - make sure handle_stack_args () is called before the branch is emitted
13265 * - when the new IR is done, get rid of all unused stuff
13266 * - COMPARE/BEQ as separate instructions or unify them ?
13267 * - keeping them separate allows specialized compare instructions like
13268 * compare_imm, compare_membase
13269 * - most back ends unify fp compare+branch, fp compare+ceq
13270 * - integrate mono_save_args into inline_method
13271 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13272 * - handle long shift opts on 32 bit platforms somehow: they require
13273 * 3 sregs (2 for arg1 and 1 for arg2)
13274 * - make byref a 'normal' type.
13275 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13276 * variable if needed.
13277 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13278 * like inline_method.
13279 * - remove inlining restrictions
13280 * - fix LNEG and enable cfold of INEG
13281 * - generalize x86 optimizations like ldelema as a peephole optimization
13282 * - add store_mem_imm for amd64
13283 * - optimize the loading of the interruption flag in the managed->native wrappers
13284 * - avoid special handling of OP_NOP in passes
13285 * - move code inserting instructions into one function/macro.
13286 * - try a coalescing phase after liveness analysis
13287 * - add float -> vreg conversion + local optimizations on !x86
13288 * - figure out how to handle decomposed branches during optimizations, ie.
13289 * compare+branch, op_jump_table+op_br etc.
13290 * - promote RuntimeXHandles to vregs
13291 * - vtype cleanups:
13292 * - add a NEW_VARLOADA_VREG macro
13293 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13294 * accessing vtype fields.
13295 * - get rid of I8CONST on 64 bit platforms
13296 * - dealing with the increase in code size due to branches created during opcode
13298 * - use extended basic blocks
13299 * - all parts of the JIT
13300 * - handle_global_vregs () && local regalloc
13301 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13302 * - sources of increase in code size:
13305 * - isinst and castclass
13306 * - lvregs not allocated to global registers even if used multiple times
13307 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13309 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13310 * - add all micro optimizations from the old JIT
13311 * - put tree optimizations into the deadce pass
13312 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13313 * specific function.
13314 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13315 * fcompare + branchCC.
13316 * - create a helper function for allocating a stack slot, taking into account
13317 * MONO_CFG_HAS_SPILLUP.
13319 * - merge the ia64 switch changes.
13320 * - optimize mono_regstate2_alloc_int/float.
13321 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13322 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13323 * parts of the tree could be separated by other instructions, killing the tree
13324 * arguments, or stores killing loads etc. Also, should we fold loads into other
13325 * instructions if the result of the load is used multiple times ?
13326 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13327 * - LAST MERGE: 108395.
13328 * - when returning vtypes in registers, generate IR and append it to the end of the
13329 * last bb instead of doing it in the epilog.
13330 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13338 - When to decompose opcodes:
13339 - earlier: this makes some optimizations hard to implement, since the low level IR
13340 no longer contains the necessary information. But it is easier to do.
13341 - later: harder to implement, enables more optimizations.
13342 - Branches inside bblocks:
13343 - created when decomposing complex opcodes.
13344 - branches to another bblock: harmless, but not tracked by the branch
13345 optimizations, so need to branch to a label at the start of the bblock.
13346 - branches to inside the same bblock: very problematic, trips up the local
13347 reg allocator. Can be fixed by splitting the current bblock, but that is a
13348 complex operation, since some local vregs can become global vregs etc.
13349 - Local/global vregs:
13350 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13351 local register allocator.
13352 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13353 structure, created by mono_create_var (). Assigned to hregs or the stack by
13354 the global register allocator.
13355 - When to do optimizations like alu->alu_imm:
13356 - earlier -> saves work later on since the IR will be smaller/simpler
13357 - later -> can work on more instructions
13358 - Handling of valuetypes:
13359 - When a vtype is pushed on the stack, a new temporary is created, an
13360 instruction computing its address (LDADDR) is emitted and pushed on
13361 the stack. Need to optimize cases when the vtype is used immediately as in
13362 argument passing, stloc etc.
13363 - Instead of the to_end stuff in the old JIT, simply call the function handling
13364 the values on the stack before emitting the last instruction of the bb.
13367 #endif /* DISABLE_JIT */