2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inliner heuristics: relative cost assigned to a branch, and the maximum IL
 * size (in bytes) of a callee considered for inlining. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee (only when we are emitting IR for an
 * inlined body, i.e. cfg->method != method, and it is not a wrapper) and jump
 * to the inline_failure label; logs the reason at verbose level >= 2. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Stop IR generation as soon as an exception has been recorded on the cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on the cfg with a message naming both the
 * inaccessible method and the caller, free the temporary name strings, and
 * jump to exception_exit. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE, but for an inaccessible field. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* When compiling with generic sharing, give up on sharing for OPCODE: record
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit so the
 * method can be recompiled as a concrete instantiation instead. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* gsharedvt analogue of GENERIC_SHARING_FAILURE; also stores the failure
 * reason (including __FILE__/__LINE__) in cfg->exception_message. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on the cfg and stop IR generation. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument:
 * a plain OP_MOVE whose source is the vreg of argument 0 of a method
 * that has a 'this' parameter. */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Cached call signatures for the JIT trampolines; created once by
 * mono_create_helper_signatures () below and reused by every compilation. */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* MINI_OP/MINI_OP3 expand each entry of mini-ops.h into per-opcode metadata;
 * this first expansion emits the dest/src1/src2[/src3] register classes. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Redefine the macros so a second include of mini-ops.h produces, for each
 * opcode, the count of source registers it uses. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* Initialize a variable's liveness/range info; 0xffff appears to be an
 * "unset" sentinel for the first-use basic-block id — TODO confirm. */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers of INS from REGS[0..2]. */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Allocate a fresh integer vreg. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
/* Allocate a fresh floating-point vreg. */
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/* Classify TYPE for register-to-register moves. Enums are reduced to their
 * underlying basetype and generic instances to their container's byval type
 * before classification; value types may take the SIMD path when
 * MONO_CLASS_IS_SIMD holds. NOTE(review): the return statements are not
 * visible in this chunk — confirm the exact move-opcode values against the
 * full source. */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
/* Enums behave as their underlying integral type. */
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
/* Re-classify using the generic container's byval type. */
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's number, its incoming and outgoing
 * edges (block number and depth-first number), and every instruction in it,
 * prefixed with MSG. */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/* Build the cached trampoline signatures declared at the top of this file.
 * Each string is "<ret> <args...>" in the icall signature mini-language. */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method as unverifiable. Under gsharedvt this instead records a
 * generic-sharing failure so the method falls back to a concrete
 * instantiation; otherwise it optionally breaks into the debugger. */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* Signal a metadata load error; breaks into the debugger when the
 * break_on_unverified debug option is set. */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Like LOAD_ERROR, but records the failing class in cfg->exception_ptr. */
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Fetch (or lazily create and register) the basic block starting at IL
 * address IP, using the cil_offset_to_bb cache. Out-of-range IPs are
 * unverifiable. */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh managed-pointer vreg. */
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen a 32-bit operand (sign-extend) when it is mixed
 * with a pointer-sized one in a binary op; a no-op on 32-bit targets. */
400 #if SIZEOF_REGISTER == 8
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop the two top stack entries, emit binary opcode OP on them (inserting a
 * widening op if operand sizes differ), allocate the destination vreg from
 * the result type computed by type_from_op (), and push the (possibly
 * decomposed) result. */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary analogue of ADD_BINOP: pop one entry, emit OP, push the result. */
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: an OP_COMPARE of the two top stack
 * entries followed by the typed branch. Links the current bblock to both
 * the branch target and the fall-through block (NEXT_BLOCK when provided,
 * otherwise the block at IP), flushing any pending stack entries first. */
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * control flow reaches after 'from'.
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are the synthetic
 * blocks without cil_code). */
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one; edge arrays live in the compile mempool, so the
 * old array is simply abandoned rather than freed. */
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for to->in_bb. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
/* Scan the method's EH clauses; the region token is
 * ((clause index + 1) << 8) | region kind | clause flags. */
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
/* Filter expressions live between filter_offset and handler_offset. */
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not in any handler. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) every EH clause of kind TYPE whose protected range
 * contains IP but not TARGET — i.e. the handlers a branch from IP to TARGET
 * leaves, e.g. the finally blocks a 'leave' must run. */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/* Return the stack-pointer save variable for an EH region, creating and
 * caching it in cfg->spvars on first use. */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler
 * starting at OFFSET. */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return the exception-object variable for the handler at OFFSET, creating
 * and caching it in cfg->exvars on first use. */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_INDIRECT;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/MP/INV) and inst->klass
 * from the loaded MonoType. */
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref loads produce a managed pointer. */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
/* Enums load as their underlying integral type. */
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Shared generic parameter: gsharedvt types are treated as vtypes,
 * reference-constrained ones as objects. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops (add/sub/...), indexed by the stack
 * types of the two operands; STACK_INV marks illegal combinations. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...); no float/MP rows. */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, non-zero = allowed (the exact
 * non-zero codes distinguish ordinary, unverifiable and reference cases —
 * see the & 1 test in type_from_op ()). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* Result type of shifts indexed by [value type][shift-amount type]. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
/* The generic pattern throughout: look up the result type in one of the
 * validation tables above, then specialize the generic opcode by adding the
 * per-stack-type delta from the matching *_op_map table. */
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick L/F/I compare based on the operand width. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
889 ins->opcode += beqops_op_map [src1->type];
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
/* Ordered comparisons: only table entries with bit 0 set are verifiable. */
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float conversion. */
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; on 64-bit a ptr source is just a move. */
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Eval-stack type produced per opcode in a contiguous opcode family —
 * presumably the CEE_LDIND_* group (I1..REF); TODO confirm the declaring
 * line, which is not visible in this chunk. */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Compatibility matrix of stack types vs. signature parameter types. */
1058 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the argument instructions ARGS match SIG (and the optional
 * 'this' type): byref-ness and broad type class (int/ref/float) per
 * parameter. Returns 0 on mismatch (return paths not fully visible here). */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
/* A managed pointer argument requires a byref parameter... */
1082 if (!sig->params [i]->byref)
/* ...and a reference argument requires a non-byref reference type. */
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
/* A float argument needs an R4/R8 non-byref parameter. */
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on architectures that
 * define MONO_ARCH_NEED_GOT_VAR, and only when compiling AOT. */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used by generic sharing; forced
 * onto the stack (MONO_INST_INDIRECT) so it has a stable address. */
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1161 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * type back to a MonoType (using ins->klass for MP/OBJ/VTYPE entries). */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (after stripping enum wrappers via
 * mono_type_get_underlying_type); return values are not fully visible in
 * this chunk. */
1181 static G_GNUC_UNUSED int
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element class it accesses. */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/* Return a local variable for eval-stack slot SLOT holding a value of INS's
 * stack type, reusing a previously created one (cached in cfg->intvars,
 * keyed by slot and stack type) when available. Slots beyond the declared
 * max_stack (possible when inlining deepens the stack) always get a fresh
 * variable. */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* For AOT compilation, remember the image/token pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY back to a
 * metadata token. */
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single joint point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the joint point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: establish bb->out_stack, reusing a successor's in_stack if one exists. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* No successor has an in_stack yet: allocate fresh interface variables. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpouse, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack to each successor's in_stack; flag a mismatch
 * in stack depth at a join point as unverifiable IL. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Phase 3: spill the current evaluation stack into the shared locals and
 * replace the stack entries with loads of those locals. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* Successor uses a different variable set: store to its variables too. */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Loads the interface offset for KLASS from the interface_offsets table that
 * precedes the vtable in memory, into INTF_REG. Under AOT the interface id is
 * not known at compile time, so it is loaded via an ADJUSTED_IID patch and
 * added to the vtable pointer at runtime; the JIT path uses a constant
 * negative offset from VTABLE_REG instead.
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT path: the slot lives at a fixed negative offset before the vtable. */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit code that sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Three code paths: compressed bitmaps go through a JIT icall; AOT computes
 * the byte index and bit mask at runtime from an IID patch; the plain JIT
 * path folds both into compile-time constants.
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to mono_class_interface_match. */
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* AOT: iid is a patch, so compute byte index (iid >> 3) and bit mask
 * (1 << (iid & 7)) with emitted instructions. */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is known, so byte offset and mask are immediates. */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoClass. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoVTable. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whenever the interface id of @klass is smaller than
1527 * than the value given by max_iid_reg.
/*
 * On failure, either branch to FALSE_TARGET (when non-NULL, presumably — the
 * branch/throw selection lines are elided here) or raise InvalidCastException.
 */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
/* AOT: interface id is only known at load time, fetch it via an IID patch. */
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in MonoVTable. */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in MonoClass. */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance of" check using the supertype hierarchy: load the
 * candidate class's supertypes array and compare the entry at KLASS's
 * inheritance depth against KLASS (given as an inst, an AOT const, or an
 * immediate). Branches to TRUE_TARGET on match; the depth pre-check can
 * branch to FALSE_TARGET.
 * NOTE(review): some lines of this body are elided in this excerpt.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
1575 mono_class_setup_supertypes (klass);
/* Deep hierarchies: verify the candidate's idepth is large enough before
 * indexing its supertypes array. */
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check without a runtime klass instruction. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against a vtable: bounds-check the interface
 * id, load the interface bit, then either branch to TRUE_TARGET when set or
 * throw InvalidCastException when clear.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Bit set => the interface is implemented; bit clear => cast failure. */
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check for KLASS_REG and throw
 * InvalidCastException on mismatch. The expected class comes from KLASS_INST
 * (runtime value), an AOT class constant, or an immediate pointer.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without a runtime klass instruction. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG to KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN), instead of throwing on mismatch.
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check for the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. Arrays are handled by rank + element-class
 * checks (with special-casing around System.Enum, see below); other classes
 * use the supertypes-array walk.
 * NOTE(review): some lines of this body are elided in this excerpt.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* Array path: ranks must match exactly ... */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ... then check the element class (cast_class). */
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1683 if (klass->cast_class == mono_defaults.object_class) {
/* object[] target: reference elements always match; only enums need the
 * extra exact check against System.Enum. */
1684 int parent_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* A rank-1 SZARRAY target additionally requires the object to be a vector
 * (bounds == NULL), not a multi-dimensional array of rank 1. */
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes array, same scheme as
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without a runtime klass instruction. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline code that sets SIZE bytes at DESTREG+OFFSET to VAL (only
 * val == 0 is supported, see the assert). Small aligned sizes become a
 * single store-immediate; larger regions use register stores of descending
 * width (8/4/2/1 bytes), subject to alignment and NO_UNALIGNED_ACCESS.
 * NOTE(review): the loop/size-update lines are elided in this excerpt.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
/* Fast path: a single store covers the whole region. */
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store repeatedly. */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores until aligned. */
1768 /* This could be optimized further if neccesary */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline code that copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET as load/store pairs of descending width (8/4/2/1 bytes),
 * subject to ALIGN and NO_UNALIGNED_ACCESS. Unbounded sizes are rejected
 * by the assert to keep code expansion bounded.
 * NOTE(review): the loop/size-update lines are elided in this excerpt.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
/* Unaligned source/dest: copy byte-by-byte until aligned. */
1821 /* This could be optimized further if neccesary */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: copy 8 bytes at a time while possible. */
1833 if (SIZEOF_REGISTER == 8) {
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a return TYPE to the call opcode family (plain / _REG for calli /
 * VIRT for virtual calls): VOIDCALL for void, CALL for int/ptr/object,
 * LCALL for 64-bit, FCALL for float, VCALL for value types. Enums and
 * generic instances are unwrapped and retried.
 * NOTE(review): the byref early-out and several case labels are elided in
 * this excerpt; the switch relies on fall-through between the listed cases.
 */
1872 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to their concrete basic type first. */
1878 type = mini_get_basic_type_from_generic (gsctx, type);
1879 switch (type->type) {
1880 case MONO_TYPE_VOID:
1881 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1884 case MONO_TYPE_BOOLEAN:
1887 case MONO_TYPE_CHAR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_FNPTR:
1895 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1896 case MONO_TYPE_CLASS:
1897 case MONO_TYPE_STRING:
1898 case MONO_TYPE_OBJECT:
1899 case MONO_TYPE_SZARRAY:
1900 case MONO_TYPE_ARRAY:
1901 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1904 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1907 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1908 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
1909 if (type->data.klass->enumtype) {
1910 type = mono_class_enum_basetype (type->data.klass);
1913 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1914 case MONO_TYPE_TYPEDBYREF:
1915 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1916 case MONO_TYPE_GENERICINST:
/* Retry with the generic type definition's byval type. */
1917 type = &type->data.generic_class->container_class->byval_arg;
1920 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1926 * target_type_is_incompatible:
1927 * @cfg: MonoCompile context
1929 * Check that the item @arg on the evaluation stack can be stored
1930 * in the target type (can be a local, or field, etc).
1931 * The cfg arg can be used to check if we need verification or just
1934 * Returns: non-0 value if arg can't be stored on a target.
1937 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1939 MonoType *simple_type;
/* Byref targets only accept managed pointers (to the matching class) or
 * unmanaged pointers. */
1942 if (target->byref) {
1943 /* FIXME: check that the pointed to types match */
1944 if (arg->type == STACK_MP)
1945 return arg->klass != mono_class_from_mono_type (target);
1946 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type. */
1951 simple_type = mono_type_get_underlying_type (target);
1952 switch (simple_type->type) {
1953 case MONO_TYPE_VOID:
1957 case MONO_TYPE_BOOLEAN:
1960 case MONO_TYPE_CHAR:
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1967 /* STACK_MP is needed when setting pinned locals */
1968 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1973 case MONO_TYPE_FNPTR:
1975 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1976 * in native int. (#688008).
1978 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1981 case MONO_TYPE_CLASS:
1982 case MONO_TYPE_STRING:
1983 case MONO_TYPE_OBJECT:
1984 case MONO_TYPE_SZARRAY:
1985 case MONO_TYPE_ARRAY:
1986 if (arg->type != STACK_OBJ)
1988 /* FIXME: check type compatibility */
1992 if (arg->type != STACK_I8)
1997 if (arg->type != STACK_R8)
2000 case MONO_TYPE_VALUETYPE:
/* Value types must match both stack kind and exact class. */
2001 if (arg->type != STACK_VTYPE)
2003 klass = mono_class_from_mono_type (simple_type);
2004 if (klass != arg->klass)
2007 case MONO_TYPE_TYPEDBYREF:
2008 if (arg->type != STACK_VTYPE)
2010 klass = mono_class_from_mono_type (simple_type);
2011 if (klass != arg->klass)
2014 case MONO_TYPE_GENERICINST:
2015 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2016 if (arg->type != STACK_VTYPE)
2018 klass = mono_class_from_mono_type (simple_type);
2019 if (klass != arg->klass)
2023 if (arg->type != STACK_OBJ)
2025 /* FIXME: check type compatibility */
2029 case MONO_TYPE_MVAR:
/* Shared generics: a type variable is a vtype or an object depending on
 * the sharing mode. */
2030 g_assert (cfg->generic_sharing_context);
2031 if (mini_type_var_is_vt (cfg, simple_type)) {
2032 if (arg->type != STACK_VTYPE)
2035 if (arg->type != STACK_OBJ)
2040 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2046 * Prepare arguments for passing to a function call.
2047 * Return a non-zero value if the arguments can't be passed to the given
2049 * The type checks are not yet complete and some conversions may need
2050 * casts on 32 or 64 bit architectures.
2052 * FIXME: implement this using target_type_is_incompatible ()
2055 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2057 MonoType *simple_type;
/* The implicit 'this' argument must be an object, managed or raw pointer. */
2061 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2065 for (i = 0; i < sig->param_count; ++i) {
2066 if (sig->params [i]->byref) {
2067 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2071 simple_type = sig->params [i];
2072 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2074 switch (simple_type->type) {
2075 case MONO_TYPE_VOID:
2080 case MONO_TYPE_BOOLEAN:
2083 case MONO_TYPE_CHAR:
2086 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2092 case MONO_TYPE_FNPTR:
2093 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2096 case MONO_TYPE_CLASS:
2097 case MONO_TYPE_STRING:
2098 case MONO_TYPE_OBJECT:
2099 case MONO_TYPE_SZARRAY:
2100 case MONO_TYPE_ARRAY:
2101 if (args [i]->type != STACK_OBJ)
2106 if (args [i]->type != STACK_I8)
2111 if (args [i]->type != STACK_R8)
2114 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked as their underlying integral type. */
2115 if (simple_type->data.klass->enumtype) {
2116 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2119 if (args [i]->type != STACK_VTYPE)
2122 case MONO_TYPE_TYPEDBYREF:
2123 if (args [i]->type != STACK_VTYPE)
2126 case MONO_TYPE_GENERICINST:
/* Retry with the generic type definition's byval type. */
2127 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2131 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALLVIRT opcode to its direct-call counterpart.
 * NOTE(review): most case labels are elided in this excerpt.
 */
2139 callvirt_to_call (int opcode)
2144 case OP_VOIDCALLVIRT:
/* Unknown opcode: must never reach here. */
2153 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode,
 * used when the call target is loaded from memory (e.g. a vtable slot).
 */
2160 callvirt_to_call_membase (int opcode)
2164 return OP_CALL_MEMBASE;
2165 case OP_VOIDCALLVIRT:
2166 return OP_VOIDCALL_MEMBASE;
2168 return OP_FCALL_MEMBASE;
2170 return OP_LCALL_MEMBASE;
2172 return OP_VCALL_MEMBASE;
/* Unknown opcode: must never reach here. */
2174 g_assert_not_reached ();
2180 #ifdef MONO_ARCH_HAVE_IMT
2181 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit the hidden IMT (interface method table) argument for an interface
 * call: either the supplied IMT_ARG value or a constant for METHOD. Under
 * LLVM the register is recorded in call->imt_arg_reg; otherwise it is bound
 * to MONO_ARCH_IMT_REG (or handled by a per-arch hook when no IMT register
 * exists).
 */
2183 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2187 if (COMPILE_LLVM (cfg)) {
2188 method_reg = alloc_preg (cfg);
2191 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2192 } else if (cfg->compile_aot) {
2193 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2196 MONO_INST_NEW (cfg, ins, OP_PCONST);
2197 ins->inst_p0 = method;
2198 ins->dreg = method_reg;
2199 MONO_ADD_INS (cfg->cbb, ins);
2203 call->imt_arg_reg = method_reg;
2205 #ifdef MONO_ARCH_IMT_REG
2206 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2208 /* Need this to keep the IMT arg alive */
2209 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three-way constant materialization as above. */
2214 #ifdef MONO_ARCH_IMT_REG
2215 method_reg = alloc_preg (cfg);
2218 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2219 } else if (cfg->compile_aot) {
2220 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2223 MONO_INST_NEW (cfg, ins, OP_PCONST);
2224 ins->inst_p0 = method;
2225 ins->dreg = method_reg;
2226 MONO_ADD_INS (cfg->cbb, ins);
2229 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register: the architecture back end handles it. */
2231 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from MP describing a patch of TYPE
 * at IP with the given TARGET. Mempool-allocated: freed with the pool.
 */
2236 static MonoJumpInfo *
2237 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2239 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2243 ji->data.target = target;
/* Returns the generic-context usage flags of KLASS, or the fallback value
 * (elided here) when no generic sharing context is active. */
2249 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2251 if (cfg->generic_sharing_context)
2252 return mono_class_check_context_used (klass);
/* Returns the generic-context usage flags of METHOD, or the fallback value
 * (elided here) when no generic sharing context is active. */
2258 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2260 if (cfg->generic_sharing_context)
2261 return mono_method_check_context_used (method);
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: pick the call opcode (tailcall, calli,
 * virtual or direct), set up valuetype return handling (vret_var or an
 * OP_OUTARG_VTRETADDR temp), run the soft-float r8->r4 argument conversion
 * when needed, then let the LLVM or arch back end lower the argument
 * passing. Returns the call instruction (not yet added to a bblock).
 */
2266 inline static MonoCallInst *
2267 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2268 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2271 #ifdef MONO_ARCH_SOFT_FLOAT
2276 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2278 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2281 call->signature = sig;
2282 call->rgctx_reg = rgctx;
2284 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return, first flavor: reuse the caller's vret_addr.
 * NOTE(review): the conditions distinguishing these two vtype-return
 * branches are partially elided in this excerpt. */
2287 if (mini_type_is_vtype (cfg, sig->ret)) {
2288 call->vret_var = cfg->vret_addr;
2289 //g_assert_not_reached ();
2291 } else if (mini_type_is_vtype (cfg, sig->ret)) {
/* Second flavor: allocate a temp to receive the return value. */
2292 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2295 temp->backend.is_pinvoke = sig->pinvoke;
2298 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2299 * address of return value to increase optimization opportunities.
2300 * Before vtype decomposition, the dreg of the call ins itself represents the
2301 * fact the call modifies the return value. After decomposition, the call will
2302 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2303 * will be transformed into an LDADDR.
2305 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2306 loada->dreg = alloc_preg (cfg);
2307 loada->inst_p0 = temp;
2308 /* We reference the call too since call->dreg could change during optimization */
2309 loada->inst_p1 = call;
2310 MONO_ADD_INS (cfg->cbb, loada);
2312 call->inst.dreg = temp->dreg;
2314 call->vret_var = loada;
2315 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2316 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2318 #ifdef MONO_ARCH_SOFT_FLOAT
2319 if (COMPILE_SOFT_FLOAT (cfg)) {
2321 * If the call has a float argument, we would need to do an r8->r4 conversion using
2322 * an icall, but that cannot be done during the call sequence since it would clobber
2323 * the call registers + the stack. So we do it before emitting the call.
2325 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2327 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; treat it as a native int. */
2329 if (i >= sig->hasthis)
2330 t = sig->params [i - sig->hasthis];
2332 t = &mono_defaults.int_class->byval_arg;
2333 t = mono_type_get_underlying_type (t);
2335 if (!t->byref && t->type == MONO_TYPE_R4) {
2336 MonoInst *iargs [1];
2340 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2342 /* The result will be in an int vreg */
2343 call->args [i] = conv;
2349 call->need_unbox_trampoline = unbox_trampoline;
2352 if (COMPILE_LLVM (cfg))
2353 mono_llvm_emit_call (cfg, call);
2355 mono_arch_emit_call (cfg, call);
2357 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-argument area needed by any call. */
2360 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2361 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the runtime-generic-context argument (in RGCTX_REG) to CALL. When
 * the architecture dedicates a register to the rgctx it is bound as an
 * out-argument register; otherwise only the vreg is recorded.
 */
2367 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2369 #ifdef MONO_ARCH_RGCTX_REG
2370 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2371 cfg->uses_rgctx_reg = TRUE;
2372 call->rgctx_reg = TRUE;
2374 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. Optional IMT_ARG
 * and RGCTX_ARG hidden arguments are wired in when supplied. Returns the
 * call instruction.
 */
2381 inline static MonoInst*
2382 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh vreg before emitting the call args. */
2388 rgctx_reg = mono_alloc_preg (cfg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2392 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The indirect target address is the call's first source register. */
2394 call->inst.sreg1 = addr->dreg;
2397 emit_imt_argument (cfg, call, NULL, imt_arg);
2399 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2402 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2404 return (MonoInst*)call;
2408 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2410 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2413 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2414 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2416 #ifndef DISABLE_REMOTING
2417 gboolean might_be_remote = FALSE;
2419 gboolean virtual = this != NULL;
2420 gboolean enable_for_aot = TRUE;
2424 gboolean need_unbox_trampoline;
2427 rgctx_reg = mono_alloc_preg (cfg);
2428 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2431 if (method->string_ctor) {
2432 /* Create the real signature */
2433 /* FIXME: Cache these */
2434 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2435 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2440 context_used = mini_method_check_context_used (cfg, method);
2442 #ifndef DISABLE_REMOTING
2443 might_be_remote = this && sig->hasthis &&
2444 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2445 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2447 if (might_be_remote && context_used) {
2450 g_assert (cfg->generic_sharing_context);
2452 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2454 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2458 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2460 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2462 #ifndef DISABLE_REMOTING
2463 if (might_be_remote)
2464 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2467 call->method = method;
2468 call->inst.flags |= MONO_INST_HAS_METHOD;
2469 call->inst.inst_left = this;
2472 int vtable_reg, slot_reg, this_reg;
2474 this_reg = this->dreg;
2476 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2477 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2478 MonoInst *dummy_use;
2480 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2482 /* Make a call to delegate->invoke_impl */
2483 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2484 call->inst.inst_basereg = this_reg;
2485 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2486 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2488 /* We must emit a dummy use here because the delegate trampoline will
2489 replace the 'this' argument with the delegate target making this activation
2490 no longer a root for the delegate.
2491 This is an issue for delegates that target collectible code such as dynamic
2492 methods of GC'able assemblies.
2494 For a test case look into #667921.
2496 FIXME: a dummy use is not the best way to do it as the local register allocator
2497 will put it on a caller save register and spil it around the call.
2498 Ideally, we would either put it on a callee save register or only do the store part.
2500 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2502 return (MonoInst*)call;
2506 if ((!cfg->compile_aot || enable_for_aot) &&
2507 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2508 (MONO_METHOD_IS_FINAL (method) &&
2509 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2510 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2512 * the method is not virtual, we just need to ensure this is not null
2513 * and then we can call the method directly.
2515 #ifndef DISABLE_REMOTING
2516 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2518 * The check above ensures method is not gshared, this is needed since
2519 * gshared methods can't have wrappers.
2521 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2525 if (!method->string_ctor)
2526 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2528 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2529 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2531 * the method is virtual, but we can statically dispatch since either
2532 * it's class or the method itself are sealed.
2533 * But first we need to ensure it's not a null reference.
2535 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2537 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2539 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2541 vtable_reg = alloc_preg (cfg);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2543 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2545 #ifdef MONO_ARCH_HAVE_IMT
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
2550 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2553 if (slot_reg == -1) {
2554 slot_reg = alloc_preg (cfg);
2555 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2556 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2559 slot_reg = vtable_reg;
2560 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2561 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2562 #ifdef MONO_ARCH_HAVE_IMT
2564 g_assert (mono_method_signature (method)->generic_param_count);
2565 emit_imt_argument (cfg, call, call->method, imt_arg);
2570 call->inst.sreg1 = slot_reg;
2571 call->virtual = TRUE;
2575 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2578 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2580 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper around mono_emit_method_call_full (): emits a call to
 *   METHOD using its declared signature, with no imt argument and no rgctx
 *   argument (the trailing NULL, NULL).
 */
2584 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2586 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct (non-virtual, non-tail, no rgctx) call to the native
 *   function FUNC with signature SIG; the call instruction is appended to
 *   the current basic block (cfg->cbb).
 *   NOTE(review): lines between the visible statements are elided in this
 *   extract (e.g. where FUNC is stored into the call inst) — verify against
 *   the full source.
 */
2590 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* All-FALSE flags: not a calli target store, not virtual, no tail call,
 * no rgctx argument, no unbox trampoline. */
2597 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2600 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2602 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for the address FUNC.  The icall
 *   info (including its signature) is looked up by address, and the call is
 *   made through the icall's wrapper rather than FUNC directly.
 */
2606 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2608 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2612 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2616 * mono_emit_abs_call:
2618 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2620 inline static MonoInst*
2621 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2622 MonoMethodSignature *sig, MonoInst **args)
2624 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2628 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2631 if (cfg->abs_patches == NULL)
/* Direct-pointer hash/equality (keys are the MonoJumpInfo* themselves). */
2632 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2633 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The patch-info pointer stands in for the call address; fptr_is_patch
 * tells later passes to resolve it through abs_patches. */
2634 ins = mono_emit_native_call (cfg, ji, sig, args);
2635 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   After a call whose result is a small integer, emit an explicit sign/zero
 *   extension when needed.  This applies to pinvoke calls (and when LLVM is
 *   enabled), since native code may return sub-register-sized integers
 *   without initializing the upper bits of the return register.
 */
2640 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2642 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2643 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2647 * Native code might return non register sized integers
2648 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2650 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2651 case OP_LOADI1_MEMBASE:
2652 widen_op = OP_ICONV_TO_I1;
2654 case OP_LOADU1_MEMBASE:
2655 widen_op = OP_ICONV_TO_U1;
2657 case OP_LOADI2_MEMBASE:
2658 widen_op = OP_ICONV_TO_I2;
2660 case OP_LOADU2_MEMBASE:
2661 widen_op = OP_ICONV_TO_U2;
2667 if (widen_op != -1) {
2668 int dreg = alloc_preg (cfg);
2671 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2672 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the managed String.memcpy(3 args) helper from corlib, caching it
 *   in a static on first use.  Aborts if the method is missing (old corlib).
 */
2682 get_memcpy_method (void)
2684 static MonoMethod *memcpy_method = NULL;
2685 if (!memcpy_method) {
2686 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2688 g_error ("Old corlib found. Install a new one");
2690 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Build a bitmap (one bit per pointer-sized slot, OR-ed into *wb_bitmap)
 *   marking which fields of KLASS hold object references, recursing into
 *   embedded value types that themselves contain references.  OFFSET is the
 *   running byte offset of KLASS within the outermost struct.
 */
2694 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2696 MonoClassField *field;
2697 gpointer iter = NULL;
2699 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields live elsewhere; only instance layout matters here. */
2702 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the MonoObject header; strip it. */
2704 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2705 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the per-slot bitmap. */
2706 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2707 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2709 MonoClass *field_class = mono_class_from_mono_type (field->type);
2710 if (field_class->has_references)
2711 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE (an MonoInst, or VALUE_REG when
 *   only a vreg is available) through the pointer PTR.  No-op unless the cfg
 *   requires write barriers.  Three strategies, in order of preference:
 *     1. a dedicated OP_CARD_TABLE_WBARRIER instruction (arch support, JIT
 *        only, card table present);
 *     2. inline card-table marking (shift ptr, optionally mask, store 1 into
 *        the card byte);
 *     3. a call to the generic managed write-barrier method.
 *   A dummy use of the value is emitted at the end to keep it alive across
 *   the barrier.
 */
2717 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2719 int card_table_shift_bits;
2720 gpointer card_table_mask;
2722 MonoInst *dummy_use;
2723 int nursery_shift_bits;
2724 size_t nursery_size;
2725 gboolean has_card_table_wb = FALSE;
2727 if (!cfg->gen_write_barriers)
2730 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2732 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2734 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2735 has_card_table_wb = TRUE;
/* Strategy 1: single arch-specific barrier instruction. */
2738 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2741 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2742 wbarrier->sreg1 = ptr->dreg;
2744 wbarrier->sreg2 = value->dreg;
2746 wbarrier->sreg2 = value_reg;
2747 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Strategy 2: open-coded card marking. */
2748 } else if (card_table) {
2749 int offset_reg = alloc_preg (cfg);
2750 int card_reg = alloc_preg (cfg);
2753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2754 if (card_table_mask)
2755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2757 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2758 * IMM's larger than 32bits.
2760 if (cfg->compile_aot) {
2761 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2763 MONO_INST_NEW (cfg, ins, OP_PCONST);
2764 ins->inst_p0 = card_table;
2765 ins->dreg = card_reg;
2766 MONO_ADD_INS (cfg->cbb, ins);
2769 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty: *(card_table + (ptr >> shift)) = 1. */
2770 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Strategy 3: fall back to the generic managed write barrier. */
2772 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2773 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value live past the barrier (GC safety). */
2777 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2779 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2780 dummy_use->sreg1 = value_reg;
2781 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *   Emit an unrolled, write-barrier-aware copy of a valuetype of class KLASS
 *   (SIZE bytes, alignment ALIGN).  iargs[0]/iargs[1] hold dest/src address
 *   instructions.  Small copies (<= 5 pointer words) are unrolled inline with
 *   a write barrier after each pointer-sized slot that may hold a reference;
 *   larger copies call the mono_gc_wbarrier_value_copy_bitmap icall with a
 *   per-slot reference bitmap.
 *   NOTE(review): preconditions (align >= sizeof(void*), size <= 32 words)
 *   presumably make the caller fall back to another path — the early-return
 *   lines are elided in this extract; confirm in the full source.
 */
2786 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2788 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2790 unsigned need_wb = 0;
2794 /*types with references can't have alignment smaller than sizeof(void*) */
2795 if (align < SIZEOF_VOID_P)
2798 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2799 if (size > 32 * SIZEOF_VOID_P)
2802 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2804 /* We don't unroll more than 5 stores to avoid code bloat. */
2805 if (size > 5 * SIZEOF_VOID_P) {
2806 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer words. */
2807 size += (SIZEOF_VOID_P - 1);
2808 size &= ~(SIZEOF_VOID_P - 1);
2810 EMIT_NEW_ICONST (cfg, iargs [2], size);
2811 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2812 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2816 destreg = iargs [0]->dreg;
2817 srcreg = iargs [1]->dreg;
2820 dest_ptr_reg = alloc_preg (cfg);
2821 tmp_reg = alloc_preg (cfg);
/* Running destination pointer; advanced one word per iteration below. */
2824 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2826 while (size >= SIZEOF_VOID_P) {
2827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier for this slot (elided guard presumably consults need_wb). */
2831 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2833 offset += SIZEOF_VOID_P;
2834 size -= SIZEOF_VOID_P;
2837 /*tmp += sizeof (void*)*/
2838 if (size >= SIZEOF_VOID_P) {
2839 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2840 MONO_ADD_INS (cfg->cbb, iargs [0]);
2844 /* Those cannot be references since size < sizeof (void*) */
2846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2870 * Emit code to copy a valuetype of type @klass whose address is stored in
2871 * @src->dreg to memory whose address is stored at @dest->dreg.
2874 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2876 MonoInst *iargs [4];
2877 int context_used, n;
2879 MonoMethod *memcpy_method;
2880 MonoInst *size_ins = NULL;
2884 * This check breaks with spilled vars... need to handle it during verification anyway.
2885 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the value size is only known at runtime — fetch it from the rgctx. */
2888 if (mini_is_gsharedvt_klass (cfg, klass)) {
2890 context_used = mini_class_check_context_used (cfg, klass);
2891 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2895 n = mono_class_native_size (klass, &align);
2897 n = mono_class_value_size (klass, &align);
2899 /* if native is true there should be no references in the struct */
2900 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
2901 /* Avoid barriers when storing to the stack */
2902 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2903 (dest->opcode == OP_LDADDR))) {
2909 context_used = mini_class_check_context_used (cfg, klass);
2911 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2912 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2914 } else if (context_used) {
2915 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2917 if (cfg->compile_aot) {
2918 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2920 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT mode: make sure the GC descriptor exists before mono_value_copy runs. */
2921 mono_class_compute_gc_descriptor (klass);
2925 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barrier needed: small fixed-size copies are inlined as raw memcpy. */
2930 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2931 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2932 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2937 iargs [2] = size_ins;
2939 EMIT_NEW_ICONST (cfg, iargs [2], n);
2941 memcpy_method = get_memcpy_method ();
2942 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the managed String.memset(3 args) helper from corlib, caching it
 *   in a static on first use.  Aborts if the method is missing (old corlib).
 */
2947 get_memset_method (void)
2949 static MonoMethod *memset_method = NULL;
2950 if (!memset_method) {
2951 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2953 g_error ("Old corlib found. Install a new one");
2955 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of class KLASS at the address in
 *   DEST->dreg (CIL `initobj`).  Small fixed-size types are zeroed inline via
 *   mini_emit_memset; otherwise the managed memset helper is called.  For
 *   gsharedvt classes the size is loaded from the rgctx at runtime.
 */
2959 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2961 MonoInst *iargs [3];
2962 int n, context_used;
2964 MonoMethod *memset_method;
2965 MonoInst *size_ins = NULL;
2967 /* FIXME: Optimize this for the case when dest is an LDADDR */
2969 mono_class_init (klass);
2970 if (mini_is_gsharedvt_klass (cfg, klass)) {
2971 context_used = mini_class_check_context_used (cfg, klass);
2972 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2975 n = mono_class_value_size (klass, &align);
/* Inline zeroing for small, statically-sized types (<= 5 pointer words). */
2978 if (!size_ins && n <= sizeof (gpointer) * 5) {
2979 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2982 memset_method = get_memset_method ();
/* memset(dest, 0, size) — iargs[0] (dest) is set on an elided line. */
2984 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2986 iargs [2] = size_ins;
2988 EMIT_NEW_ICONST (cfg, iargs [2], n);
2989 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that produces the runtime generic context for METHOD.  The source
 *   depends on how the method is shared:
 *     - method context used:       load the mrgctx from the vtable var;
 *     - static or valuetype:       load the vtable var (possibly indirecting
 *                                  through the mrgctx's class_vtable field);
 *     - otherwise:                 load 'this' (arg 0) and read its vtable.
 *   Only valid under generic sharing (asserted).
 */
2994 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2996 MonoInst *this = NULL;
2998 g_assert (cfg->generic_sharing_context);
/* 'this' is only needed for the instance-method / class-context case. */
3000 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3001 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3002 !method->klass->valuetype)
3003 EMIT_NEW_ARGLOAD (cfg, this, 0);
3005 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3006 MonoInst *mrgctx_loc, *mrgctx_var;
/* A method context implies an inflated method with a method_inst. */
3009 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3011 mrgctx_loc = mono_get_vtable_var (cfg);
3012 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3015 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3016 MonoInst *vtable_loc, *vtable_var;
3020 vtable_loc = mono_get_vtable_var (cfg);
3021 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3023 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an mrgctx; dereference to get the class vtable. */
3024 MonoInst *mrgctx_var = vtable_var;
3027 vtable_reg = alloc_preg (cfg);
3028 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3029 vtable_var->type = STACK_PTR;
/* Fallback: fetch the vtable from 'this'. */
3037 vtable_reg = alloc_preg (cfg);
3038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) and fill an rgctx-entry patch descriptor: the lazily
 *   fetched slot is identified by METHOD, whether the fetch goes through an
 *   mrgctx, a nested MonoJumpInfo (PATCH_TYPE/PATCH_DATA) and the requested
 *   INFO_TYPE.  Mempool-owned; no explicit free.
 */
3043 static MonoJumpInfoRgctxEntry *
3044 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3046 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3047 res->method = method;
3048 res->in_mrgctx = in_mrgctx;
3049 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3050 res->data->type = patch_type;
3051 res->data->data.target = patch_data;
3052 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the
 *   single argument; ENTRY describes which slot to fetch.
 */
3057 static inline MonoInst*
3058 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3060 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that loads the RGCTX_TYPE property of KLASS (e.g. its vtable or
 *   value size) from the runtime generic context of the current method.
 */
3064 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3065 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3067 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3068 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3070 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR that loads the RGCTX_TYPE property of signature SIG from the
 *   runtime generic context of the current method.
 */
3074 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3075 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3077 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3078 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3080 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR that loads the RGCTX_TYPE property for a gsharedvt call site
 *   described by (SIG, CMETHOD).  The pair is packaged into a mempool-owned
 *   MonoJumpInfoGSharedVtCall used as the patch data.
 */
3084 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3085 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3087 MonoJumpInfoGSharedVtCall *call_info;
3088 MonoJumpInfoRgctxEntry *entry;
3091 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3092 call_info->sig = sig;
3093 call_info->method = cmethod;
3095 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3096 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3098 return emit_rgctx_fetch (cfg, rgctx, entry);
3102 * emit_get_rgctx_method:
3104 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3105 * normal constants, else emit a load from the rgctx.
3108 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3109 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the value is a compile-time constant. */
3111 if (!context_used) {
3114 switch (rgctx_type) {
3115 case MONO_RGCTX_INFO_METHOD:
3116 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3118 case MONO_RGCTX_INFO_METHOD_RGCTX:
3119 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two info kinds above are expressible as constants. */
3122 g_assert_not_reached ();
3125 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3126 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3128 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that loads the RGCTX_TYPE property of FIELD from the runtime
 *   generic context of the current method.
 */
3133 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3134 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3136 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3137 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3139 return emit_rgctx_fetch (cfg, rgctx, entry);
3143 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 *   vtable either from the rgctx (shared code) or as a constant.  On
 *   architectures with a dedicated vtable register the argument is pinned
 *   to MONO_ARCH_VTABLE_REG.
 */
3146 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3148 MonoInst *vtable_arg;
3152 context_used = mini_class_check_context_used (cfg, klass);
3155 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3156 klass, MONO_RGCTX_INFO_VTABLE);
3158 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3162 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different (callable) signature. */
3165 if (COMPILE_LLVM (cfg))
3166 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3168 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3169 #ifdef MONO_ARCH_VTABLE_REG
3170 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3171 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point for IL offset IP, but only when sequence
 *   points are enabled and we are compiling METHOD itself (not inlining it).
 *   INTR_LOC marks the point as a valid interrupt location.
 */
3178 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3182 if (cfg->gen_seq_points && cfg->method == method) {
3183 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3184 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   When --debug=casts is enabled, emit code that records the source class
 *   (read from the object's vtable) and the target KLASS into the thread's
 *   MonoJitTlsData (class_cast_from / class_cast_to), so a failing cast can
 *   produce a detailed exception message.  No-op otherwise.
 */
3189 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3191 if (mini_get_debug_options ()->better_cast_details) {
3192 int to_klass_reg = alloc_preg (cfg);
3193 int vtable_reg = alloc_preg (cfg);
3194 int klass_reg = alloc_preg (cfg);
3195 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this arch — the feature cannot work. */
3198 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3202 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass: the runtime class of the object being cast. */
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3206 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3207 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3208 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Counterpart of save_cast_details (): after a cast succeeds, clear the
 *   recorded details in TLS so stale data is never reported.  Only the
 *   class_cast_from field needs clearing.
 */
3213 reset_cast_details (MonoCompile *cfg)
3215 /* Reset the variables holding the cast details */
3216 if (mini_get_debug_options ()->better_cast_details) {
3217 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3219 MONO_ADD_INS (cfg->cbb, tls_get);
3220 /* It is enough to reset the from field */
3221 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3226 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit code that verifies OBJ's runtime type is exactly ARRAY_CLASS,
 *   throwing ArrayTypeMismatchException otherwise (used for covariant array
 *   stores).  The comparison strategy depends on the compilation mode:
 *   class pointer (shared), rgctx-loaded vtable (generic sharing), AOT
 *   vtable constant, or direct vtable immediate (plain JIT).
 */
3229 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3231 int vtable_reg = alloc_preg (cfg);
3234 context_used = mini_class_check_context_used (cfg, array_class);
3236 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on OBJ. */
3238 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3240 if (cfg->opt & MONO_OPT_SHARED) {
3241 int class_reg = alloc_preg (cfg);
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3243 if (cfg->compile_aot) {
3244 int klass_reg = alloc_preg (cfg);
3245 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3246 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3250 } else if (context_used) {
3251 MonoInst *vtable_ins;
3253 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3254 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3256 if (cfg->compile_aot) {
3260 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3262 vt_reg = alloc_preg (cfg);
3263 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3264 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3267 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3273 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3275 reset_cast_details (cfg);
3279 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3280 * generic code is generated.
3283 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by the managed Nullable<T>.Unbox helper. */
3285 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3288 MonoInst *rgctx, *addr;
3290 /* FIXME: What if the class is shared? We might not
3291 have to get the address of the method from the
/* Shared code: resolve the concrete Unbox method's code address via rgctx
 * and emit an indirect call. */
3293 addr = emit_get_rgctx_method (cfg, context_used, method,
3294 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3296 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3298 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3300 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the CIL `unbox` sequence for sp[0]: type-check the boxed object
 *   (rank must be 0 and the element class must match, throwing
 *   InvalidCastException otherwise) and produce a managed pointer to the
 *   value, i.e. obj + sizeof (MonoObject).
 */
3305 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3309 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3310 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3311 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3312 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3314 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3315 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3316 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3318 /* FIXME: generics */
3319 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype. */
3322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3323 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code path: compare against the rgctx-resolved element class. */
3329 MonoInst *element_class;
3331 /* This assertion is from the unboxcast insn */
3332 g_assert (klass->rank == 0);
3334 element_class = emit_get_rgctx_klass (cfg, context_used,
3335 klass->element_class, MONO_RGCTX_INFO_KLASS);
3337 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3338 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3340 save_cast_details (cfg, klass->element_class, obj_reg);
3341 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3342 reset_cast_details (cfg);
/* Result: pointer just past the object header, where the value lives. */
3345 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3346 MONO_ADD_INS (cfg->cbb, add);
3347 add->type = STACK_MP;
3354 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit code to allocate a new object of class KLASS (FOR_BOX distinguishes
 *   box allocations, which may use a specialized managed allocator).  Picks
 *   among: rgctx-driven allocation (shared generics), domain-aware
 *   mono_object_new (MONO_OPT_SHARED), a compact mscorlib helper (AOT
 *   out-of-line corlib types), a GC managed allocator, or the class's
 *   allocation function.
 */
3357 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3359 MonoInst *iargs [2];
/* Shared-generics branch: klass/vtable comes from the rgctx. */
3365 MonoInst *iargs [2];
3368 FIXME: we cannot get managed_alloc here because we can't get
3369 the class's vtable (because it's not a closed class)
3371 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3372 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3375 if (cfg->opt & MONO_OPT_SHARED)
3376 rgctx_info = MONO_RGCTX_INFO_KLASS;
3378 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3379 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3381 if (cfg->opt & MONO_OPT_SHARED) {
3382 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3384 alloc_ftn = mono_object_new;
3387 alloc_ftn = mono_object_new_specific;
3390 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared branches below. */
3393 if (cfg->opt & MONO_OPT_SHARED) {
3394 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3395 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3397 alloc_ftn = mono_object_new;
3398 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3399 /* This happens often in argument checking code, eg. throw new FooException... */
3400 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3401 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3402 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3404 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3405 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3410 cfg->exception_ptr = klass;
3414 #ifndef MONO_CROSS_COMPILE
3415 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3418 if (managed_alloc) {
3419 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3420 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3422 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the instance size in pointer words as 1st arg. */
3424 guint32 lw = vtable->klass->instance_size;
3425 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3426 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3427 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3430 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3434 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3438 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit the CIL `box` sequence for VAL of class KLASS.  Nullable<T> boxes
 *   through the managed Nullable<T>.Box helper; gsharedvt classes branch at
 *   runtime on whether T is a reference type (no allocation needed) or a
 *   valuetype (allocate + store); everything else allocates and stores the
 *   value just past the object header.
 */
3441 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3443 MonoInst *alloc, *ins;
3445 if (mono_class_is_nullable (klass)) {
3446 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3449 /* FIXME: What if the class is shared? We might not
3450 have to get the method address from the RGCTX. */
3451 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3452 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3453 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3455 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3457 return mono_emit_method_call (cfg, method, &val, NULL);
/* gsharedvt: T's ref-ness is only known at runtime — branch on it. */
3461 if (mini_is_gsharedvt_klass (cfg, klass)) {
3462 MonoBasicBlock *is_ref_bb, *end_bb;
3463 MonoInst *res, *is_ref, *src_var, *addr;
3466 dreg = alloc_ireg (cfg);
3468 NEW_BBLOCK (cfg, is_ref_bb);
3469 NEW_BBLOCK (cfg, end_bb);
3470 is_ref = emit_get_rgctx_klass (cfg, context_used, klass,
3471 MONO_RGCTX_INFO_CLASS_IS_REF);
3472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref path: allocate a box and copy the value into it. */
3476 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3479 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3480 ins->opcode = OP_STOREV_MEMBASE;
3482 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3483 res->type = STACK_OBJ;
3485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref path: T is a reference type, so "boxing" is just loading the value. */
3488 MONO_START_BB (cfg, is_ref_bb);
3489 addr_reg = alloc_ireg (cfg);
3491 /* val is a vtype, so has to load the value manually */
3492 src_var = get_vreg_to_inst (cfg, val->dreg);
3494 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3495 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3496 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3499 MONO_START_BB (cfg, end_bb);
/* Plain case: allocate, then store the value past the header. */
3503 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3507 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Return whether KLASS is a generic instance (or, under sharing, an open
 *   generic) with at least one covariant/contravariant type parameter whose
 *   argument is a reference type.  Such casts need the variance-aware
 *   (cache-based) cast helpers.
 */
3514 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3517 MonoGenericContainer *container;
3518 MonoGenericInst *ginst;
3520 if (klass->generic_class) {
3521 container = klass->generic_class->container_class->generic_container;
3522 ginst = klass->generic_class->context.class_inst;
3523 } else if (klass->generic_container && context_used) {
3524 container = klass->generic_container;
3525 ginst = container->context.class_inst;
3530 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters are interesting. */
3532 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3534 type = ginst->type_argv [i];
3535 if (mini_type_is_reference (cfg, type))
3541 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading TRUE|| makes this macro unconditionally true, so every
 * isinst/castclass currently takes the "complex" (icall) path; the rest of
 * the expression documents the intended fast-path exclusions. */
3542 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3545 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *   Emit the CIL `castclass` sequence for SRC: null passes through; variant
 *   generic interfaces go through the cache-based managed helper; "complex"
 *   classes (see is_complex_isinst) go through the mono_object_castclass
 *   icall; the remaining simple cases are open-coded vtable/class compares
 *   that throw InvalidCastException on mismatch.
 */
3548 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3550 MonoBasicBlock *is_null_bb;
3551 int obj_reg = src->dreg;
3552 int vtable_reg = alloc_preg (cfg);
3553 MonoInst *klass_inst = NULL;
/* Variant generic argument: use the castclass-with-cache wrapper. */
3558 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3559 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3560 MonoInst *cache_ins;
3562 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3567 /* klass - it's the second element of the cache entry*/
3568 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3571 args [2] = cache_ins;
3573 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3576 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3578 if (is_complex_isinst (klass)) {
3579 /* Complex case, handle by an icall */
3585 args [1] = klass_inst;
3587 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3589 /* Simple case, handled by the code below */
/* null always casts successfully — branch straight to the end. */
3593 NEW_BBLOCK (cfg, is_null_bb);
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3598 save_cast_details (cfg, klass, obj_reg);
3600 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3602 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3604 int klass_reg = alloc_preg (cfg);
3606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class, plain JIT: an exact compare suffices. */
3608 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3609 /* the remoting code is broken, access the class for now */
3610 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3611 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3613 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3614 cfg->exception_ptr = klass;
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3622 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3625 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3629 MONO_START_BB (cfg, is_null_bb);
3631 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' opcode: test whether SRC is an
 * instance of KLASS, producing either the object reference (success, or NULL
 * input) or NULL (failure) in a fresh result register — no exception is
 * thrown.  CONTEXT_USED is non-zero under generic sharing.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
3637 * Returns NULL and set the cfg exception on error.
3640 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3643 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3644 int obj_reg = src->dreg;
3645 int vtable_reg = alloc_preg (cfg);
3646 int res_reg = alloc_ireg_ref (cfg);
3647 MonoInst *klass_inst = NULL;
/* Variant reference-type generic arguments: defer to the managed
 * cache-backed isinst wrapper. */
3652 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3653 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3654 MonoInst *cache_ins;
3656 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3661 /* klass - it's the second element of the cache entry*/
3662 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3665 args [2] = cache_ins;
3667 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3670 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* is_complex_isinst () is currently always TRUE (see its FIXME), so the
 * shared path in practice takes the icall below. */
3672 if (is_complex_isinst (klass)) {
3673 /* Complex case, handle by an icall */
3679 args [1] = klass_inst;
3681 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3683 /* Simple case, the code below can handle it */
3687 NEW_BBLOCK (cfg, is_null_bb);
3688 NEW_BBLOCK (cfg, false_bb);
3689 NEW_BBLOCK (cfg, end_bb);
3691 /* Do the assignment at the beginning, so the other assignment can be if converted */
3692 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3693 ins->type = STACK_OBJ;
/* isinst on a null reference yields the (null) input unchanged. */
3696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3701 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3702 g_assert (!context_used);
3703 /* the is_null_bb target simply copies the input register to the output */
3704 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3706 int klass_reg = alloc_preg (cfg);
/* Array case (presumably guarded by klass->rank in an elided line):
 * compare ranks, then classify by the array element (cast) class. */
3709 int rank_reg = alloc_preg (cfg);
3710 int eclass_reg = alloc_preg (cfg);
3712 g_assert (!context_used);
3713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class special cases mirror the runtime's array covariance rules
 * for object/Enum/ValueType elements. */
3718 if (klass->cast_class == mono_defaults.object_class) {
3719 int parent_reg = alloc_preg (cfg);
3720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3721 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3722 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3724 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3725 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3726 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3728 } else if (klass->cast_class == mono_defaults.enum_class) {
3729 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3731 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3732 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3734 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3735 /* Check that the object is a vector too */
3736 int bounds_reg = alloc_preg (cfg);
3737 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3742 /* the is_null_bb target simply copies the input register to the output */
3743 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3745 } else if (mono_class_is_nullable (klass)) {
3746 g_assert (!context_used);
3747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3748 /* the is_null_bb target simply copies the input register to the output */
3749 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-AOT/non-shared classes: single klass-pointer compare (the
 * vtable-compare variant is disabled via if (0), same as handle_castclass). */
3751 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3752 g_assert (!context_used);
3753 /* the remoting code is broken, access the class for now */
3754 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3755 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3757 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3758 cfg->exception_ptr = klass;
3761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3770 /* the is_null_bb target simply copies the input register to the output */
3771 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register becomes NULL; success/null paths keep the
 * copy of the input made at the top. */
3776 MONO_START_BB (cfg, false_bb);
3778 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3781 MONO_START_BB (cfg, is_null_bb);
3783 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware "conditional isinst" opcode, which must
 * distinguish transparent proxies whose type cannot be decided at JIT time.
 * See the result encoding in the comment below.  With DISABLE_REMOTING the
 * proxy paths are compiled out and hitting them aborts.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
3789 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3791 /* This opcode takes as input an object reference and a class, and returns:
3792 0) if the object is an instance of the class,
3793 1) if the object is not instance of the class,
3794 2) if the object is a proxy whose type cannot be determined */
3797 #ifndef DISABLE_REMOTING
3798 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3800 MonoBasicBlock *true_bb, *false_bb, *end_bb;
3802 int obj_reg = src->dreg;
3803 int dreg = alloc_ireg (cfg);
3805 #ifndef DISABLE_REMOTING
3806 int klass_reg = alloc_preg (cfg);
3809 NEW_BBLOCK (cfg, true_bb);
3810 NEW_BBLOCK (cfg, false_bb);
3811 NEW_BBLOCK (cfg, end_bb);
3812 #ifndef DISABLE_REMOTING
3813 NEW_BBLOCK (cfg, false2_bb);
3814 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3820 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3821 #ifndef DISABLE_REMOTING
3822 NEW_BBLOCK (cfg, interface_fail_bb);
3825 tmp_reg = alloc_preg (cfg);
3826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3827 #ifndef DISABLE_REMOTING
/* Interface miss on a transparent proxy: if the proxy has custom type info,
 * retry the interface test; otherwise the answer is "undecidable" (2). */
3828 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3829 MONO_START_BB (cfg, interface_fail_bb);
3830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3832 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3834 tmp_reg = alloc_preg (cfg);
3835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3837 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3839 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
3842 #ifndef DISABLE_REMOTING
/* Non-interface class: for proxies, test against the remote proxy_class;
 * proxies without custom type info fall back to the plain class test. */
3843 tmp_reg = alloc_preg (cfg);
3844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3847 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3848 tmp_reg = alloc_preg (cfg);
3849 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3850 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3852 tmp_reg = alloc_preg (cfg);
3853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3854 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3855 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3857 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3860 MONO_START_BB (cfg, no_proxy_bb);
3862 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3864 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the three result codes described at the top. */
3868 MONO_START_BB (cfg, false_bb);
3870 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3871 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3873 #ifndef DISABLE_REMOTING
3874 MONO_START_BB (cfg, false2_bb);
3876 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3877 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3880 MONO_START_BB (cfg, true_bb);
3882 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3884 MONO_START_BB (cfg, end_bb);
/* NOTE(review): presumably this ICONST is converted into the returned ins
 * carrying dreg — the wiring lines are elided here. */
3887 MONO_INST_NEW (cfg, ins, OP_ICONST);
3889 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware "conditional castclass" opcode (result
 * encoding in the comment below).  Unlike handle_cisinst (), a definite type
 * mismatch throws InvalidCastException instead of returning a code.  With
 * DISABLE_REMOTING the proxy paths are compiled out and hitting them aborts.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
3895 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3897 /* This opcode takes as input an object reference and a class, and returns:
3898 0) if the object is an instance of the class,
3899 1) if the object is a proxy whose type cannot be determined
3900 an InvalidCastException exception is thrown otherwhise*/
3903 #ifndef DISABLE_REMOTING
3904 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3906 MonoBasicBlock *ok_result_bb;
3908 int obj_reg = src->dreg;
3909 int dreg = alloc_ireg (cfg);
3910 int tmp_reg = alloc_preg (cfg);
3912 #ifndef DISABLE_REMOTING
3913 int klass_reg = alloc_preg (cfg);
3914 NEW_BBLOCK (cfg, end_bb);
3917 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always passes (result 0). */
3919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3922 save_cast_details (cfg, klass, obj_reg);
3924 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3925 #ifndef DISABLE_REMOTING
3926 NEW_BBLOCK (cfg, interface_fail_bb);
3928 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface miss: only a transparent proxy lacking custom type info is
 * "undecidable" (result 1); anything else throws. */
3929 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3930 MONO_START_BB (cfg, interface_fail_bb);
3931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3933 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3935 tmp_reg = alloc_preg (cfg);
3936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3938 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3940 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3944 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
3945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
3948 #ifndef DISABLE_REMOTING
/* Non-interface class: proxies with custom type info are tested against
 * their remote proxy_class; failure there yields result 1, not a throw. */
3949 NEW_BBLOCK (cfg, no_proxy_bb);
3951 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3953 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3955 tmp_reg = alloc_preg (cfg);
3956 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3957 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3959 tmp_reg = alloc_preg (cfg);
3960 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3961 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3964 NEW_BBLOCK (cfg, fail_1_bb);
3966 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3968 MONO_START_BB (cfg, fail_1_bb);
3970 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3973 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass here throws inside
 * mini_emit_castclass (). */
3975 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3977 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
3981 MONO_START_BB (cfg, ok_result_bb);
3983 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3985 #ifndef DISABLE_REMOTING
3986 MONO_START_BB (cfg, end_bb);
/* NOTE(review): presumably this ICONST is wired up as the returned ins
 * carrying dreg — the wiring lines are elided here. */
3990 MONO_INST_NEW (cfg, ins, OP_ICONST);
3992 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of KLASS
 * and initialize its target, method, method_code and invoke_impl fields for
 * METHOD, so no managed ctor call is needed.  CONTEXT_USED is non-zero under
 * generic sharing (method/code-slot come from the RGCTX then).
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
3998 * Returns NULL and set the cfg exception on error.
4000 static G_GNUC_UNUSED MonoInst*
4001 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4005 gpointer *trampoline;
4006 MonoInst *obj, *method_ins, *tramp_ins;
4010 obj = handle_alloc (cfg, klass, FALSE, 0);
4014 /* Inline the contents of mono_delegate_ctor */
4016 /* Set target field */
4017 /* Optimize away setting of NULL target */
4018 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4019 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference stores into a fresh object still need a write barrier when the
 * collector requires them. */
4020 if (cfg->gen_write_barriers) {
4021 dreg = alloc_preg (cfg);
4022 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4023 emit_write_barrier (cfg, ptr, target, 0);
4027 /* Set method field */
4028 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4029 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4030 if (cfg->gen_write_barriers) {
4031 dreg = alloc_preg (cfg);
4032 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4033 emit_write_barrier (cfg, ptr, method_ins, 0);
4036 * To avoid looking up the compiled code belonging to the target method
4037 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4038 * store it, and we fill it after the method has been compiled.
4040 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4041 MonoInst *code_slot_ins;
4044 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain
 * lock; each method gets one stable slot. */
4046 domain = mono_domain_get ();
4047 mono_domain_lock (domain);
4048 if (!domain_jit_info (domain)->method_code_hash)
4049 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4050 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4052 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4053 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4055 mono_domain_unlock (domain);
4057 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4059 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4062 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patch resolved at load time. */
4063 if (cfg->compile_aot) {
4064 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4066 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4067 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4071 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newobj' on a multi-dimensional array ctor:
 * a vararg native call to the rank-specific mono_array_new_va icall wrapper,
 * with the RANK dimension arguments taken from SP.
 */
4077 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4079 MonoJitICallInfo *info;
4081 /* Need to register the icall so it gets an icall wrapper */
4082 info = mono_get_array_new_va_icall (rank);
4084 cfg->flags |= MONO_CFG_HAS_VARARGS;
4086 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile this method once it contains a vararg native call. */
4087 cfg->disable_llvm = TRUE;
4089 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4090 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable and it hasn't been materialized yet,
 * emit OP_LOAD_GOTADDR at the very start of the entry basic block to fill it,
 * plus a dummy use in the exit block to keep the variable live for the whole
 * method.  No-op when there is no got_var or it was already allocated.
 */
4094 mono_emit_load_got_addr (MonoCompile *cfg)
4096 MonoInst *getaddr, *dummy_use;
4098 if (!cfg->got_var || cfg->got_var_allocated)
4101 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4102 getaddr->cil_code = cfg->header->code;
4103 getaddr->dreg = cfg->got_var->dreg;
4105 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise a plain
 * append is equivalent. */
4106 if (cfg->bb_entry->code) {
4107 getaddr->next = cfg->bb_entry->code;
4108 cfg->bb_entry->code = getaddr;
4111 MONO_ADD_INS (cfg->bb_entry, getaddr);
4113 cfg->got_var_allocated = TRUE;
4116 * Add a dummy use to keep the got_var alive, since real uses might
4117 * only be generated by the back ends.
4118 * Add it to end_bblock, so the variable's lifetime covers the whole
4120 * It would be better to make the usage of the got var explicit in all
4121 * cases when the backend needs it (i.e. calls, throw etc.), so this
4122 * wouldn't be needed.
4124 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4125 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL-size threshold for inlining; lazily initialized in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, defaulting to INLINE_LENGTH_LIMIT. */
4128 static int inline_limit;
4129 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects (among the visible checks): generic sharing, deep inline
 * nesting, noinline/synchronized/MarshalByRef methods, bodies at or above
 * the size limit, classes whose cctor hasn't run yet, declarative security,
 * and (on soft-float targets) R4 parameters or return values.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
4132 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4134 MonoMethodHeaderSummary header;
4136 #ifdef MONO_ARCH_SOFT_FLOAT
4137 MonoMethodSignature *sig = mono_method_signature (method);
4141 if (cfg->generic_sharing_context)
4144 if (cfg->inline_depth > 10)
/* Targets with LMF ops can inline some icall/pinvoke stubs directly. */
4147 #ifdef MONO_ARCH_HAVE_LMF_OPS
4148 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4149 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4150 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4155 if (!mono_method_get_header_summary (method, &header))
4158 /*runtime, icall and pinvoke are checked by summary call*/
4159 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4160 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4161 (mono_class_is_marshalbyref (method->klass)) ||
4165 /* also consider num_locals? */
4166 /* Do the size check early to avoid creating vtables */
4167 if (!inline_limit_inited) {
4168 if (getenv ("MONO_INLINELIMIT"))
4169 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4171 inline_limit = INLINE_LENGTH_LIMIT;
4172 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit only, not the other checks. */
4174 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4178 * if we can initialize the class of the method right away, we do,
4179 * otherwise we don't allow inlining if the class needs initialization,
4180 * since it would mean inserting a call to mono_runtime_class_init()
4181 * inside the inlined code
4183 if (!(cfg->opt & MONO_OPT_SHARED)) {
4184 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4185 if (cfg->run_cctors && method->klass->has_cctor) {
4186 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4187 if (!method->klass->runtime_info)
4188 /* No vtable created yet */
4190 vtable = mono_class_vtable (cfg->domain, method->klass);
4193 /* This makes so that inline cannot trigger */
4194 /* .cctors: too many apps depend on them */
4195 /* running with a specific order... */
4196 if (! vtable->initialized)
4198 mono_runtime_class_init (vtable);
4200 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4201 if (!method->klass->runtime_info)
4202 /* No vtable created yet */
4204 vtable = mono_class_vtable (cfg->domain, method->klass);
4207 if (!vtable->initialized)
4212 * If we're compiling for shared code
4213 * the cctor will need to be run at aot method load time, for example,
4214 * or at the end of the compilation of the inlining method.
4216 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4221 * CAS - do not inline methods with declarative security
4222 * Note: this has to be before any possible return TRUE;
4224 if (mono_security_method_has_declsec (method))
/* Soft-float: R4 values need conversion helpers the inliner can't insert. */
4227 #ifdef MONO_ARCH_SOFT_FLOAT
4229 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4231 for (i = 0; i < sig->param_count; ++i)
4232 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires the class
 * constructor of VTABLE's class to be triggered first.  Already-initialized
 * vtables (outside AOT), beforefieldinit classes, classes with no pending
 * cctor, and instance methods of the same class all answer "no".
 * NOTE(review): chunk is elided; the explicit return statements between the
 * visible conditions are missing from this view.
 */
4240 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4242 if (vtable->initialized && !cfg->compile_aot)
4245 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4248 if (!mono_class_needs_cctor_run (vtable->klass, method))
4251 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4252 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS; BCHECK requests a bounds check.
 * Returns the address instruction (type STACK_MP).  gsharedvt classes use a
 * runtime-provided element size from the RGCTX.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
4259 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4263 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4266 if (mini_is_gsharedvt_klass (cfg, klass)) {
4269 mono_class_init (klass);
4270 size = mono_class_array_element_size (klass);
4273 mult_reg = alloc_preg (cfg);
4274 array_reg = arr->dreg;
4275 index_reg = index->dreg;
4277 #if SIZEOF_REGISTER == 8
4278 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend the i4 index, or
 * truncate an i8 index on 32-bit targets. */
4279 if (COMPILE_LLVM (cfg)) {
4281 index2_reg = index_reg;
4283 index2_reg = alloc_preg (cfg);
4284 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4287 if (index->type == STACK_I8) {
4288 index2_reg = alloc_preg (cfg);
4289 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4291 index2_reg = index_reg;
4296 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the shift and vector offset into one LEA for
 * power-of-two element sizes. */
4298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4299 if (size == 1 || size == 2 || size == 4 || size == 8) {
4300 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4302 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4303 ins->klass = mono_class_get_element_class (klass);
4304 ins->type = STACK_MP;
4310 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime, so fetch it from
 * the RGCTX and multiply explicitly. */
4313 MonoInst *rgctx_ins;
4316 g_assert (cfg->generic_sharing_context);
4317 context_used = mini_class_check_context_used (cfg, klass);
4318 g_assert (context_used);
4319 rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4320 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4324 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4325 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4326 ins->klass = mono_class_get_element_class (klass);
4327 ins->type = STACK_MP;
4328 MONO_ADD_INS (cfg->cbb, ins);
4333 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element type KLASS, including per-dimension
 * lower-bound adjustment and range checks against the MonoArrayBounds pair.
 * Only compiled on targets with native multiply (depends on OP_LMUL/OP_PMUL).
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
4335 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4337 int bounds_reg = alloc_preg (cfg);
4338 int add_reg = alloc_ireg_mp (cfg);
4339 int mult_reg = alloc_preg (cfg);
4340 int mult2_reg = alloc_preg (cfg);
4341 int low1_reg = alloc_preg (cfg);
4342 int low2_reg = alloc_preg (cfg);
4343 int high1_reg = alloc_preg (cfg);
4344 int high2_reg = alloc_preg (cfg);
4345 int realidx1_reg = alloc_preg (cfg);
4346 int realidx2_reg = alloc_preg (cfg);
4347 int sum_reg = alloc_preg (cfg);
4348 int index1, index2, tmpreg;
4352 mono_class_init (klass);
4353 size = mono_class_array_element_size (klass);
4355 index1 = index_ins1->dreg;
4356 index2 = index_ins2->dreg;
4358 #if SIZEOF_REGISTER == 8
4359 /* The array reg is 64 bits but the index reg is only 32 */
/* Non-LLVM 64-bit targets sign-extend both 32-bit indexes first. */
4360 if (COMPILE_LLVM (cfg)) {
4363 tmpreg = alloc_preg (cfg);
4364 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4366 tmpreg = alloc_preg (cfg);
4367 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4371 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4375 /* range checking */
4376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4377 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; the unsigned LE_UN compare
 * catches both negative results and indexes >= length in one check. */
4379 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4380 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4381 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4383 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4384 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4385 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
4387 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4388 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4389 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4390 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4391 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4392 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4393 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * dim2_length + realidx2) * size + vector offset */
4395 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4396 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4398 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4399 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4401 ins->type = STACK_MP;
4403 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for an array Address/Get/Set method
 * CMETHOD: rank 1 and (with intrinsics) rank 2 are emitted inline; higher
 * ranks call the generated array-address marshaling wrapper.  IS_SET drops
 * the trailing value parameter when computing the rank.
 */
4410 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4414 MonoMethod *addr_method;
4417 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4420 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4422 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4423 /* emit_ldelema_2 depends on OP_LMUL */
4424 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4425 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/element-size specific address wrapper. */
4429 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4430 addr_method = mono_marshal_get_array_address (rank, element_size);
4431 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request.  Embedders can
 * override it via mono_set_break_policy () below. */
4436 static MonoBreakPolicy
4437 always_insert_breakpoint (MonoMethod *method)
4439 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; never NULL (reset to the default on NULL). */
4442 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4445 * mono_set_break_policy:
4446 * policy_callback: the new callback function
4448 * Allow embedders to decide wherther to actually obey breakpoint instructions
4449 * (both break IL instructions and Debugger.Break () method calls), for example
4450 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4451 * untrusted or semi-trusted code.
4453 * @policy_callback will be called every time a break point instruction needs to
4454 * be inserted with the method argument being the method that calls Debugger.Break()
4455 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4456 * if it wants the breakpoint to not be effective in the given method.
4457 * #MONO_BREAK_POLICY_ALWAYS is the default.
4460 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-break policy. */
4462 if (policy_callback)
4463 break_policy_func = policy_callback;
4465 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy and return whether a breakpoint for
 * METHOD should actually be emitted.  NOTE(review): the name's "brekpoint"
 * typo is the established identifier — renaming would break callers.
 */
4469 should_insert_brekpoint (MonoMethod *method) {
4470 switch (break_policy_func (method)) {
4471 case MONO_BREAK_POLICY_ALWAYS:
4473 case MONO_BREAK_POLICY_NEVER:
4475 case MONO_BREAK_POLICY_ON_DBG:
4476 return mono_debug_using_mono_debugger ();
/* Unknown enum value from an embedder callback: warn (default behavior on
 * the elided fallthrough is not visible in this chunk). */
4478 g_warning ("Incorrect value returned from break policy callback");
4483 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address (bounds already checked by the caller) and copy between it and the
 * by-ref value argument, with a write barrier on reference stores.
 */
4485 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4487 MonoInst *addr, *store, *load;
4488 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4490 /* the bounds check is already done by the callers */
4491 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element; otherwise array element -> value. */
4493 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4494 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4495 if (mini_type_is_reference (cfg, fsig->params [2]))
4496 emit_write_barrier (cfg, addr, load, -1);
4498 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4499 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type in the context of CFG (taking
 * generic sharing into account via mini_type_is_reference ()). */
4506 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4508 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for 'stelem' on an array of KLASS: SP holds {array, index, value}.
 * With SAFETY_CHECKS, reference-type stores of possibly-non-null values go
 * through the virtual stelemref helper (which performs the covariance check);
 * otherwise the element is stored directly with bounds check and, for
 * references, a write barrier.
 * NOTE(review): chunk is elided; rows are missing between the numbered lines.
 */
4512 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4514 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4515 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4516 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4517 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4518 MonoInst *iargs [3];
/* Ensure the helper has a vtable slot assigned before emitting the
 * virtual call. */
4521 mono_class_setup_vtable (obj_array);
4522 g_assert (helper->slot);
4524 if (sp [0]->type != STACK_OBJ)
4526 if (sp [2]->type != STACK_OBJ)
4533 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4537 if (mini_is_gsharedvt_klass (cfg, klass)) {
4540 // FIXME-VT: OP_ICONST optimization
4541 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4542 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4543 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time. */
4544 } else if (sp [1]->opcode == OP_ICONST) {
4545 int array_reg = sp [0]->dreg;
4546 int index_reg = sp [1]->dreg;
4547 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4550 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4551 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4553 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4554 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4555 if (generic_class_is_reference_type (cfg, klass))
4556 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 *   Emit IR for Array.UnsafeStore/UnsafeLoad: element access with safety
 *   checks disabled (the ldelema/store helpers are called with FALSE checks).
 *   @is_set selects store vs load.  NOTE(review): the is_set branch and
 *   return for the store path are partly elided in this extract.
 */
4563 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* store: element class from params [2]; load: from the return type */
4568 eklass = mono_class_from_mono_type (fsig->params [2]);
4570 eklass = mono_class_from_mono_type (fsig->ret);
4574 return emit_array_store (cfg, eklass, args, FALSE);
4576 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4577 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructors.  Currently only tries the
 *   SIMD intrinsics path when MONO_OPT_SIMD is enabled; the (elided) tail
 *   presumably returns ins, which stays NULL when no intrinsic applies.
 */
4583 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4585 MonoInst *ins = NULL;
4586 #ifdef MONO_ARCH_SIMD_INTRINSICS
4587 if (cfg->opt & MONO_OPT_SIMD) {
4588 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given @kind (e.g.
 *   FullBarrier) to the current basic block and record the barrier kind for
 *   the backend.  The (elided) tail presumably returns ins.
 */
4598 emit_memory_barrier (MonoCompile *cfg, int kind)
4600 MonoInst *ins = NULL;
4601 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4602 MONO_ADD_INS (cfg->cbb, ins);
4603 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsic expansion used only when compiling with the LLVM backend.
 *   Handles System.Math Sin/Cos/Sqrt/Abs(double) (the opcode assignments for
 *   each case are elided in this extract) and, under MONO_OPT_CMOV,
 *   Min/Max for I4/U4/I8/U8.  Returns NULL when no intrinsic applies
 *   (return statements elided here).
 */
4609 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4611 MonoInst *ins = NULL;
4614 /* The LLVM backend supports these intrinsics */
4615 if (cmethod->klass == mono_defaults.math_class) {
4616 if (strcmp (cmethod->name, "Sin") == 0) {
4618 } else if (strcmp (cmethod->name, "Cos") == 0) {
4620 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4622 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary float intrinsic: one float source, float destination */
4627 MONO_INST_NEW (cfg, ins, opcode);
4628 ins->type = STACK_R8;
4629 ins->dreg = mono_alloc_freg (cfg);
4630 ins->sreg1 = args [0]->dreg;
4631 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to conditional-move opcodes, so require MONO_OPT_CMOV */
4635 if (cfg->opt & MONO_OPT_CMOV) {
4636 if (strcmp (cmethod->name, "Min") == 0) {
4637 if (fsig->params [0]->type == MONO_TYPE_I4)
4639 if (fsig->params [0]->type == MONO_TYPE_U4)
4640 opcode = OP_IMIN_UN;
4641 else if (fsig->params [0]->type == MONO_TYPE_I8)
4643 else if (fsig->params [0]->type == MONO_TYPE_U8)
4644 opcode = OP_LMIN_UN;
4645 } else if (strcmp (cmethod->name, "Max") == 0) {
4646 if (fsig->params [0]->type == MONO_TYPE_I4)
4648 if (fsig->params [0]->type == MONO_TYPE_U4)
4649 opcode = OP_IMAX_UN;
4650 else if (fsig->params [0]->type == MONO_TYPE_I8)
4652 else if (fsig->params [0]->type == MONO_TYPE_U8)
4653 opcode = OP_LMAX_UN;
/* binary integer intrinsic: result width follows the parameter type */
4658 MONO_INST_NEW (cfg, ins, opcode);
4659 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4660 ins->dreg = mono_alloc_ireg (cfg);
4661 ins->sreg1 = args [0]->dreg;
4662 ins->sreg2 = args [1]->dreg;
4663 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe to emit even under generic sharing; currently
 *   only Array.UnsafeStore/UnsafeLoad.  The (elided) tail presumably
 *   returns NULL when nothing matches.
 */
4671 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4673 if (cmethod->klass == mono_defaults.array_class) {
4674 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4675 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4676 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4677 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Main intrinsic-expansion dispatcher: when @cmethod is one of a known set
 *   of corlib methods, emit equivalent IR inline and return the resulting
 *   instruction; otherwise fall through to SIMD / LLVM / arch-specific hooks.
 *   Dispatch is keyed on cmethod->klass: String, Object, Array,
 *   RuntimeHelpers, Thread, Monitor, Interlocked, Debugger/Environment, Math.
 *   NOTE(review): this extract elides many lines (else branches, braces,
 *   returns, some declarations such as `opcode`/`size`/`call`); the code is
 *   left byte-identical and only comments were added.
 */
4684 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4686 MonoInst *ins = NULL;
/* lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
4688 static MonoClass *runtime_helpers_class = NULL;
4689 if (! runtime_helpers_class)
4690 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4691 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4693 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds check + 16-bit load from the chars array */
4694 if (strcmp (cmethod->name, "get_Chars") == 0) {
4695 int dreg = alloc_ireg (cfg);
4696 int index_reg = alloc_preg (cfg);
4697 int mult_reg = alloc_preg (cfg);
4698 int add_reg = alloc_preg (cfg);
4700 #if SIZEOF_REGISTER == 8
4701 /* The array reg is 64 bits but the index reg is only 32 */
4702 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4704 index_reg = args [1]->dreg;
4706 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4708 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 can fold base + index*2 + offset into a single LEA */
4709 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4710 add_reg = ins->dreg;
4711 /* Avoid a warning */
4713 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* generic path: explicit shift + add to compute the char address */
4716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4717 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4718 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4719 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4721 type_from_op (ins, NULL, NULL);
/* String.get_Length: emit OP_STRLEN, decomposed later */
4723 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4724 int dreg = alloc_ireg (cfg);
4725 /* Decompose later to allow more optimizations */
4726 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4727 ins->type = STACK_I4;
4728 ins->flags |= MONO_INST_FAULT;
4729 cfg->cbb->has_array_access = TRUE;
4730 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* String.InternalSetChar: unchecked 16-bit store into the chars array */
4733 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4734 int mult_reg = alloc_preg (cfg);
4735 int add_reg = alloc_preg (cfg);
4737 /* The corlib functions check for oob already. */
4738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4739 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4740 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4741 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4744 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: load vtable, then vtable->type */
4746 if (strcmp (cmethod->name, "GetType") == 0) {
4747 int dreg = alloc_ireg_ref (cfg);
4748 int vt_reg = alloc_preg (cfg);
4749 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4750 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4751 type_from_op (ins, NULL, NULL);
4754 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* address-based hash; only valid with a non-moving GC */
4755 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4756 int dreg = alloc_ireg (cfg);
4757 int t1 = alloc_ireg (cfg);
4759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
/* 2654435761 is the Knuth multiplicative-hash constant */
4760 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4761 ins->type = STACK_I4;
/* Object..ctor is empty: emit a NOP */
4765 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4766 MONO_INST_NEW (cfg, ins, OP_NOP);
4767 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4771 } else if (cmethod->klass == mono_defaults.array_class) {
/* matches both GetGenericValueImpl and SetGenericValueImpl by suffix */
4772 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4773 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4775 #ifndef MONO_BIG_ARRAYS
4777 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4780 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4781 int dreg = alloc_ireg (cfg);
4782 int bounds_reg = alloc_ireg_mp (cfg);
4783 MonoBasicBlock *end_bb, *szarray_bb;
4784 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4786 NEW_BBLOCK (cfg, end_bb);
4787 NEW_BBLOCK (cfg, szarray_bb);
/* branch on whether the array has a bounds descriptor (NULL => szarray) */
4789 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4790 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4792 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4793 /* Non-szarray case */
4795 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4796 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4798 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4799 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4800 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4801 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length; GetLowerBound(0) is always 0 */
4804 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4805 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4807 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4808 MONO_START_BB (cfg, end_bb);
4810 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4811 ins->type = STACK_I4;
/* remaining Array intrinsics are all getters (get_Rank / get_Length) */
4817 if (cmethod->name [0] != 'g')
4820 if (strcmp (cmethod->name, "get_Rank") == 0) {
4821 int dreg = alloc_ireg (cfg);
4822 int vtable_reg = alloc_preg (cfg);
4823 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4824 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4825 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4826 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4827 type_from_op (ins, NULL, NULL);
4830 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4831 int dreg = alloc_ireg (cfg);
4833 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4834 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4835 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4840 } else if (cmethod->klass == runtime_helpers_class) {
4842 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4843 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4847 } else if (cmethod->klass == mono_defaults.thread_class) {
4848 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4849 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4850 MONO_ADD_INS (cfg->cbb, ins);
4852 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4853 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
4855 } else if (cmethod->klass == mono_defaults.monitor_class) {
4857 /* FIXME this should be integrated to the check below once we support the trampoline version */
4858 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4859 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4860 MonoMethod *fast_method = NULL;
4862 /* Avoid infinite recursion */
4863 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4866 fast_method = mono_monitor_get_fast_path (cmethod);
4870 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4874 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* trampoline-based Enter/Exit: object passed in a fixed register, except
 * under LLVM where the normal calling convention is used */
4875 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4878 if (COMPILE_LLVM (cfg)) {
4880 * Pass the argument normally, the LLVM backend will handle the
4881 * calling convention problems.
4883 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4885 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4886 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4887 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4888 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4891 return (MonoInst*)call;
4892 } else if (strcmp (cmethod->name, "Exit") == 0) {
4895 if (COMPILE_LLVM (cfg)) {
4896 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4898 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4899 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4900 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4901 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4904 return (MonoInst*)call;
4906 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4908 MonoMethod *fast_method = NULL;
4910 /* Avoid infinite recursion */
4911 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4912 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4913 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4916 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4917 strcmp (cmethod->name, "Exit") == 0)
4918 fast_method = mono_monitor_get_fast_path (cmethod);
4922 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4925 } else if (cmethod->klass->image == mono_defaults.corlib &&
4926 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4927 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4930 #if SIZEOF_REGISTER == 8
4931 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4932 /* 64 bit reads are already atomic */
4933 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4934 ins->dreg = mono_alloc_preg (cfg);
4935 ins->inst_basereg = args [0]->dreg;
4936 ins->inst_offset = 0;
4937 MONO_ADD_INS (cfg->cbb, ins);
4941 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment: atomic add of the constant 1 */
4942 if (strcmp (cmethod->name, "Increment") == 0) {
4943 MonoInst *ins_iconst;
4946 if (fsig->params [0]->type == MONO_TYPE_I4)
4947 opcode = OP_ATOMIC_ADD_NEW_I4;
4948 #if SIZEOF_REGISTER == 8
4949 else if (fsig->params [0]->type == MONO_TYPE_I8)
4950 opcode = OP_ATOMIC_ADD_NEW_I8;
4953 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4954 ins_iconst->inst_c0 = 1;
4955 ins_iconst->dreg = mono_alloc_ireg (cfg);
4956 MONO_ADD_INS (cfg->cbb, ins_iconst);
4958 MONO_INST_NEW (cfg, ins, opcode);
4959 ins->dreg = mono_alloc_ireg (cfg);
4960 ins->inst_basereg = args [0]->dreg;
4961 ins->inst_offset = 0;
4962 ins->sreg2 = ins_iconst->dreg;
4963 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4964 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of the constant -1 */
4966 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4967 MonoInst *ins_iconst;
4970 if (fsig->params [0]->type == MONO_TYPE_I4)
4971 opcode = OP_ATOMIC_ADD_NEW_I4;
4972 #if SIZEOF_REGISTER == 8
4973 else if (fsig->params [0]->type == MONO_TYPE_I8)
4974 opcode = OP_ATOMIC_ADD_NEW_I8;
4977 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4978 ins_iconst->inst_c0 = -1;
4979 ins_iconst->dreg = mono_alloc_ireg (cfg);
4980 MONO_ADD_INS (cfg->cbb, ins_iconst);
4982 MONO_INST_NEW (cfg, ins, opcode);
4983 ins->dreg = mono_alloc_ireg (cfg);
4984 ins->inst_basereg = args [0]->dreg;
4985 ins->inst_offset = 0;
4986 ins->sreg2 = ins_iconst->dreg;
4987 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4988 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied operand */
4990 } else if (strcmp (cmethod->name, "Add") == 0) {
4993 if (fsig->params [0]->type == MONO_TYPE_I4)
4994 opcode = OP_ATOMIC_ADD_NEW_I4;
4995 #if SIZEOF_REGISTER == 8
4996 else if (fsig->params [0]->type == MONO_TYPE_I8)
4997 opcode = OP_ATOMIC_ADD_NEW_I8;
5001 MONO_INST_NEW (cfg, ins, opcode);
5002 ins->dreg = mono_alloc_ireg (cfg);
5003 ins->inst_basereg = args [0]->dreg;
5004 ins->inst_offset = 0;
5005 ins->sreg2 = args [1]->dreg;
5006 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5007 MONO_ADD_INS (cfg->cbb, ins);
5010 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5012 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Exchange: pointer-width for refs/IntPtr, element width otherwise */
5013 if (strcmp (cmethod->name, "Exchange") == 0) {
5015 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5017 if (fsig->params [0]->type == MONO_TYPE_I4)
5018 opcode = OP_ATOMIC_EXCHANGE_I4;
5019 #if SIZEOF_REGISTER == 8
5020 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5021 (fsig->params [0]->type == MONO_TYPE_I))
5022 opcode = OP_ATOMIC_EXCHANGE_I8;
5024 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5025 opcode = OP_ATOMIC_EXCHANGE_I4;
5030 MONO_INST_NEW (cfg, ins, opcode);
5031 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5032 ins->inst_basereg = args [0]->dreg;
5033 ins->inst_offset = 0;
5034 ins->sreg2 = args [1]->dreg;
5035 MONO_ADD_INS (cfg->cbb, ins);
5037 switch (fsig->params [0]->type) {
5039 ins->type = STACK_I4;
5043 ins->type = STACK_I8;
5045 case MONO_TYPE_OBJECT:
5046 ins->type = STACK_OBJ;
5049 g_assert_not_reached ();
/* exchanging a reference into a location needs a write barrier */
5052 if (cfg->gen_write_barriers && is_ref)
5053 emit_write_barrier (cfg, args [0], args [1], -1);
5055 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5057 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
/* CompareExchange: width chosen from the operand type (size var elided) */
5058 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5060 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5061 if (fsig->params [1]->type == MONO_TYPE_I4)
5063 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5064 size = sizeof (gpointer);
5065 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5068 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5069 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5070 ins->sreg1 = args [0]->dreg;
5071 ins->sreg2 = args [1]->dreg;
5072 ins->sreg3 = args [2]->dreg;
5073 ins->type = STACK_I4;
5074 MONO_ADD_INS (cfg->cbb, ins);
5075 } else if (size == 8) {
5076 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5077 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5078 ins->sreg1 = args [0]->dreg;
5079 ins->sreg2 = args [1]->dreg;
5080 ins->sreg3 = args [2]->dreg;
5081 ins->type = STACK_I8;
5082 MONO_ADD_INS (cfg->cbb, ins);
5084 /* g_assert_not_reached (); */
5086 if (cfg->gen_write_barriers && is_ref)
5087 emit_write_barrier (cfg, args [0], args [1], -1);
5089 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5091 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5092 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- other corlib classes (Debugger.Break, Environment) ---- */
5096 } else if (cmethod->klass->image == mono_defaults.corlib) {
5097 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5098 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5099 if (should_insert_brekpoint (cfg->method)) {
5100 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5102 MONO_INST_NEW (cfg, ins, OP_NOP);
5103 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows folds to a constant at JIT time */
5107 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5108 && strcmp (cmethod->klass->name, "Environment") == 0) {
5110 EMIT_NEW_ICONST (cfg, ins, 1);
5112 EMIT_NEW_ICONST (cfg, ins, 0);
5116 } else if (cmethod->klass == mono_defaults.math_class) {
5118 * There is general branches code for Min/Max, but it does not work for
5120 * http://everything2.com/?node_id=1051618
/* fall-through hooks: SIMD intrinsics, LLVM intrinsics, then the backend */
5124 #ifdef MONO_ARCH_SIMD_INTRINSICS
5125 if (cfg->opt & MONO_OPT_SIMD) {
5126 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5132 if (COMPILE_LLVM (cfg)) {
5133 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5138 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5142 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected calls to alternative implementations.  Currently only
 *   String.InternalAllocateStr, which is rewritten to the GC's managed
 *   allocator (when profiling allocations is off and MONO_OPT_SHARED is not
 *   set).  The (elided) tail presumably returns NULL when no redirect applies.
 */
5145 inline static MonoInst*
5146 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5147 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5149 if (method->klass == mono_defaults.string_class) {
5150 /* managed string allocation support */
5151 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5152 MonoInst *iargs [2];
5153 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5154 MonoMethod *managed_alloc = NULL;
5156 g_assert (vtable); /* Should not fail since it is System.String */
5157 #ifndef MONO_CROSS_COMPILE
5158 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* call the managed allocator with (vtable, length) */
5162 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5163 iargs [1] = args [0];
5164 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   For inlining: create an OP_LOCAL variable for each incoming argument
 *   (including the implicit `this`) and store the corresponding stack value
 *   into it, so the inlined body reads arguments through cfg->args.
 */
5171 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5173 MonoInst *store, *temp;
5176 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* slot 0 is `this` when present; its type comes from the stack entry */
5177 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5180 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5181 * would be different than the MonoInst's used to represent arguments, and
5182 * the ldelema implementation can't deal with that.
5183 * Solution: When ldelema is used on an inline argument, create a var for
5184 * it, emit ldelema on that var, and emit the saving code below in
5185 * inline_method () if needed.
5187 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5188 cfg->args [i] = temp;
5189 /* This uses cfg->args [i] which is set by the preceding line */
5190 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5191 store->cil_code = sp [0]->cil_code;
/* Compile-time switches for the env-var-based inlining name filters below. */
5196 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5197 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5199 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug aid: only allow inlining of callees whose full name starts with
 *   the prefix from MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The env var is
 *   read once and cached in a static; an unset var (cached as "") means no
 *   restriction.  The (elided) tail presumably returns TRUE in that case.
 */
5201 check_inline_called_method_name_limit (MonoMethod *called_method)
5204 static char *limit = NULL;
5206 if (limit == NULL) {
5207 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5209 if (limit_string != NULL)
5210 limit = limit_string;
5212 limit = (char *) "";
5215 if (limit [0] != '\0') {
5216 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix comparison: match only the first strlen(limit) characters */
5218 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5219 g_free (called_method_name);
5221 //return (strncmp_result <= 0);
5222 return (strncmp_result == 0);
5229 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit for the CALLER side,
 *   driven by MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Same caching and
 *   prefix-match semantics.
 */
5231 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5234 static char *limit = NULL;
5236 if (limit == NULL) {
5237 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5238 if (limit_string != NULL) {
5239 limit = limit_string;
5241 limit = (char *) "";
5245 if (limit [0] != '\0') {
5246 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5248 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5249 g_free (caller_method_name);
5251 //return (strncmp_result <= 0);
5252 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Initialize an inline return variable to a zero/default value of the
 *   appropriate stack type: 0 for I4/I8/pointer, 0.0 for R8 (via a shared
 *   static constant), VZERO for value types.  Case labels are elided in this
 *   extract; dispatch is on rvar->type.
 */
5260 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* shared 0.0 constant referenced by address from OP_R8CONST */
5262 static double r8_0 = 0.0;
5265 switch (rvar->type) {
5267 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5270 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5275 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5278 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5279 ins->type = STACK_R8;
5280 ins->inst_p0 = (void*)&r8_0;
5281 ins->dreg = rvar->dreg;
5282 MONO_ADD_INS (cfg->cbb, ins);
5285 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5288 g_assert_not_reached ();
/*
 * inline_method:
 *   Attempt to inline @cmethod at the current IL position.  Saves the parts
 *   of @cfg that mono_method_to_ir() will clobber, recursively converts the
 *   callee between fresh start/end bblocks, then either commits (merging the
 *   new bblocks and loading the return variable) or aborts and restores
 *   cfg->cbb.  Returns value elided in this extract — presumably the cost /
 *   success indication used by callers.
 *   NOTE(review): many lines (gotos, braces, some declarations like `costs`,
 *   `i`, `bb`) are elided; code left byte-identical.
 */
5293 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5294 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5296 MonoInst *ins, *rvar = NULL;
5297 MonoMethodHeader *cheader;
5298 MonoBasicBlock *ebblock, *sbblock;
/* saved compile-state fields, restored after the recursive conversion */
5300 MonoMethod *prev_inlined_method;
5301 MonoInst **prev_locals, **prev_args;
5302 MonoType **prev_arg_types;
5303 guint prev_real_offset;
5304 GHashTable *prev_cbb_hash;
5305 MonoBasicBlock **prev_cil_offset_to_bb;
5306 MonoBasicBlock *prev_cbb;
5307 unsigned char* prev_cil_start;
5308 guint32 prev_cil_offset_to_bb_len;
5309 MonoMethod *prev_current_method;
5310 MonoGenericContext *prev_generic_context;
5311 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5313 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var name filters (debug aids); skipped for forced inlines */
5315 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5316 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5319 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5320 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5324 if (cfg->verbose_level > 2)
5325 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5327 if (!cmethod->inline_info) {
5328 cfg->stat_inlineable_methods++;
5329 cmethod->inline_info = 1;
5332 /* allocate local variables */
5333 cheader = mono_method_get_header (cmethod);
5335 if (cheader == NULL || mono_loader_get_last_error ()) {
5336 MonoLoaderError *error = mono_loader_get_last_error ();
5339 mono_metadata_free_mh (cheader);
5340 if (inline_always && error)
5341 mono_cfg_set_exception (cfg, error->exception_type);
5343 mono_loader_clear_error ();
5347 /*Must verify before creating locals as it can cause the JIT to assert.*/
5348 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5349 mono_metadata_free_mh (cheader);
5353 /* allocate space to store the return value */
5354 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5355 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in a fresh locals array for the callee's locals */
5358 prev_locals = cfg->locals;
5359 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5360 for (i = 0; i < cheader->num_locals; ++i)
5361 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5363 /* allocate start and end blocks */
5364 /* This is needed so if the inline is aborted, we can clean up */
5365 NEW_BBLOCK (cfg, sbblock);
5366 sbblock->real_offset = real_offset;
5368 NEW_BBLOCK (cfg, ebblock);
5369 ebblock->block_num = cfg->num_bblocks++;
5370 ebblock->real_offset = real_offset;
/* save every cfg field the recursive mono_method_to_ir call overwrites */
5372 prev_args = cfg->args;
5373 prev_arg_types = cfg->arg_types;
5374 prev_inlined_method = cfg->inlined_method;
5375 cfg->inlined_method = cmethod;
5376 cfg->ret_var_set = FALSE;
5377 cfg->inline_depth ++;
5378 prev_real_offset = cfg->real_offset;
5379 prev_cbb_hash = cfg->cbb_hash;
5380 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5381 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5382 prev_cil_start = cfg->cil_start;
5383 prev_cbb = cfg->cbb;
5384 prev_current_method = cfg->current_method;
5385 prev_generic_context = cfg->generic_context;
5386 prev_ret_var_set = cfg->ret_var_set;
5388 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* recursively convert the callee's IL into IR between sbblock/ebblock */
5391 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5393 ret_var_set = cfg->ret_var_set;
/* restore the saved compile state */
5395 cfg->inlined_method = prev_inlined_method;
5396 cfg->real_offset = prev_real_offset;
5397 cfg->cbb_hash = prev_cbb_hash;
5398 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5399 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5400 cfg->cil_start = prev_cil_start;
5401 cfg->locals = prev_locals;
5402 cfg->args = prev_args;
5403 cfg->arg_types = prev_arg_types;
5404 cfg->current_method = prev_current_method;
5405 cfg->generic_context = prev_generic_context;
5406 cfg->ret_var_set = prev_ret_var_set;
5407 cfg->inline_depth --;
/* commit when the body was cheap enough, or the inline was forced */
5409 if ((costs >= 0 && costs < 60) || inline_always) {
5410 if (cfg->verbose_level > 2)
5411 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5413 cfg->stat_inlined_methods++;
5415 /* always add some code to avoid block split failures */
5416 MONO_INST_NEW (cfg, ins, OP_NOP);
5417 MONO_ADD_INS (prev_cbb, ins);
5419 prev_cbb->next_bb = sbblock;
5420 link_bblock (cfg, prev_cbb, sbblock);
5423 * Get rid of the begin and end bblocks if possible to aid local
5426 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5428 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5429 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5431 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5432 MonoBasicBlock *prev = ebblock->in_bb [0];
5433 mono_merge_basic_blocks (cfg, prev, ebblock);
5435 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5436 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5437 cfg->cbb = prev_cbb;
5441 * It's possible that the rvar is set in some prev bblock, but not in others.
5447 for (i = 0; i < ebblock->in_count; ++i) {
5448 bb = ebblock->in_bb [i];
5450 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5453 emit_init_rvar (cfg, rvar, fsig->ret);
5463 * If the inlined method contains only a throw, then the ret var is not
5464 * set, so set it to a dummy value.
5467 emit_init_rvar (cfg, rvar, fsig->ret);
5469 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* header ownership moves to cfg; freed when the compile finishes */
5472 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: discard the partially built blocks and reset state */
5475 if (cfg->verbose_level > 2)
5476 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5477 cfg->exception_type = MONO_EXCEPTION_NONE;
5478 mono_loader_clear_error ();
5480 /* This gets rid of the newly added bblocks */
5481 cfg->cbb = prev_cbb;
5483 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5488 * Some of these comments may well be out-of-date.
5489 * Design decisions: we do a single pass over the IL code (and we do bblock
5490 * splitting/merging in the few cases when it's required: a back jump to an IL
5491 * address that was not already seen as bblock starting point).
5492 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5493 * Complex operations are decomposed in simpler ones right away. We need to let the
5494 * arch-specific code peek and poke inside this process somehow (except when the
5495 * optimizations can take advantage of the full semantic info of coarse opcodes).
5496 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5497 * MonoInst->opcode initially is the IL opcode or some simplification of that
5498 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5499 * opcode with value bigger than OP_LAST.
5500 * At this point the IR can be handed over to an interpreter, a dumb code generator
5501 * or to the optimizing code generator that will translate it to SSA form.
5503 * Profiling directed optimizations.
5504 * We may compile by default with few or no optimizations and instrument the code
5505 * or the user may indicate what methods to optimize the most either in a config file
5506 * or through repeated runs where the compiler applies offline the optimizations to
5507 * each method and then decides if it was worth it.
/*
 * Verification helpers used throughout mono_method_to_ir: each macro bails
 * out via UNVERIFIED (or LOAD_ERROR) when the IL being converted violates a
 * structural constraint.  They rely on locals of the enclosing function
 * (sp, stack_start, header, num_args, ip, end, cfg).
 */
5510 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5511 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5512 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5513 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5514 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5515 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5516 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5517 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5519 /* offset from br.s -> br like opcodes */
5520 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns whether the IL address @ip still belongs to bblock @bb, i.e. no
 *   other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5523 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5525 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5527 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [start, end): walk each opcode and create
 *   a bblock (via GET_BBLOCK) at every branch target and at the instruction
 *   following a branch/switch, so the main conversion loop knows all block
 *   boundaries up front.  Also marks blocks containing a `throw` as
 *   out-of-line (cold).  NOTE(review): many case arms, the loop construct
 *   and some ip advancement lines are elided in this extract.
 */
5531 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5533 unsigned char *ip = start;
5534 unsigned char *target;
5537 MonoBasicBlock *bblock;
5538 const MonoOpcode *opcode;
5541 cli_addr = ip - start;
5542 i = mono_opcode_value ((const guint8 **)&ip, end);
5545 opcode = &mono_opcodes [i];
/* advance ip by the operand size of each opcode kind */
5546 switch (opcode->argument) {
5547 case MonoInlineNone:
5550 case MonoInlineString:
5551 case MonoInlineType:
5552 case MonoInlineField:
5553 case MonoInlineMethod:
5556 case MonoShortInlineR:
5563 case MonoShortInlineVar:
5564 case MonoShortInlineI:
/* short branch: 8-bit signed displacement after a 2-byte instruction */
5567 case MonoShortInlineBrTarget:
5568 target = start + cli_addr + 2 + (signed char)ip [1];
5569 GET_BBLOCK (cfg, bblock, target);
5572 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 32-bit displacement after a 5-byte instruction */
5574 case MonoInlineBrTarget:
5575 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5576 GET_BBLOCK (cfg, bblock, target);
5579 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 32-bit targets, all relative to the end of the table */
5581 case MonoInlineSwitch: {
5582 guint32 n = read32 (ip + 1);
5585 cli_addr += 5 + 4 * n;
5586 target = start + cli_addr;
5587 GET_BBLOCK (cfg, bblock, target);
5589 for (j = 0; j < n; ++j) {
5590 target = start + cli_addr + (gint32)read32 (ip);
5591 GET_BBLOCK (cfg, bblock, target);
5601 g_assert_not_reached ();
5604 if (i == CEE_THROW) {
5605 unsigned char *bb_start = ip - 1;
5607 /* Find the start of the bblock containing the throw */
5609 while ((bb_start >= start) && !bblock) {
5610 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throwing blocks are cold: mark them out-of-line for block layout */
5614 bblock->out_of_line = 1;
/* mini_get_method_allow_open:
 * Resolve a method token in the context of method M. For wrapper methods
 * the token indexes the wrapper's private data (then inflated with
 * `context` if present — the inflation branch is partially elided here);
 * otherwise it is a normal metadata token resolved against M's image.
 * "allow_open" = the result may still be an open constructed type. */
5624 static inline MonoMethod *
5625 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5629 if (m->wrapper_type != MONO_WRAPPER_NONE) {
5630 method = mono_method_get_wrapper_data (m, token);
5632 method = mono_class_inflate_generic_method (method, context);
5634 method = mono_get_method_full (m->klass->image, token, klass, context);
/* mini_get_method:
 * Like mini_get_method_allow_open, but when not compiling shared generic
 * code an open constructed declaring type is rejected (the handling after
 * the check is elided in this listing). */
5640 static inline MonoMethod *
5641 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5643 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5645 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/* mini_get_class:
 * Resolve a type token in the context of `method`: wrapper methods look
 * the class up in their wrapper data (and inflate it with `context` if
 * needed), normal methods resolve the token against their image. The
 * class is initialized before being returned. */
5651 static inline MonoClass*
5652 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5656 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5657 klass = mono_method_get_wrapper_data (method, token);
5659 klass = mono_class_inflate_generic_class (klass, context);
5661 klass = mono_class_get_full (method->klass->image, token, context);
5664 mono_class_init (klass);
/* mini_get_signature:
 * Resolve a standalone-signature token: wrappers fetch the signature from
 * their wrapper data and inflate it with `context` (inflation errors are
 * asserted fatal); normal methods parse the signature from metadata. */
5668 static inline MonoMethodSignature*
5669 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
5671 MonoMethodSignature *fsig;
5673 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5676 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5678 fsig = mono_inflate_generic_signature (fsig, context, &error);
/* Inflating a wrapper signature is not expected to fail. */
5680 g_assert (mono_error_ok (&error));
5683 fsig = mono_metadata_parse_signature (method->klass->image, token);
5689 * Returns TRUE if the JIT should abort inlining because "callee"
5690 * is influenced by security attributes.
/* check_linkdemand:
 * Evaluate CAS LinkDemand declarative security for a caller/callee pair.
 * On an ECMA link demand, code throwing a SecurityException is emitted
 * inline before the call; other failures record a pending
 * SECURITY_LINKDEMAND exception on the cfg (unless one is already set). */
5693 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined callees with declarative security influence the caller, so the
 * elided code here presumably aborts inlining — confirm in full source. */
5697 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
5701 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5702 if (result == MONO_JIT_SECURITY_OK)
5705 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5706 /* Generate code to throw a SecurityException before the actual call/link */
5707 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args[0] = 4: constant consumed by LinkDemandSecurityException — meaning
 * defined by the managed SecurityManager API, not visible here. */
5710 NEW_ICONST (cfg, args [0], 4);
5711 NEW_METHODCONST (cfg, args [1], caller);
5712 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5713 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5714 /* don't hide previous results */
5715 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5716 cfg->exception_data = result;
/* throw_exception:
 * Return (lazily resolving and caching in a function-local static) the
 * managed SecurityManager.ThrowException(Exception) method used to raise
 * exceptions from JIT-generated code. */
5724 throw_exception (void)
5726 static MonoMethod *method = NULL;
5729 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5730 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* emit_throw_exception:
 * Emit IR that calls SecurityManager.ThrowException with the given
 * pre-built exception object, so it is raised when the code runs. */
5737 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5739 MonoMethod *thrower = throw_exception ();
5742 EMIT_NEW_PCONST (cfg, args [0], ex);
5743 mono_emit_method_call (cfg, thrower, args, NULL);
5747 * Return the original method if a wrapper is specified. We can only access
5748 * the custom attributes from the original method.
5751 get_original_method (MonoMethod *method)
/* Non-wrapper methods are already the original. */
5753 if (method->wrapper_type == MONO_WRAPPER_NONE)
5756 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5757 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5760 /* in other cases we need to find the original method */
5761 return mono_marshal_method_from_wrapper (method);
/* ensure_method_is_allowed_to_access_field:
 * CoreCLR security check for field access: if the (unwrapped) caller is
 * not allowed to touch `field`, emit code that throws the returned
 * exception at the access site. */
5765 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5766 MonoBasicBlock *bblock, unsigned char *ip)
5768 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5769 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5771 emit_throw_exception (cfg, ex);
/* ensure_method_is_allowed_to_call_method:
 * CoreCLR security check for calls: if the (unwrapped) caller may not
 * invoke `callee`, emit code that throws the returned exception at the
 * call site. */
5775 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5776 MonoBasicBlock *bblock, unsigned char *ip)
5778 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5779 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5781 emit_throw_exception (cfg, ex);
5785 * Check that the IL instructions at ip are the array initialization
5786 * sequence and return the pointer to the data and the size.
5789 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5792 * newarr[System.Int32]
5794 * ldtoken field valuetype ...
5795 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern match: dup; ldtoken <field>; call <method>. ip[5] == 0x4 checks
 * the token-table byte of the ldtoken operand (MONO_TOKEN high byte). */
5797 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5798 guint32 token = read32 (ip + 7);
5799 guint32 field_token = read32 (ip + 2);
/* Row index within the Field table (low 24 bits of the token). */
5800 guint32 field_index = field_token & 0xffffff;
5802 const char *data_ptr;
5804 MonoMethod *cmethod;
5805 MonoClass *dummy_class;
5806 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5812 *out_field_token = field_token;
5814 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize calls to corlib's RuntimeHelpers.InitializeArray. */
5817 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element types whose raw layout matches the metadata blob can be copied
 * directly; which types qualify depends on target endianness below. */
5819 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5820 case MONO_TYPE_BOOLEAN:
5824 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5825 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5826 case MONO_TYPE_CHAR:
5836 return NULL; /* stupid ARM FP swapped format */
/* The field's declared size must cover the array data we plan to copy. */
5846 if (size > mono_type_size (field->type, &dummy_align))
5849 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5850 if (!method->klass->image->dynamic) {
5851 field_index = read32 (ip + 2) & 0xffffff;
/* Non-dynamic images: the data lives at an RVA inside the mapped image. */
5852 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5853 data_ptr = mono_image_rva_map (method->klass->image, rva);
5854 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5855 /* for aot code we do the lookup on load */
5856 if (aot && data_ptr)
5857 return GUINT_TO_POINTER (rva);
5859 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images keep the blob on the field itself. */
5861 data_ptr = mono_field_get_data (field);
/* set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on the cfg with a message naming the
 * method and disassembling the offending IL instruction (or noting an
 * empty body). The header is queued on cfg->headers_to_free since the
 * message may reference it. */
5869 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5871 char *method_fname = mono_method_full_name (method, TRUE);
5873 MonoMethodHeader *header = mono_method_get_header (method);
5875 if (header->code_size == 0)
5876 method_code = g_strdup ("method body is empty.");
5878 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5879 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5880 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5881 g_free (method_fname);
5882 g_free (method_code);
5883 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* set_exception_object:
 * Attach a pre-built managed exception object to the cfg; the pointer is
 * registered as a GC root so the object survives until compilation aborts. */
5887 set_exception_object (MonoCompile *cfg, MonoException *exception)
5889 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5890 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5891 cfg->exception_ptr = exception;
/* emit_stloc_ir:
 * Emit a store into local `n` from the top of stack. If the store would
 * be a plain reg-reg move and the source is the immediately preceding
 * constant-load instruction, retarget that instruction's dreg to the
 * local instead of emitting a separate move. */
5895 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5898 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5899 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5900 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5901 /* Optimize reg-reg moves away */
5903 * Can't optimize other opcodes, since sp[0] might point to
5904 * the last ins of a decomposed opcode.
5906 sp [0]->dreg = (cfg)->locals [n]->dreg;
5908 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5913 * ldloca inhibits many optimizations so try to get rid of it in common
/* emit_optimized_ldloca_ir:
 * Peephole for `ldloca <local>` followed immediately by `initobj <type>`
 * in the same basic block: instead of taking the local's address, store
 * NULL (reference types) or a vzero (value types) directly into the
 * local, avoiding the address-taken penalty. `size` selects the short or
 * long ldloca encoding (operand read below is the 16-bit form). Returns
 * the new ip on success — the elided tail presumably returns NULL when
 * the pattern doesn't match; confirm in full source. */
5916 static inline unsigned char *
5917 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5926 local = read16 (ip + 2);
/* CEE_PREFIX1 + CEE_INITOBJ is the two-byte encoding of `initobj`. */
5930 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5931 gboolean skip = FALSE;
5933 /* From the INITOBJ case */
5934 token = read32 (ip + 2);
5935 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5936 CHECK_TYPELOAD (klass);
5937 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5938 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5939 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5940 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* is_exception_class:
 * Walk the inheritance chain and return whether `class` derives from
 * (or is) System.Exception. */
5953 is_exception_class (MonoClass *class)
5956 if (class == mono_defaults.exception_class)
5958 class = class->parent;
5964 * is_jit_optimizer_disabled:
5966 * Determine whenever M's assembly has a DebuggableAttribute with the
5967 * IsJITOptimizerDisabled flag set.
5970 is_jit_optimizer_disabled (MonoMethod *m)
5972 MonoAssembly *ass = m->klass->image->assembly;
5973 MonoCustomAttrInfo* attrs;
/* Cached DebuggableAttribute class; resolved once. */
5974 static MonoClass *klass;
5976 gboolean val = FALSE;
/* Fast path: the per-assembly answer is cached after the first call. */
5979 if (ass->jit_optimizer_disabled_inited)
5980 return ass->jit_optimizer_disabled;
5983 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* If the attribute class doesn't exist, cache a negative result.
 * The memory barrier publishes the value before the inited flag so
 * racing readers of the fast path never see a stale value. */
5986 ass->jit_optimizer_disabled = FALSE;
5987 mono_memory_barrier ();
5988 ass->jit_optimizer_disabled_inited = TRUE;
5992 attrs = mono_custom_attrs_from_assembly (ass);
5994 for (i = 0; i < attrs->num_attrs; ++i) {
5995 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5998 MonoMethodSignature *sig;
6000 if (!attr->ctor || attr->ctor->klass != klass)
6002 /* Decode the attribute. See reflection.c */
6003 len = attr->data_size;
6004 p = (const char*)attr->data;
/* Custom-attribute blobs start with prolog 0x0001 (ECMA-335 II.23.3). */
6005 g_assert (read16 (p) == 0x0001);
6008 // FIXME: Support named parameters
6009 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor is decoded. */
6010 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6012 /* Two boolean arguments */
6016 mono_custom_attrs_free (attrs);
6019 ass->jit_optimizer_disabled = val;
6020 mono_memory_barrier ();
6021 ass->jit_optimizer_disabled_inited = TRUE;
/* is_supported_tail_call:
 * Decide whether a `tail.` call from `method` to `cmethod` (with call
 * signature `fsig`) can actually be compiled as a tail call. Starts from
 * an arch-specific or signature-equality baseline, then vetoes the
 * transformation whenever the callee could observe the caller's stack
 * frame or other state that a tail call would destroy. */
6027 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
6029 gboolean supported_tail_call;
6032 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6033 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* Generic fallback: signatures must match exactly and the return value
 * must not be a struct (would need caller stack space). */
6035 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6038 for (i = 0; i < fsig->param_count; ++i) {
6039 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6040 /* These can point to the current method's stack */
6041 supported_tail_call = FALSE;
6043 if (fsig->hasthis && cmethod->klass->valuetype)
6044 /* this might point to the current method's stack */
6045 supported_tail_call = FALSE;
6046 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6047 supported_tail_call = FALSE;
/* An LMF frame pins the caller's frame, incompatible with tail calls. */
6048 if (cfg->method->save_lmf)
6049 supported_tail_call = FALSE;
6050 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6051 supported_tail_call = FALSE;
6053 /* Debugging support */
6055 if (supported_tail_call) {
/* mono_debug_count () lets developers bisect tail-call issues by
 * disabling the optimization past a counter threshold. */
6056 if (!mono_debug_count ())
6057 supported_tail_call = FALSE;
6061 return supported_tail_call;
6064 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6065 * it to the thread local value based on the tls_offset field. Every other kind of access to
6066 * the field causes an assert.
/* is_magic_tls_access:
 * TRUE only for the corlib field ThreadLocal`1.tlsdata; all three checks
 * (field name, declaring class name, declaring image) must match. */
6069 is_magic_tls_access (MonoClassField *field)
6071 if (strcmp (field->name, "tlsdata"))
6073 if (strcmp (field->parent->name, "ThreadLocal`1"))
6075 return field->parent->image == mono_defaults.corlib;
6078 /* emits the code needed to access a managed tls var (like ThreadStatic)
6079 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6080 * pointer for the current thread.
6081 * Returns the MonoInst* representing the address of the tls var.
6084 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6087 int static_data_reg, array_reg, dreg;
6088 int offset2_reg, idx_reg;
6089 // inlined access to the tls data
6090 // idx = (offset >> 24) - 1;
6091 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6092 static_data_reg = alloc_ireg (cfg);
6093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1: top byte of the offset encodes the 1-based
 * static_data slot index. */
6094 idx_reg = alloc_ireg (cfg);
6095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by pointer size (<<3 on 64-bit, <<2 on 32-bit) to
 * address into the pointer array. */
6097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6098 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6099 array_reg = alloc_ireg (cfg);
6100 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Low 24 bits of the offset are the byte offset within that slot. */
6101 offset2_reg = alloc_ireg (cfg);
6102 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6103 dreg = alloc_ireg (cfg);
6104 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6109 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6110 * this address is cached per-method in cached_tls_addr.
6113 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6115 MonoInst *load, *addr, *temp, *store, *thread_ins;
6116 MonoClassField *offset_field;
/* Fast path: the address was already computed in this method — reload
 * it from the cached temporary. */
6118 if (*cached_tls_addr) {
6119 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6122 thread_ins = mono_get_thread_intrinsic (cfg);
6123 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset value out of the ThreadLocal<T> instance. */
6125 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6127 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No arch intrinsic for the current thread: fall back to calling the
 * internal Thread.CurrentInternalThread_internal icall. */
6129 MonoMethod *thread_method;
6130 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6131 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6133 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
/* The result is a managed pointer to the T stored in the ThreadLocal. */
6134 addr->klass = mono_class_from_mono_type (tls_field->type);
6135 addr->type = STACK_MP;
/* Cache the address in a local so later accesses in this method reuse it. */
6136 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6137 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6139 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6144 * mono_method_to_ir:
6146 * Translate the .net IL into linear IR.
6149 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6150 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6151 guint inline_offset, gboolean is_virtual_call)
6154 MonoInst *ins, **sp, **stack_start;
6155 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6156 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6157 MonoMethod *cmethod, *method_definition;
6158 MonoInst **arg_array;
6159 MonoMethodHeader *header;
6161 guint32 token, ins_flag;
6163 MonoClass *constrained_call = NULL;
6164 unsigned char *ip, *end, *target, *err_pos;
6165 static double r8_0 = 0.0;
6166 MonoMethodSignature *sig;
6167 MonoGenericContext *generic_context = NULL;
6168 MonoGenericContainer *generic_container = NULL;
6169 MonoType **param_types;
6170 int i, n, start_new_bblock, dreg;
6171 int num_calls = 0, inline_costs = 0;
6172 int breakpoint_id = 0;
6174 MonoBoolean security, pinvoke;
6175 MonoSecurityManager* secman = NULL;
6176 MonoDeclSecurityActions actions;
6177 GSList *class_inits = NULL;
6178 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6180 gboolean init_locals, seq_points, skip_dead_blocks;
6181 gboolean disable_inline, sym_seq_points = FALSE;
6182 MonoInst *cached_tls_addr = NULL;
6183 MonoDebugMethodInfo *minfo;
6184 MonoBitSet *seq_point_locs = NULL;
6185 MonoBitSet *seq_point_set_locs = NULL;
6187 disable_inline = is_jit_optimizer_disabled (method);
6189 /* serialization and xdomain stuff may need access to private fields and methods */
6190 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6191 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6192 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6193 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6194 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6195 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6197 dont_verify |= mono_security_smcs_hack_enabled ();
6199 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6200 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6201 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6202 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6203 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6205 image = method->klass->image;
6206 header = mono_method_get_header (method);
6208 MonoLoaderError *error;
6210 if ((error = mono_loader_get_last_error ())) {
6211 mono_cfg_set_exception (cfg, error->exception_type);
6213 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6214 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6216 goto exception_exit;
6218 generic_container = mono_method_get_generic_container (method);
6219 sig = mono_method_signature (method);
6220 num_args = sig->hasthis + sig->param_count;
6221 ip = (unsigned char*)header->code;
6222 cfg->cil_start = ip;
6223 end = ip + header->code_size;
6224 cfg->stat_cil_code_size += header->code_size;
6225 init_locals = header->init_locals;
6227 seq_points = cfg->gen_seq_points && cfg->method == method;
6228 #ifdef PLATFORM_ANDROID
6229 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6232 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6233 /* We could hit a seq point before attaching to the JIT (#8338) */
6237 if (cfg->gen_seq_points && cfg->method == method) {
6238 minfo = mono_debug_lookup_method (method);
6240 int i, n_il_offsets;
6244 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6245 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6246 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6247 sym_seq_points = TRUE;
6248 for (i = 0; i < n_il_offsets; ++i) {
6249 if (il_offsets [i] < header->code_size)
6250 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6256 * Methods without init_locals set could cause asserts in various passes
6261 method_definition = method;
6262 while (method_definition->is_inflated) {
6263 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6264 method_definition = imethod->declaring;
6267 /* SkipVerification is not allowed if core-clr is enabled */
6268 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6270 dont_verify_stloc = TRUE;
6273 if (mono_debug_using_mono_debugger ())
6274 cfg->keep_cil_nops = TRUE;
6276 if (sig->is_inflated)
6277 generic_context = mono_method_get_context (method);
6278 else if (generic_container)
6279 generic_context = &generic_container->context;
6280 cfg->generic_context = generic_context;
6282 if (!cfg->generic_sharing_context)
6283 g_assert (!sig->has_type_parameters);
6285 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6286 g_assert (method->is_inflated);
6287 g_assert (mono_method_get_context (method)->method_inst);
6289 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6290 g_assert (sig->generic_param_count);
6292 if (cfg->method == method) {
6293 cfg->real_offset = 0;
6295 cfg->real_offset = inline_offset;
6298 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6299 cfg->cil_offset_to_bb_len = header->code_size;
6301 cfg->current_method = method;
6303 if (cfg->verbose_level > 2)
6304 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6306 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6308 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6309 for (n = 0; n < sig->param_count; ++n)
6310 param_types [n + sig->hasthis] = sig->params [n];
6311 cfg->arg_types = param_types;
6313 dont_inline = g_list_prepend (dont_inline, method);
6314 if (cfg->method == method) {
6316 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6317 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6320 NEW_BBLOCK (cfg, start_bblock);
6321 cfg->bb_entry = start_bblock;
6322 start_bblock->cil_code = NULL;
6323 start_bblock->cil_length = 0;
6324 #if defined(__native_client_codegen__)
6325 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6326 ins->dreg = alloc_dreg (cfg, STACK_I4);
6327 MONO_ADD_INS (start_bblock, ins);
6331 NEW_BBLOCK (cfg, end_bblock);
6332 cfg->bb_exit = end_bblock;
6333 end_bblock->cil_code = NULL;
6334 end_bblock->cil_length = 0;
6335 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6336 g_assert (cfg->num_bblocks == 2);
6338 arg_array = cfg->args;
6340 if (header->num_clauses) {
6341 cfg->spvars = g_hash_table_new (NULL, NULL);
6342 cfg->exvars = g_hash_table_new (NULL, NULL);
6344 /* handle exception clauses */
6345 for (i = 0; i < header->num_clauses; ++i) {
6346 MonoBasicBlock *try_bb;
6347 MonoExceptionClause *clause = &header->clauses [i];
6348 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6349 try_bb->real_offset = clause->try_offset;
6350 try_bb->try_start = TRUE;
6351 try_bb->region = ((i + 1) << 8) | clause->flags;
6352 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6353 tblock->real_offset = clause->handler_offset;
6354 tblock->flags |= BB_EXCEPTION_HANDLER;
6356 link_bblock (cfg, try_bb, tblock);
6358 if (*(ip + clause->handler_offset) == CEE_POP)
6359 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6361 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6362 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6363 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6364 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6365 MONO_ADD_INS (tblock, ins);
6367 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6368 /* finally clauses already have a seq point */
6369 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6370 MONO_ADD_INS (tblock, ins);
6373 /* todo: is a fault block unsafe to optimize? */
6374 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6375 tblock->flags |= BB_EXCEPTION_UNSAFE;
6379 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6381 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6383 /* catch and filter blocks get the exception object on the stack */
6384 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6385 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6386 MonoInst *dummy_use;
6388 /* mostly like handle_stack_args (), but just sets the input args */
6389 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6390 tblock->in_scount = 1;
6391 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6392 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6395 * Add a dummy use for the exvar so its liveness info will be
6399 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6401 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6402 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6403 tblock->flags |= BB_EXCEPTION_HANDLER;
6404 tblock->real_offset = clause->data.filter_offset;
6405 tblock->in_scount = 1;
6406 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6407 /* The filter block shares the exvar with the handler block */
6408 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6409 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6410 MONO_ADD_INS (tblock, ins);
6414 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6415 clause->data.catch_class &&
6416 cfg->generic_sharing_context &&
6417 mono_class_check_context_used (clause->data.catch_class)) {
6419 * In shared generic code with catch
6420 * clauses containing type variables
6421 * the exception handling code has to
6422 * be able to get to the rgctx.
6423 * Therefore we have to make sure that
6424 * the vtable/mrgctx argument (for
6425 * static or generic methods) or the
6426 * "this" argument (for non-static
6427 * methods) are live.
6429 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6430 mini_method_get_context (method)->method_inst ||
6431 method->klass->valuetype) {
6432 mono_get_vtable_var (cfg);
6434 MonoInst *dummy_use;
6436 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6441 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6442 cfg->cbb = start_bblock;
6443 cfg->args = arg_array;
6444 mono_save_args (cfg, sig, inline_args);
6447 /* FIRST CODE BLOCK */
6448 NEW_BBLOCK (cfg, bblock);
6449 bblock->cil_code = ip;
6453 ADD_BBLOCK (cfg, bblock);
6455 if (cfg->method == method) {
6456 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6457 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6458 MONO_INST_NEW (cfg, ins, OP_BREAK);
6459 MONO_ADD_INS (bblock, ins);
6463 if (mono_security_cas_enabled ())
6464 secman = mono_security_manager_get_methods ();
6466 security = (secman && mono_security_method_has_declsec (method));
6467 /* at this point having security doesn't mean we have any code to generate */
6468 if (security && (cfg->method == method)) {
6469 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6470 * And we do not want to enter the next section (with allocation) if we
6471 * have nothing to generate */
6472 security = mono_declsec_get_demands (method, &actions);
6475 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6476 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6478 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6479 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6480 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6482 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6483 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6487 mono_custom_attrs_free (custom);
6490 custom = mono_custom_attrs_from_class (wrapped->klass);
6491 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6495 mono_custom_attrs_free (custom);
6498 /* not a P/Invoke after all */
6503 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6504 /* we use a separate basic block for the initialization code */
6505 NEW_BBLOCK (cfg, init_localsbb);
6506 cfg->bb_init = init_localsbb;
6507 init_localsbb->real_offset = cfg->real_offset;
6508 start_bblock->next_bb = init_localsbb;
6509 init_localsbb->next_bb = bblock;
6510 link_bblock (cfg, start_bblock, init_localsbb);
6511 link_bblock (cfg, init_localsbb, bblock);
6513 cfg->cbb = init_localsbb;
6515 start_bblock->next_bb = bblock;
6516 link_bblock (cfg, start_bblock, bblock);
6519 /* at this point we know, if security is TRUE, that some code needs to be generated */
6520 if (security && (cfg->method == method)) {
6523 cfg->stat_cas_demand_generation++;
6525 if (actions.demand.blob) {
6526 /* Add code for SecurityAction.Demand */
6527 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6528 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6529 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6530 mono_emit_method_call (cfg, secman->demand, args, NULL);
6532 if (actions.noncasdemand.blob) {
6533 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6534 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6535 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6536 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6537 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6538 mono_emit_method_call (cfg, secman->demand, args, NULL);
6540 if (actions.demandchoice.blob) {
6541 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6542 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6543 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6544 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6545 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6549 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6551 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6554 if (mono_security_core_clr_enabled ()) {
6555 /* check if this is native code, e.g. an icall or a p/invoke */
6556 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6557 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6559 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6560 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6562 /* if this ia a native call then it can only be JITted from platform code */
6563 if ((icall || pinvk) && method->klass && method->klass->image) {
6564 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6565 MonoException *ex = icall ? mono_get_exception_security () :
6566 mono_get_exception_method_access ();
6567 emit_throw_exception (cfg, ex);
6574 if (header->code_size == 0)
6577 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6582 if (cfg->method == method)
6583 mono_debug_init_method (cfg, bblock, breakpoint_id);
6585 for (n = 0; n < header->num_locals; ++n) {
6586 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6591 /* We force the vtable variable here for all shared methods
6592 for the possibility that they might show up in a stack
6593 trace where their exact instantiation is needed. */
6594 if (cfg->generic_sharing_context && method == cfg->method) {
6595 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6596 mini_method_get_context (method)->method_inst ||
6597 method->klass->valuetype) {
6598 mono_get_vtable_var (cfg);
6600 /* FIXME: Is there a better way to do this?
6601 We need the variable live for the duration
6602 of the whole method. */
6603 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6607 /* add a check for this != NULL to inlined methods */
6608 if (is_virtual_call) {
6611 NEW_ARGLOAD (cfg, arg_ins, 0);
6612 MONO_ADD_INS (cfg->cbb, arg_ins);
6613 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6616 skip_dead_blocks = !dont_verify;
6617 if (skip_dead_blocks) {
6618 original_bb = bb = mono_basic_block_split (method, &error);
6619 if (!mono_error_ok (&error)) {
6620 mono_error_cleanup (&error);
6626 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6627 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6630 start_new_bblock = 0;
6633 if (cfg->method == method)
6634 cfg->real_offset = ip - header->code;
6636 cfg->real_offset = inline_offset;
6641 if (start_new_bblock) {
6642 bblock->cil_length = ip - bblock->cil_code;
6643 if (start_new_bblock == 2) {
6644 g_assert (ip == tblock->cil_code);
6646 GET_BBLOCK (cfg, tblock, ip);
6648 bblock->next_bb = tblock;
6651 start_new_bblock = 0;
6652 for (i = 0; i < bblock->in_scount; ++i) {
6653 if (cfg->verbose_level > 3)
6654 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6655 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6659 g_slist_free (class_inits);
6662 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6663 link_bblock (cfg, bblock, tblock);
6664 if (sp != stack_start) {
6665 handle_stack_args (cfg, stack_start, sp - stack_start);
6667 CHECK_UNVERIFIABLE (cfg);
6669 bblock->next_bb = tblock;
6672 for (i = 0; i < bblock->in_scount; ++i) {
6673 if (cfg->verbose_level > 3)
6674 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6675 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6678 g_slist_free (class_inits);
6683 if (skip_dead_blocks) {
6684 int ip_offset = ip - header->code;
6686 if (ip_offset == bb->end)
6690 int op_size = mono_opcode_size (ip, end);
6691 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6693 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6695 if (ip_offset + op_size == bb->end) {
6696 MONO_INST_NEW (cfg, ins, OP_NOP);
6697 MONO_ADD_INS (bblock, ins);
6698 start_new_bblock = 1;
6706 * Sequence points are points where the debugger can place a breakpoint.
6707 * Currently, we generate these automatically at points where the IL
6710 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6712 * Make methods interruptible at the beginning, and at the targets of
6713 * backward branches.
6714 * Also, do this at the start of every bblock in methods with clauses too,
6715 * to be able to handle instructions with imprecise control flow like
6717 * Backward branches are handled at the end of method-to-ir ().
6719 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6721 /* Avoid sequence points on empty IL like .volatile */
6722 // FIXME: Enable this
6723 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6724 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6725 MONO_ADD_INS (cfg->cbb, ins);
6728 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
6731 bblock->real_offset = cfg->real_offset;
6733 if ((cfg->method == method) && cfg->coverage_info) {
6734 guint32 cil_offset = ip - header->code;
6735 cfg->coverage_info->data [cil_offset].cil_code = ip;
6737 /* TODO: Use an increment here */
6738 #if defined(TARGET_X86)
6739 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6740 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6742 MONO_ADD_INS (cfg->cbb, ins);
6744 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6745 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6749 if (cfg->verbose_level > 3)
6750 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6754 if (seq_points && !sym_seq_points && sp != stack_start) {
6756 * The C# compiler uses these nops to notify the JIT that it should
6757 * insert seq points.
6759 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6760 MONO_ADD_INS (cfg->cbb, ins);
6762 if (cfg->keep_cil_nops)
6763 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6765 MONO_INST_NEW (cfg, ins, OP_NOP);
6767 MONO_ADD_INS (bblock, ins);
6770 if (should_insert_brekpoint (cfg->method)) {
6771 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6773 MONO_INST_NEW (cfg, ins, OP_NOP);
6776 MONO_ADD_INS (bblock, ins);
6782 CHECK_STACK_OVF (1);
6783 n = (*ip)-CEE_LDARG_0;
6785 EMIT_NEW_ARGLOAD (cfg, ins, n);
6793 CHECK_STACK_OVF (1);
6794 n = (*ip)-CEE_LDLOC_0;
6796 EMIT_NEW_LOCLOAD (cfg, ins, n);
6805 n = (*ip)-CEE_STLOC_0;
6808 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6810 emit_stloc_ir (cfg, sp, header, n);
6817 CHECK_STACK_OVF (1);
6820 EMIT_NEW_ARGLOAD (cfg, ins, n);
6826 CHECK_STACK_OVF (1);
6829 NEW_ARGLOADA (cfg, ins, n);
6830 MONO_ADD_INS (cfg->cbb, ins);
6840 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6842 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6847 CHECK_STACK_OVF (1);
6850 EMIT_NEW_LOCLOAD (cfg, ins, n);
6854 case CEE_LDLOCA_S: {
6855 unsigned char *tmp_ip;
6857 CHECK_STACK_OVF (1);
6858 CHECK_LOCAL (ip [1]);
6860 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6866 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6875 CHECK_LOCAL (ip [1]);
6876 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6878 emit_stloc_ir (cfg, sp, header, ip [1]);
6883 CHECK_STACK_OVF (1);
6884 EMIT_NEW_PCONST (cfg, ins, NULL);
6885 ins->type = STACK_OBJ;
6890 CHECK_STACK_OVF (1);
6891 EMIT_NEW_ICONST (cfg, ins, -1);
6904 CHECK_STACK_OVF (1);
6905 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6911 CHECK_STACK_OVF (1);
6913 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6919 CHECK_STACK_OVF (1);
6920 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6926 CHECK_STACK_OVF (1);
6927 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6928 ins->type = STACK_I8;
6929 ins->dreg = alloc_dreg (cfg, STACK_I8);
6931 ins->inst_l = (gint64)read64 (ip);
6932 MONO_ADD_INS (bblock, ins);
6938 gboolean use_aotconst = FALSE;
6940 #ifdef TARGET_POWERPC
6941 /* FIXME: Clean this up */
6942 if (cfg->compile_aot)
6943 use_aotconst = TRUE;
6946 /* FIXME: we should really allocate this only late in the compilation process */
6947 f = mono_domain_alloc (cfg->domain, sizeof (float));
6949 CHECK_STACK_OVF (1);
6955 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6957 dreg = alloc_freg (cfg);
6958 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6959 ins->type = STACK_R8;
6961 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6962 ins->type = STACK_R8;
6963 ins->dreg = alloc_dreg (cfg, STACK_R8);
6965 MONO_ADD_INS (bblock, ins);
6975 gboolean use_aotconst = FALSE;
6977 #ifdef TARGET_POWERPC
6978 /* FIXME: Clean this up */
6979 if (cfg->compile_aot)
6980 use_aotconst = TRUE;
6983 /* FIXME: we should really allocate this only late in the compilation process */
6984 d = mono_domain_alloc (cfg->domain, sizeof (double));
6986 CHECK_STACK_OVF (1);
6992 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6994 dreg = alloc_freg (cfg);
6995 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6996 ins->type = STACK_R8;
6998 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6999 ins->type = STACK_R8;
7000 ins->dreg = alloc_dreg (cfg, STACK_R8);
7002 MONO_ADD_INS (bblock, ins);
7011 MonoInst *temp, *store;
7013 CHECK_STACK_OVF (1);
7017 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7018 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7020 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7023 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7036 if (sp [0]->type == STACK_R8)
7037 /* we need to pop the value from the x86 FP stack */
7038 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7044 INLINE_FAILURE ("jmp");
7045 GSHAREDVT_FAILURE (*ip);
7048 if (stack_start != sp)
7050 token = read32 (ip + 1);
7051 /* FIXME: check the signature matches */
7052 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7054 if (!cmethod || mono_loader_get_last_error ())
7057 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7058 GENERIC_SHARING_FAILURE (CEE_JMP);
7060 if (mono_security_cas_enabled ())
7061 CHECK_CFG_EXCEPTION;
7063 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7065 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7068 /* Handle tail calls similarly to calls */
7069 n = fsig->param_count + fsig->hasthis;
7071 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7072 call->method = cmethod;
7073 call->tail_call = TRUE;
7074 call->signature = mono_method_signature (cmethod);
7075 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7076 call->inst.inst_p0 = cmethod;
7077 for (i = 0; i < n; ++i)
7078 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7080 mono_arch_emit_call (cfg, call);
7081 MONO_ADD_INS (bblock, (MonoInst*)call);
7084 for (i = 0; i < num_args; ++i)
7085 /* Prevent arguments from being optimized away */
7086 arg_array [i]->flags |= MONO_INST_VOLATILE;
7088 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7089 ins = (MonoInst*)call;
7090 ins->inst_p0 = cmethod;
7091 MONO_ADD_INS (bblock, ins);
7095 start_new_bblock = 1;
7100 case CEE_CALLVIRT: {
7101 MonoInst *addr = NULL;
7102 MonoMethodSignature *fsig = NULL;
7104 int virtual = *ip == CEE_CALLVIRT;
7105 int calli = *ip == CEE_CALLI;
7106 gboolean pass_imt_from_rgctx = FALSE;
7107 MonoInst *imt_arg = NULL;
7108 MonoInst *keep_this_alive = NULL;
7109 gboolean pass_vtable = FALSE;
7110 gboolean pass_mrgctx = FALSE;
7111 MonoInst *vtable_arg = NULL;
7112 gboolean check_this = FALSE;
7113 gboolean supported_tail_call = FALSE;
7114 gboolean need_seq_point = FALSE;
7115 guint32 call_opcode = *ip;
7116 gboolean emit_widen = TRUE;
7117 gboolean push_res = TRUE;
7118 gboolean skip_ret = FALSE;
7119 gboolean delegate_invoke = FALSE;
7122 token = read32 (ip + 1);
7127 //GSHAREDVT_FAILURE (*ip);
7132 fsig = mini_get_signature (method, token, generic_context);
7133 n = fsig->param_count + fsig->hasthis;
7135 if (method->dynamic && fsig->pinvoke) {
7139 * This is a call through a function pointer using a pinvoke
7140 * signature. Have to create a wrapper and call that instead.
7141 * FIXME: This is very slow, need to create a wrapper at JIT time
7142 * instead based on the signature.
7144 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7145 EMIT_NEW_PCONST (cfg, args [1], fsig);
7147 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7150 MonoMethod *cil_method;
7152 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7153 cil_method = cmethod;
7155 if (constrained_call) {
7156 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7157 if (cfg->verbose_level > 2)
7158 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7159 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7160 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7161 cfg->generic_sharing_context)) {
7162 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7165 if (cfg->verbose_level > 2)
7166 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7168 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7170 * This is needed since get_method_constrained can't find
7171 * the method in klass representing a type var.
7172 * The type var is guaranteed to be a reference type in this
7175 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7176 g_assert (!cmethod->klass->valuetype);
7178 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7183 if (!cmethod || mono_loader_get_last_error ())
7185 if (!dont_verify && !cfg->skip_visibility) {
7186 MonoMethod *target_method = cil_method;
7187 if (method->is_inflated) {
7188 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7190 if (!mono_method_can_access_method (method_definition, target_method) &&
7191 !mono_method_can_access_method (method, cil_method))
7192 METHOD_ACCESS_FAILURE;
7195 if (mono_security_core_clr_enabled ())
7196 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7198 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7199 /* MS.NET seems to silently convert this to a callvirt */
7204 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7205 * converts to a callvirt.
7207 * tests/bug-515884.il is an example of this behavior
7209 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7210 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7211 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7215 if (!cmethod->klass->inited)
7216 if (!mono_class_init (cmethod->klass))
7217 TYPE_LOAD_ERROR (cmethod->klass);
7219 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7220 mini_class_is_system_array (cmethod->klass)) {
7221 array_rank = cmethod->klass->rank;
7222 fsig = mono_method_signature (cmethod);
7224 fsig = mono_method_signature (cmethod);
7229 if (fsig->pinvoke) {
7230 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7231 check_for_pending_exc, FALSE);
7232 fsig = mono_method_signature (wrapper);
7233 } else if (constrained_call) {
7234 fsig = mono_method_signature (cmethod);
7236 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7240 mono_save_token_info (cfg, image, token, cil_method);
7242 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7244 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7245 * foo (bar (), baz ())
7246 * works correctly. MS does this also:
7247 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7248 * The problem with this approach is that the debugger will stop after all calls returning a value,
7249 * even for simple cases, like:
7252 /* Special case a few common successor opcodes */
7253 if (!(ip + 5 < end && ip [5] == CEE_POP))
7254 need_seq_point = TRUE;
7257 n = fsig->param_count + fsig->hasthis;
7259 /* Don't support calls made using type arguments for now */
7261 if (cfg->gsharedvt) {
7262 if (mini_is_gsharedvt_signature (cfg, fsig))
7263 GSHAREDVT_FAILURE (*ip);
7267 if (mono_security_cas_enabled ()) {
7268 if (check_linkdemand (cfg, method, cmethod))
7269 INLINE_FAILURE ("linkdemand");
7270 CHECK_CFG_EXCEPTION;
7273 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7274 g_assert_not_reached ();
7277 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7280 if (!cfg->generic_sharing_context && cmethod)
7281 g_assert (!mono_method_check_context_used (cmethod));
7285 //g_assert (!virtual || fsig->hasthis);
7289 if (constrained_call) {
7290 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7292 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7294 /* Special case Object:ToString () as it's easy to implement */
7295 if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "ToString")) {
7299 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7300 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7301 ins = mono_emit_jit_icall (cfg, mono_object_tostring_gsharedvt, args);
7303 } else if (cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "GetHashCode")) {
7307 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7308 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7309 ins = mono_emit_jit_icall (cfg, mono_object_gethashcode_gsharedvt, args);
7311 } else if (constrained_call->valuetype && cmethod->klass->valuetype) {
7312 /* The 'Own method' case below */
7314 GSHAREDVT_FAILURE (*ip);
7318 * We have the `constrained.' prefix opcode.
7320 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7322 * The type parameter is instantiated as a valuetype,
7323 * but that type doesn't override the method we're
7324 * calling, so we need to box `this'.
7326 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7327 ins->klass = constrained_call;
7328 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7330 CHECK_CFG_EXCEPTION;
7331 } else if (!constrained_call->valuetype) {
7332 int dreg = alloc_ireg_ref (cfg);
7335 * The type parameter is instantiated as a reference
7336 * type. We have a managed pointer on the stack, so
7337 * we need to dereference it here.
7339 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7340 ins->type = STACK_OBJ;
7343 if (cmethod->klass->valuetype) {
7346 /* Interface method */
7349 mono_class_setup_vtable (constrained_call);
7350 CHECK_TYPELOAD (constrained_call);
7351 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7353 TYPE_LOAD_ERROR (constrained_call);
7354 slot = mono_method_get_vtable_slot (cmethod);
7356 TYPE_LOAD_ERROR (cmethod->klass);
7357 cmethod = constrained_call->vtable [ioffset + slot];
7359 if (cmethod->klass == mono_defaults.enum_class) {
7360 /* Enum implements some interfaces, so treat this as the first case */
7361 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7362 ins->klass = constrained_call;
7363 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7365 CHECK_CFG_EXCEPTION;
7370 constrained_call = NULL;
7373 if (!calli && check_call_signature (cfg, fsig, sp))
7376 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7377 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7378 delegate_invoke = TRUE;
7381 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7383 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7384 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7392 * If the callee is a shared method, then its static cctor
7393 * might not get called after the call was patched.
7395 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7396 emit_generic_class_init (cfg, cmethod->klass);
7397 CHECK_TYPELOAD (cmethod->klass);
7400 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
7401 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
7402 gboolean sharable = FALSE;
7404 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
7407 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7408 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
7409 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7411 sharable = sharing_enabled && context_sharable;
7415 * Pass vtable iff target method might
7416 * be shared, which means that sharing
7417 * is enabled for its class and its
7418 * context is sharable (and it's not a
7421 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
7425 if (cmethod && mini_method_get_context (cmethod) &&
7426 mini_method_get_context (cmethod)->method_inst) {
7427 g_assert (!pass_vtable);
7429 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
7432 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7433 MonoGenericContext *context = mini_method_get_context (cmethod);
7434 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7436 if (sharing_enabled && context_sharable)
7438 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7443 if (cfg->generic_sharing_context && cmethod) {
7444 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7446 context_used = mini_method_check_context_used (cfg, cmethod);
7448 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7449 /* Generic method interface
7450 calls are resolved via a
7451 helper function and don't
7453 if (!cmethod_context || !cmethod_context->method_inst)
7454 pass_imt_from_rgctx = TRUE;
7458 * If a shared method calls another
7459 * shared method then the caller must
7460 * have a generic sharing context
7461 * because the magic trampoline
7462 * requires it. FIXME: We shouldn't
7463 * have to force the vtable/mrgctx
7464 * variable here. Instead there
7465 * should be a flag in the cfg to
7466 * request a generic sharing context.
7469 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7470 mono_get_vtable_var (cfg);
7475 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7477 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7479 CHECK_TYPELOAD (cmethod->klass);
7480 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7485 g_assert (!vtable_arg);
7487 if (!cfg->compile_aot) {
7489 * emit_get_rgctx_method () calls mono_class_vtable () so check
7490 * for type load errors before.
7492 mono_class_setup_vtable (cmethod->klass);
7493 CHECK_TYPELOAD (cmethod->klass);
7496 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7498 /* !marshalbyref is needed to properly handle generic methods + remoting */
7499 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7500 MONO_METHOD_IS_FINAL (cmethod)) &&
7501 !mono_class_is_marshalbyref (cmethod->klass)) {
7508 if (pass_imt_from_rgctx) {
7509 g_assert (!pass_vtable);
7512 imt_arg = emit_get_rgctx_method (cfg, context_used,
7513 cmethod, MONO_RGCTX_INFO_METHOD);
7517 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7519 /* Calling virtual generic methods */
7520 if (cmethod && virtual &&
7521 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7522 !(MONO_METHOD_IS_FINAL (cmethod) &&
7523 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7524 fsig->generic_param_count &&
7525 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7526 MonoInst *this_temp, *this_arg_temp, *store;
7527 MonoInst *iargs [4];
7528 gboolean use_imt = FALSE;
7530 g_assert (fsig->is_inflated);
7532 /* Prevent inlining of methods that contain indirect calls */
7533 INLINE_FAILURE ("virtual generic call");
7535 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7536 GSHAREDVT_FAILURE (*ip);
7538 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7539 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
7544 g_assert (!imt_arg);
7546 g_assert (cmethod->is_inflated);
7547 imt_arg = emit_get_rgctx_method (cfg, context_used,
7548 cmethod, MONO_RGCTX_INFO_METHOD);
7549 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7551 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7552 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7553 MONO_ADD_INS (bblock, store);
7555 /* FIXME: This should be a managed pointer */
7556 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7558 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7559 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7560 cmethod, MONO_RGCTX_INFO_METHOD);
7561 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7562 addr = mono_emit_jit_icall (cfg,
7563 mono_helper_compile_generic_method, iargs);
7565 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7567 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7574 * Implement a workaround for the inherent races involved in locking:
7580 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7581 * try block, the Exit () won't be executed, see:
7582 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7583 * To work around this, we extend such try blocks to include the last x bytes
7584 * of the Monitor.Enter () call.
7586 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7587 MonoBasicBlock *tbb;
7589 GET_BBLOCK (cfg, tbb, ip + 5);
7591 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7592 * from Monitor.Enter like ArgumentNullException.
7594 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7595 /* Mark this bblock as needing to be extended */
7596 tbb->extend_try_block = TRUE;
7600 /* Conversion to a JIT intrinsic */
7601 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7603 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7604 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7611 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
7612 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7613 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7614 !g_list_find (dont_inline, cmethod)) {
7616 gboolean always = FALSE;
7618 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7619 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7620 /* Prevent inlining of methods that call wrappers */
7621 INLINE_FAILURE ("wrapper call");
7622 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7626 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7628 cfg->real_offset += 5;
7631 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7632 /* *sp is already set by inline_method */
7637 inline_costs += costs;
7643 /* Tail recursion elimination */
7644 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7645 gboolean has_vtargs = FALSE;
7648 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7649 INLINE_FAILURE ("tail call");
7651 /* keep it simple */
7652 for (i = fsig->param_count - 1; i >= 0; i--) {
7653 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7658 for (i = 0; i < n; ++i)
7659 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7660 MONO_INST_NEW (cfg, ins, OP_BR);
7661 MONO_ADD_INS (bblock, ins);
7662 tblock = start_bblock->out_bb [0];
7663 link_bblock (cfg, bblock, tblock);
7664 ins->inst_target_bb = tblock;
7665 start_new_bblock = 1;
7667 /* skip the CEE_RET, too */
7668 if (ip_in_bb (cfg, bblock, ip + 5))
7675 inline_costs += 10 * num_calls++;
7678 * Making generic calls out of gsharedvt methods.
7680 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7681 MonoRgctxInfoType info_type;
7684 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7685 //GSHAREDVT_FAILURE (*ip);
7686 // disable for possible remoting calls
7687 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7688 GSHAREDVT_FAILURE (*ip);
7689 if (fsig->generic_param_count) {
7690 /* virtual generic call */
7691 g_assert (mono_use_imt);
7692 g_assert (!imt_arg);
7693 /* Same as the virtual generic case above */
7694 imt_arg = emit_get_rgctx_method (cfg, context_used,
7695 cmethod, MONO_RGCTX_INFO_METHOD);
7696 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7701 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7702 /* test_0_multi_dim_arrays () in gshared.cs */
7703 GSHAREDVT_FAILURE (*ip);
7705 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7706 keep_this_alive = sp [0];
7708 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7709 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7711 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7712 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7714 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7716 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7718 * We pass the address to the gsharedvt trampoline in the rgctx reg
7720 MonoInst *callee = addr;
7722 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
7724 GSHAREDVT_FAILURE (*ip);
7726 addr = emit_get_rgctx_sig (cfg, context_used,
7727 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
7728 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
7732 /* Generic sharing */
7733 /* FIXME: only do this for generic methods if
7734 they are not shared! */
7735 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
7736 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
7737 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7738 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7739 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7740 INLINE_FAILURE ("gshared");
7742 g_assert (cfg->generic_sharing_context && cmethod);
7746 * We are compiling a call to a
7747 * generic method from shared code,
7748 * which means that we have to look up
7749 * the method in the rgctx and do an
7753 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7755 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7756 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7760 /* Indirect calls */
7762 if (call_opcode == CEE_CALL)
7763 g_assert (context_used);
7764 else if (call_opcode == CEE_CALLI)
7765 g_assert (!vtable_arg);
7767 /* FIXME: what the hell is this??? */
7768 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7769 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7771 /* Prevent inlining of methods with indirect calls */
7772 INLINE_FAILURE ("indirect call");
7774 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
7779 * Instead of emitting an indirect call, emit a direct call
7780 * with the contents of the aotconst as the patch info.
7782 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
7783 info_type = addr->inst_c1;
7784 info_data = addr->inst_p0;
7786 info_type = addr->inst_right->inst_c1;
7787 info_data = addr->inst_right->inst_left;
7790 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
7791 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
7796 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
7804 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7805 MonoInst *val = sp [fsig->param_count];
7807 if (val->type == STACK_OBJ) {
7808 MonoInst *iargs [2];
7813 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7816 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7817 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7818 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7819 emit_write_barrier (cfg, addr, val, 0);
7820 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7821 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7824 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7825 if (!cmethod->klass->element_class->valuetype && !readonly)
7826 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7827 CHECK_TYPELOAD (cmethod->klass);
7830 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7833 g_assert_not_reached ();
7840 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7844 /* Tail prefix / tail call optimization */
7846 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7847 /* FIXME: runtime generic context pointer for jumps? */
7848 /* FIXME: handle this for generic sharing eventually */
7850 ((((ins_flag & MONO_INST_TAILCALL) && (call_opcode == CEE_CALL))
7851 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7852 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
7853 supported_tail_call = TRUE;
7854 if (supported_tail_call) {
7857 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7858 INLINE_FAILURE ("tail call");
7860 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7862 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7863 /* Handle tail calls similarly to calls */
7864 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
7866 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7867 call->tail_call = TRUE;
7868 call->method = cmethod;
7869 call->signature = mono_method_signature (cmethod);
7872 * We implement tail calls by storing the actual arguments into the
7873 * argument variables, then emitting a CEE_JMP.
7875 for (i = 0; i < n; ++i) {
7876 /* Prevent argument from being register allocated */
7877 arg_array [i]->flags |= MONO_INST_VOLATILE;
7878 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7882 ins = (MonoInst*)call;
7883 ins->inst_p0 = cmethod;
7884 ins->inst_p1 = arg_array [0];
7885 MONO_ADD_INS (bblock, ins);
7886 link_bblock (cfg, bblock, end_bblock);
7887 start_new_bblock = 1;
7889 // FIXME: Eliminate unreachable epilogs
7892 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7893 * only reachable from this call.
7895 GET_BBLOCK (cfg, tblock, ip + 5);
7896 if (tblock == bblock || tblock->in_count == 0)
7904 * Synchronized wrappers.
7905 * It's hard to determine where to replace a method with its synchronized
7906 * wrapper without causing an infinite recursion. The current solution is
7907 * to add the synchronized wrapper in the trampolines, and to
7908 * change the called method to a dummy wrapper, and resolve that wrapper
7909 * to the real method in mono_jit_compile_method ().
7911 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
7912 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7915 INLINE_FAILURE ("call");
7916 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7917 imt_arg, vtable_arg);
7921 /* End of call, INS should contain the result of the call, if any */
7923 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7926 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7931 if (keep_this_alive) {
7932 MonoInst *dummy_use;
7934 /* See mono_emit_method_call_full () */
7935 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
7938 CHECK_CFG_EXCEPTION;
7942 g_assert (*ip == CEE_RET);
7946 constrained_call = NULL;
7948 emit_seq_point (cfg, method, ip, FALSE);
7952 if (cfg->method != method) {
7953 /* return from inlined method */
7955 * If in_count == 0, that means the ret is unreachable due to
7956 * being preceded by a throw. In that case, inline_method () will
7957 * handle setting the return value
7958 * (test case: test_0_inline_throw ()).
7960 if (return_var && cfg->cbb->in_count) {
7961 MonoType *ret_type = mono_method_signature (method)->ret;
7967 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7970 //g_assert (returnvar != -1);
7971 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7972 cfg->ret_var_set = TRUE;
7976 MonoType *ret_type = mono_method_signature (method)->ret;
7978 if (seq_points && !sym_seq_points) {
7980 * Place a seq point here too even though the IL stack is not
7981 * empty, so a step over on
7984 * will work correctly.
7986 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7987 MONO_ADD_INS (cfg->cbb, ins);
7990 g_assert (!return_var);
7994 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7997 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8000 if (!cfg->vret_addr) {
8003 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8005 EMIT_NEW_RETLOADA (cfg, ret_addr);
8007 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8008 ins->klass = mono_class_from_mono_type (ret_type);
8011 #ifdef MONO_ARCH_SOFT_FLOAT
8012 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8013 MonoInst *iargs [1];
8017 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8018 mono_arch_emit_setret (cfg, method, conv);
8020 mono_arch_emit_setret (cfg, method, *sp);
8023 mono_arch_emit_setret (cfg, method, *sp);
8028 if (sp != stack_start)
8030 MONO_INST_NEW (cfg, ins, OP_BR);
8032 ins->inst_target_bb = end_bblock;
8033 MONO_ADD_INS (bblock, ins);
8034 link_bblock (cfg, bblock, end_bblock);
8035 start_new_bblock = 1;
8039 MONO_INST_NEW (cfg, ins, OP_BR);
8041 target = ip + 1 + (signed char)(*ip);
8043 GET_BBLOCK (cfg, tblock, target);
8044 link_bblock (cfg, bblock, tblock);
8045 ins->inst_target_bb = tblock;
8046 if (sp != stack_start) {
8047 handle_stack_args (cfg, stack_start, sp - stack_start);
8049 CHECK_UNVERIFIABLE (cfg);
8051 MONO_ADD_INS (bblock, ins);
8052 start_new_bblock = 1;
8053 inline_costs += BRANCH_COST;
8067 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8069 target = ip + 1 + *(signed char*)ip;
8075 inline_costs += BRANCH_COST;
8079 MONO_INST_NEW (cfg, ins, OP_BR);
8082 target = ip + 4 + (gint32)read32(ip);
8084 GET_BBLOCK (cfg, tblock, target);
8085 link_bblock (cfg, bblock, tblock);
8086 ins->inst_target_bb = tblock;
8087 if (sp != stack_start) {
8088 handle_stack_args (cfg, stack_start, sp - stack_start);
8090 CHECK_UNVERIFIABLE (cfg);
8093 MONO_ADD_INS (bblock, ins);
8095 start_new_bblock = 1;
8096 inline_costs += BRANCH_COST;
8103 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8104 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8105 guint32 opsize = is_short ? 1 : 4;
8107 CHECK_OPSIZE (opsize);
8109 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8112 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8117 GET_BBLOCK (cfg, tblock, target);
8118 link_bblock (cfg, bblock, tblock);
8119 GET_BBLOCK (cfg, tblock, ip);
8120 link_bblock (cfg, bblock, tblock);
8122 if (sp != stack_start) {
8123 handle_stack_args (cfg, stack_start, sp - stack_start);
8124 CHECK_UNVERIFIABLE (cfg);
8127 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8128 cmp->sreg1 = sp [0]->dreg;
8129 type_from_op (cmp, sp [0], NULL);
8132 #if SIZEOF_REGISTER == 4
8133 if (cmp->opcode == OP_LCOMPARE_IMM) {
8134 /* Convert it to OP_LCOMPARE */
8135 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8136 ins->type = STACK_I8;
8137 ins->dreg = alloc_dreg (cfg, STACK_I8);
8139 MONO_ADD_INS (bblock, ins);
8140 cmp->opcode = OP_LCOMPARE;
8141 cmp->sreg2 = ins->dreg;
8144 MONO_ADD_INS (bblock, cmp);
8146 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8147 type_from_op (ins, sp [0], NULL);
8148 MONO_ADD_INS (bblock, ins);
8149 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8150 GET_BBLOCK (cfg, tblock, target);
8151 ins->inst_true_bb = tblock;
8152 GET_BBLOCK (cfg, tblock, ip);
8153 ins->inst_false_bb = tblock;
8154 start_new_bblock = 2;
8157 inline_costs += BRANCH_COST;
8172 MONO_INST_NEW (cfg, ins, *ip);
8174 target = ip + 4 + (gint32)read32(ip);
8180 inline_costs += BRANCH_COST;
8184 MonoBasicBlock **targets;
8185 MonoBasicBlock *default_bblock;
8186 MonoJumpInfoBBTable *table;
8187 int offset_reg = alloc_preg (cfg);
8188 int target_reg = alloc_preg (cfg);
8189 int table_reg = alloc_preg (cfg);
8190 int sum_reg = alloc_preg (cfg);
8191 gboolean use_op_switch;
8195 n = read32 (ip + 1);
8198 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8202 CHECK_OPSIZE (n * sizeof (guint32));
8203 target = ip + n * sizeof (guint32);
8205 GET_BBLOCK (cfg, default_bblock, target);
8206 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8208 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8209 for (i = 0; i < n; ++i) {
8210 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8211 targets [i] = tblock;
8212 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8216 if (sp != stack_start) {
8218 * Link the current bb with the targets as well, so handle_stack_args
8219 * will set their in_stack correctly.
8221 link_bblock (cfg, bblock, default_bblock);
8222 for (i = 0; i < n; ++i)
8223 link_bblock (cfg, bblock, targets [i]);
8225 handle_stack_args (cfg, stack_start, sp - stack_start);
8227 CHECK_UNVERIFIABLE (cfg);
8230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8234 for (i = 0; i < n; ++i)
8235 link_bblock (cfg, bblock, targets [i]);
8237 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8238 table->table = targets;
8239 table->table_size = n;
8241 use_op_switch = FALSE;
8243 /* ARM implements SWITCH statements differently */
8244 /* FIXME: Make it use the generic implementation */
8245 if (!cfg->compile_aot)
8246 use_op_switch = TRUE;
8249 if (COMPILE_LLVM (cfg))
8250 use_op_switch = TRUE;
8252 cfg->cbb->has_jump_table = 1;
8254 if (use_op_switch) {
8255 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8256 ins->sreg1 = src1->dreg;
8257 ins->inst_p0 = table;
8258 ins->inst_many_bb = targets;
8259 ins->klass = GUINT_TO_POINTER (n);
8260 MONO_ADD_INS (cfg->cbb, ins);
8262 if (sizeof (gpointer) == 8)
8263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8267 #if SIZEOF_REGISTER == 8
8268 /* The upper word might not be zero, and we add it to a 64 bit address later */
8269 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8272 if (cfg->compile_aot) {
8273 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8275 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8276 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8277 ins->inst_p0 = table;
8278 ins->dreg = table_reg;
8279 MONO_ADD_INS (cfg->cbb, ins);
8282 /* FIXME: Use load_memindex */
8283 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8284 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8285 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8287 start_new_bblock = 1;
8288 inline_costs += (BRANCH_COST * 2);
8308 dreg = alloc_freg (cfg);
8311 dreg = alloc_lreg (cfg);
8314 dreg = alloc_ireg_ref (cfg);
8317 dreg = alloc_preg (cfg);
8320 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8321 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8322 ins->flags |= ins_flag;
8324 MONO_ADD_INS (bblock, ins);
8326 if (ins->flags & MONO_INST_VOLATILE) {
8327 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8328 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8329 emit_memory_barrier (cfg, FullBarrier);
8344 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8345 ins->flags |= ins_flag;
8348 if (ins->flags & MONO_INST_VOLATILE) {
8349 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8350 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
8351 emit_memory_barrier (cfg, FullBarrier);
8354 MONO_ADD_INS (bblock, ins);
8356 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8357 emit_write_barrier (cfg, sp [0], sp [1], -1);
8366 MONO_INST_NEW (cfg, ins, (*ip));
8368 ins->sreg1 = sp [0]->dreg;
8369 ins->sreg2 = sp [1]->dreg;
8370 type_from_op (ins, sp [0], sp [1]);
8372 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8374 /* Use the immediate opcodes if possible */
8375 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8376 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8377 if (imm_opcode != -1) {
8378 ins->opcode = imm_opcode;
8379 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8382 sp [1]->opcode = OP_NOP;
8386 MONO_ADD_INS ((cfg)->cbb, (ins));
8388 *sp++ = mono_decompose_opcode (cfg, ins);
8405 MONO_INST_NEW (cfg, ins, (*ip));
8407 ins->sreg1 = sp [0]->dreg;
8408 ins->sreg2 = sp [1]->dreg;
8409 type_from_op (ins, sp [0], sp [1]);
8411 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8412 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8414 /* FIXME: Pass opcode to is_inst_imm */
8416 /* Use the immediate opcodes if possible */
8417 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8420 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8421 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8422 /* Keep emulated opcodes which are optimized away later */
8423 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8424 imm_opcode = mono_op_to_op_imm (ins->opcode);
8427 if (imm_opcode != -1) {
8428 ins->opcode = imm_opcode;
8429 if (sp [1]->opcode == OP_I8CONST) {
8430 #if SIZEOF_REGISTER == 8
8431 ins->inst_imm = sp [1]->inst_l;
8433 ins->inst_ls_word = sp [1]->inst_ls_word;
8434 ins->inst_ms_word = sp [1]->inst_ms_word;
8438 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8441 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8442 if (sp [1]->next == NULL)
8443 sp [1]->opcode = OP_NOP;
8446 MONO_ADD_INS ((cfg)->cbb, (ins));
8448 *sp++ = mono_decompose_opcode (cfg, ins);
8461 case CEE_CONV_OVF_I8:
8462 case CEE_CONV_OVF_U8:
8466 /* Special case this earlier so we have long constants in the IR */
8467 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8468 int data = sp [-1]->inst_c0;
8469 sp [-1]->opcode = OP_I8CONST;
8470 sp [-1]->type = STACK_I8;
8471 #if SIZEOF_REGISTER == 8
8472 if ((*ip) == CEE_CONV_U8)
8473 sp [-1]->inst_c0 = (guint32)data;
8475 sp [-1]->inst_c0 = data;
8477 sp [-1]->inst_ls_word = data;
8478 if ((*ip) == CEE_CONV_U8)
8479 sp [-1]->inst_ms_word = 0;
8481 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8483 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8490 case CEE_CONV_OVF_I4:
8491 case CEE_CONV_OVF_I1:
8492 case CEE_CONV_OVF_I2:
8493 case CEE_CONV_OVF_I:
8494 case CEE_CONV_OVF_U:
8497 if (sp [-1]->type == STACK_R8) {
8498 ADD_UNOP (CEE_CONV_OVF_I8);
8505 case CEE_CONV_OVF_U1:
8506 case CEE_CONV_OVF_U2:
8507 case CEE_CONV_OVF_U4:
8510 if (sp [-1]->type == STACK_R8) {
8511 ADD_UNOP (CEE_CONV_OVF_U8);
8518 case CEE_CONV_OVF_I1_UN:
8519 case CEE_CONV_OVF_I2_UN:
8520 case CEE_CONV_OVF_I4_UN:
8521 case CEE_CONV_OVF_I8_UN:
8522 case CEE_CONV_OVF_U1_UN:
8523 case CEE_CONV_OVF_U2_UN:
8524 case CEE_CONV_OVF_U4_UN:
8525 case CEE_CONV_OVF_U8_UN:
8526 case CEE_CONV_OVF_I_UN:
8527 case CEE_CONV_OVF_U_UN:
8534 CHECK_CFG_EXCEPTION;
8538 case CEE_ADD_OVF_UN:
8540 case CEE_MUL_OVF_UN:
8542 case CEE_SUB_OVF_UN:
8548 GSHAREDVT_FAILURE (*ip);
8551 token = read32 (ip + 1);
8552 klass = mini_get_class (method, token, generic_context);
8553 CHECK_TYPELOAD (klass);
8555 if (generic_class_is_reference_type (cfg, klass)) {
8556 MonoInst *store, *load;
8557 int dreg = alloc_ireg_ref (cfg);
8559 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8560 load->flags |= ins_flag;
8561 MONO_ADD_INS (cfg->cbb, load);
8563 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8564 store->flags |= ins_flag;
8565 MONO_ADD_INS (cfg->cbb, store);
8567 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8568 emit_write_barrier (cfg, sp [0], sp [1], -1);
8570 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8582 token = read32 (ip + 1);
8583 klass = mini_get_class (method, token, generic_context);
8584 CHECK_TYPELOAD (klass);
8586 /* Optimize the common ldobj+stloc combination */
8596 loc_index = ip [5] - CEE_STLOC_0;
8603 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8604 CHECK_LOCAL (loc_index);
8606 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8607 ins->dreg = cfg->locals [loc_index]->dreg;
8613 /* Optimize the ldobj+stobj combination */
8614 /* The reference case ends up being a load+store anyway */
8615 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8620 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8627 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8636 CHECK_STACK_OVF (1);
8638 n = read32 (ip + 1);
8640 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8641 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8642 ins->type = STACK_OBJ;
8645 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8646 MonoInst *iargs [1];
8648 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8649 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8651 if (cfg->opt & MONO_OPT_SHARED) {
8652 MonoInst *iargs [3];
8654 if (cfg->compile_aot) {
8655 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8657 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8658 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8659 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8660 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8661 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8663 if (bblock->out_of_line) {
8664 MonoInst *iargs [2];
8666 if (image == mono_defaults.corlib) {
8668 * Avoid relocations in AOT and save some space by using a
8669 * version of helper_ldstr specialized to mscorlib.
8671 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8672 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8674 /* Avoid creating the string object */
8675 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8676 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8677 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8681 if (cfg->compile_aot) {
8682 NEW_LDSTRCONST (cfg, ins, image, n);
8684 MONO_ADD_INS (bblock, ins);
8687 NEW_PCONST (cfg, ins, NULL);
8688 ins->type = STACK_OBJ;
8689 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8691 OUT_OF_MEMORY_FAILURE;
8694 MONO_ADD_INS (bblock, ins);
8703 MonoInst *iargs [2];
8704 MonoMethodSignature *fsig;
8707 MonoInst *vtable_arg = NULL;
8710 token = read32 (ip + 1);
8711 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8712 if (!cmethod || mono_loader_get_last_error ())
8714 fsig = mono_method_get_signature (cmethod, image, token);
8718 mono_save_token_info (cfg, image, token, cmethod);
8720 if (!mono_class_init (cmethod->klass))
8721 TYPE_LOAD_ERROR (cmethod->klass);
8723 context_used = mini_method_check_context_used (cfg, cmethod);
8725 if (mono_security_cas_enabled ()) {
8726 if (check_linkdemand (cfg, method, cmethod))
8727 INLINE_FAILURE ("linkdemand");
8728 CHECK_CFG_EXCEPTION;
8729 } else if (mono_security_core_clr_enabled ()) {
8730 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8733 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8734 emit_generic_class_init (cfg, cmethod->klass);
8735 CHECK_TYPELOAD (cmethod->klass);
8739 if (cfg->gsharedvt) {
8740 if (mini_is_gsharedvt_variable_signature (sig))
8741 GSHAREDVT_FAILURE (*ip);
8745 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8746 mono_method_is_generic_sharable (cmethod, TRUE)) {
8747 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8748 mono_class_vtable (cfg->domain, cmethod->klass);
8749 CHECK_TYPELOAD (cmethod->klass);
8751 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8752 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8755 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8756 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8758 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8760 CHECK_TYPELOAD (cmethod->klass);
8761 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8766 n = fsig->param_count;
8770 * Generate smaller code for the common newobj <exception> instruction in
8771 * argument checking code.
8773 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8774 is_exception_class (cmethod->klass) && n <= 2 &&
8775 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8776 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8777 MonoInst *iargs [3];
8779 g_assert (!vtable_arg);
8783 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8786 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8790 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8795 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8798 g_assert_not_reached ();
8806 /* move the args to allow room for 'this' in the first position */
8812 /* check_call_signature () requires sp[0] to be set */
8813 this_ins.type = STACK_OBJ;
8815 if (check_call_signature (cfg, fsig, sp))
8820 if (mini_class_is_system_array (cmethod->klass)) {
8821 g_assert (!vtable_arg);
8823 *sp = emit_get_rgctx_method (cfg, context_used,
8824 cmethod, MONO_RGCTX_INFO_METHOD);
8826 /* Avoid varargs in the common case */
8827 if (fsig->param_count == 1)
8828 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8829 else if (fsig->param_count == 2)
8830 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8831 else if (fsig->param_count == 3)
8832 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8833 else if (fsig->param_count == 4)
8834 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
8836 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8837 } else if (cmethod->string_ctor) {
8838 g_assert (!context_used);
8839 g_assert (!vtable_arg);
8840 /* we simply pass a null pointer */
8841 EMIT_NEW_PCONST (cfg, *sp, NULL);
8842 /* now call the string ctor */
8843 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8845 MonoInst* callvirt_this_arg = NULL;
8847 if (cmethod->klass->valuetype) {
8848 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8849 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8850 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8855 * The code generated by mini_emit_virtual_call () expects
8856 * iargs [0] to be a boxed instance, but luckily the vcall
8857 * will be transformed into a normal call there.
8859 } else if (context_used) {
8860 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8863 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8865 CHECK_TYPELOAD (cmethod->klass);
8868 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8869 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8870 * As a workaround, we call class cctors before allocating objects.
8872 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8873 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8874 if (cfg->verbose_level > 2)
8875 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8876 class_inits = g_slist_prepend (class_inits, vtable);
8879 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8882 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8885 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8887 /* Now call the actual ctor */
8888 /* Avoid virtual calls to ctors if possible */
8889 if (mono_class_is_marshalbyref (cmethod->klass))
8890 callvirt_this_arg = sp [0];
8893 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8894 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8895 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8900 CHECK_CFG_EXCEPTION;
8901 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8902 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8903 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8904 !g_list_find (dont_inline, cmethod)) {
8907 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8908 cfg->real_offset += 5;
8911 inline_costs += costs - 5;
8913 INLINE_FAILURE ("inline failure");
8914 // FIXME-VT: Clean this up
8915 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8916 GSHAREDVT_FAILURE(*ip);
8917 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8919 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8922 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8923 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8924 } else if (context_used &&
8925 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8926 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8927 MonoInst *cmethod_addr;
8929 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8930 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8932 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
8934 INLINE_FAILURE ("ctor call");
8935 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8936 callvirt_this_arg, NULL, vtable_arg);
8940 if (alloc == NULL) {
8942 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8943 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8957 token = read32 (ip + 1);
8958 klass = mini_get_class (method, token, generic_context);
8959 CHECK_TYPELOAD (klass);
8960 if (sp [0]->type != STACK_OBJ)
8963 context_used = mini_class_check_context_used (cfg, klass);
8965 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8966 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8973 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8976 if (cfg->compile_aot)
8977 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8979 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8981 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8982 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8985 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8986 MonoMethod *mono_castclass;
8987 MonoInst *iargs [1];
8990 mono_castclass = mono_marshal_get_castclass (klass);
8993 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8994 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8995 CHECK_CFG_EXCEPTION;
8996 g_assert (costs > 0);
8999 cfg->real_offset += 5;
9004 inline_costs += costs;
9007 ins = handle_castclass (cfg, klass, *sp, context_used);
9008 CHECK_CFG_EXCEPTION;
9018 token = read32 (ip + 1);
9019 klass = mini_get_class (method, token, generic_context);
9020 CHECK_TYPELOAD (klass);
9021 if (sp [0]->type != STACK_OBJ)
9024 context_used = mini_class_check_context_used (cfg, klass);
9026 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9027 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9034 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9037 if (cfg->compile_aot)
9038 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9040 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9042 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9045 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9046 MonoMethod *mono_isinst;
9047 MonoInst *iargs [1];
9050 mono_isinst = mono_marshal_get_isinst (klass);
9053 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9054 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9055 CHECK_CFG_EXCEPTION;
9056 g_assert (costs > 0);
9059 cfg->real_offset += 5;
9064 inline_costs += costs;
9067 ins = handle_isinst (cfg, klass, *sp, context_used);
9068 CHECK_CFG_EXCEPTION;
9075 case CEE_UNBOX_ANY: {
9079 token = read32 (ip + 1);
9080 klass = mini_get_class (method, token, generic_context);
9081 CHECK_TYPELOAD (klass);
9083 mono_save_token_info (cfg, image, token, klass);
9085 context_used = mini_class_check_context_used (cfg, klass);
9087 if (mini_is_gsharedvt_klass (cfg, klass)) {
9088 MonoInst *obj, *addr, *klass_inst, *is_ref, *args[16];
9089 MonoBasicBlock *is_ref_bb, *end_bb;
9092 /* Need to check for nullable types at runtime, but those are disabled in mini_is_gsharedvt_sharable_method*/
9093 if (mono_class_is_nullable (klass))
9094 GSHAREDVT_FAILURE (*ip);
9098 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9104 args [1] = klass_inst;
9107 obj = mono_emit_jit_icall (cfg, mono_object_castclass, args);
9109 NEW_BBLOCK (cfg, is_ref_bb);
9110 NEW_BBLOCK (cfg, end_bb);
9111 is_ref = emit_get_rgctx_klass (cfg, context_used, klass,
9112 MONO_RGCTX_INFO_CLASS_IS_REF);
9113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
9114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9116 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
9117 addr_reg = alloc_dreg (cfg, STACK_MP);
9121 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
9122 MONO_ADD_INS (cfg->cbb, addr);
9124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9127 MONO_START_BB (cfg, is_ref_bb);
9129 /* Save the ref to a temporary */
9130 dreg = alloc_ireg (cfg);
9131 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
9132 addr->dreg = addr_reg;
9133 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
9134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9136 MONO_START_BB (cfg, end_bb);
9140 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
9148 if (generic_class_is_reference_type (cfg, klass)) {
9149 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9150 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9151 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9158 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9161 /*FIXME AOT support*/
9162 if (cfg->compile_aot)
9163 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9165 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9167 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9168 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9171 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9172 MonoMethod *mono_castclass;
9173 MonoInst *iargs [1];
9176 mono_castclass = mono_marshal_get_castclass (klass);
9179 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9180 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9181 CHECK_CFG_EXCEPTION;
9182 g_assert (costs > 0);
9185 cfg->real_offset += 5;
9189 inline_costs += costs;
9191 ins = handle_castclass (cfg, klass, *sp, context_used);
9192 CHECK_CFG_EXCEPTION;
9200 if (mono_class_is_nullable (klass)) {
9201 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9208 ins = handle_unbox (cfg, klass, sp, context_used);
9214 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9227 token = read32 (ip + 1);
9228 klass = mini_get_class (method, token, generic_context);
9229 CHECK_TYPELOAD (klass);
9231 mono_save_token_info (cfg, image, token, klass);
9233 context_used = mini_class_check_context_used (cfg, klass);
9235 if (generic_class_is_reference_type (cfg, klass)) {
9241 if (klass == mono_defaults.void_class)
9243 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9245 /* frequent check in generic code: box (struct), brtrue */
9247 // FIXME: LLVM can't handle the inconsistent bb linking
9248 if (!mono_class_is_nullable (klass) &&
9249 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9250 (ip [5] == CEE_BRTRUE ||
9251 ip [5] == CEE_BRTRUE_S ||
9252 ip [5] == CEE_BRFALSE ||
9253 ip [5] == CEE_BRFALSE_S)) {
9254 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9256 MonoBasicBlock *true_bb, *false_bb;
9260 if (cfg->verbose_level > 3) {
9261 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9262 printf ("<box+brtrue opt>\n");
9270 target = ip + 1 + (signed char)(*ip);
9277 target = ip + 4 + (gint)(read32 (ip));
9281 g_assert_not_reached ();
9285 * We need to link both bblocks, since it is needed for handling stack
9286 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9287 * Branching to only one of them would lead to inconsistencies, so
9288 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9290 GET_BBLOCK (cfg, true_bb, target);
9291 GET_BBLOCK (cfg, false_bb, ip);
9293 mono_link_bblock (cfg, cfg->cbb, true_bb);
9294 mono_link_bblock (cfg, cfg->cbb, false_bb);
9296 if (sp != stack_start) {
9297 handle_stack_args (cfg, stack_start, sp - stack_start);
9299 CHECK_UNVERIFIABLE (cfg);
9302 if (COMPILE_LLVM (cfg)) {
9303 dreg = alloc_ireg (cfg);
9304 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9305 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9307 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9309 /* The JIT can't eliminate the iconst+compare */
9310 MONO_INST_NEW (cfg, ins, OP_BR);
9311 ins->inst_target_bb = is_true ? true_bb : false_bb;
9312 MONO_ADD_INS (cfg->cbb, ins);
9315 start_new_bblock = 1;
9319 *sp++ = handle_box (cfg, val, klass, context_used);
9322 CHECK_CFG_EXCEPTION;
9331 token = read32 (ip + 1);
9332 klass = mini_get_class (method, token, generic_context);
9333 CHECK_TYPELOAD (klass);
9335 mono_save_token_info (cfg, image, token, klass);
9337 context_used = mini_class_check_context_used (cfg, klass);
9339 if (mono_class_is_nullable (klass)) {
9342 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9343 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9347 ins = handle_unbox (cfg, klass, sp, context_used);
9360 MonoClassField *field;
9361 #ifndef DISABLE_REMOTING
9365 gboolean is_instance;
9367 gpointer addr = NULL;
9368 gboolean is_special_static;
9370 MonoInst *store_val = NULL;
9373 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9375 if (op == CEE_STFLD) {
9383 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9385 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9388 if (op == CEE_STSFLD) {
9396 token = read32 (ip + 1);
9397 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9398 field = mono_method_get_wrapper_data (method, token);
9399 klass = field->parent;
9402 field = mono_field_from_token (image, token, &klass, generic_context);
9406 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9407 FIELD_ACCESS_FAILURE;
9408 mono_class_init (klass);
9410 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9413 /* if the class is Critical then transparent code cannot access its fields */
9414 if (!is_instance && mono_security_core_clr_enabled ())
9415 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9417 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9418 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9419 if (mono_security_core_clr_enabled ())
9420 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9424 * LDFLD etc. is usable on static fields as well, so convert those cases to
9427 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9439 g_assert_not_reached ();
9441 is_instance = FALSE;
9444 context_used = mini_class_check_context_used (cfg, klass);
9448 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9449 if (op == CEE_STFLD) {
9450 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9452 #ifndef DISABLE_REMOTING
9453 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9454 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9455 MonoInst *iargs [5];
9457 GSHAREDVT_FAILURE (op);
9460 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9461 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9462 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9466 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9467 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9468 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9469 CHECK_CFG_EXCEPTION;
9470 g_assert (costs > 0);
9472 cfg->real_offset += 5;
9475 inline_costs += costs;
9477 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9484 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9486 if (mini_is_gsharedvt_klass (cfg, klass)) {
9487 MonoInst *offset_ins;
9489 context_used = mini_class_check_context_used (cfg, klass);
9491 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9492 dreg = alloc_ireg_mp (cfg);
9493 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9494 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9495 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9497 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9499 if (sp [0]->opcode != OP_LDADDR)
9500 store->flags |= MONO_INST_FAULT;
9502 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9503 /* insert call to write barrier */
9507 dreg = alloc_ireg_mp (cfg);
9508 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9509 emit_write_barrier (cfg, ptr, sp [1], -1);
9512 store->flags |= ins_flag;
9519 #ifndef DISABLE_REMOTING
9520 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9521 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9522 MonoInst *iargs [4];
9524 GSHAREDVT_FAILURE (op);
9527 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9528 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9529 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9530 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9531 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9532 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9533 CHECK_CFG_EXCEPTION;
9535 g_assert (costs > 0);
9537 cfg->real_offset += 5;
9541 inline_costs += costs;
9543 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9549 if (sp [0]->type == STACK_VTYPE) {
9552 /* Have to compute the address of the variable */
9554 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9556 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9558 g_assert (var->klass == klass);
9560 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9564 if (op == CEE_LDFLDA) {
9565 if (is_magic_tls_access (field)) {
9566 GSHAREDVT_FAILURE (*ip);
9568 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9570 if (sp [0]->type == STACK_OBJ) {
9571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9572 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9575 dreg = alloc_ireg_mp (cfg);
9577 if (mini_is_gsharedvt_klass (cfg, klass)) {
9578 MonoInst *offset_ins;
9580 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9581 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9583 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9585 ins->klass = mono_class_from_mono_type (field->type);
9586 ins->type = STACK_MP;
9592 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9594 if (mini_is_gsharedvt_klass (cfg, klass)) {
9595 MonoInst *offset_ins;
9597 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9598 dreg = alloc_ireg_mp (cfg);
9599 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9600 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9602 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9604 load->flags |= ins_flag;
9605 if (sp [0]->opcode != OP_LDADDR)
9606 load->flags |= MONO_INST_FAULT;
9620 * We can only support shared generic static
9621 * field access on architectures where the
9622 * trampoline code has been extended to handle
9623 * the generic class init.
9625 #ifndef MONO_ARCH_VTABLE_REG
9626 GENERIC_SHARING_FAILURE (op);
9629 context_used = mini_class_check_context_used (cfg, klass);
9631 ftype = mono_field_get_type (field);
9633 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9636 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9637 * to be called here.
9639 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9640 mono_class_vtable (cfg->domain, klass);
9641 CHECK_TYPELOAD (klass);
9643 mono_domain_lock (cfg->domain);
9644 if (cfg->domain->special_static_fields)
9645 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9646 mono_domain_unlock (cfg->domain);
9648 is_special_static = mono_class_field_is_special_static (field);
9650 /* Generate IR to compute the field address */
9651 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9653 * Fast access to TLS data
9654 * Inline version of get_thread_static_data () in
9658 int idx, static_data_reg, array_reg, dreg;
9659 MonoInst *thread_ins;
9661 GSHAREDVT_FAILURE (op);
9663 // offset &= 0x7fffffff;
9664 // idx = (offset >> 24) - 1;
9665 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9667 thread_ins = mono_get_thread_intrinsic (cfg);
9668 MONO_ADD_INS (cfg->cbb, thread_ins);
9669 static_data_reg = alloc_ireg (cfg);
9670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9672 if (cfg->compile_aot) {
9673 int offset_reg, offset2_reg, idx_reg;
9675 /* For TLS variables, this will return the TLS offset */
9676 EMIT_NEW_SFLDACONST (cfg, ins, field);
9677 offset_reg = ins->dreg;
9678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9679 idx_reg = alloc_ireg (cfg);
9680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9683 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9684 array_reg = alloc_ireg (cfg);
9685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9686 offset2_reg = alloc_ireg (cfg);
9687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9688 dreg = alloc_ireg (cfg);
9689 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9691 offset = (gsize)addr & 0x7fffffff;
9692 idx = (offset >> 24) - 1;
9694 array_reg = alloc_ireg (cfg);
9695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9696 dreg = alloc_ireg (cfg);
9697 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9699 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9700 (cfg->compile_aot && is_special_static) ||
9701 (context_used && is_special_static)) {
9702 MonoInst *iargs [2];
9704 g_assert (field->parent);
9705 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9707 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9708 field, MONO_RGCTX_INFO_CLASS_FIELD);
9710 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9712 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9713 } else if (context_used) {
9714 MonoInst *static_data;
9717 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9718 method->klass->name_space, method->klass->name, method->name,
9719 depth, field->offset);
9722 if (mono_class_needs_cctor_run (klass, method))
9723 emit_generic_class_init (cfg, klass);
9726 * The pointer we're computing here is
9728 * super_info.static_data + field->offset
9730 static_data = emit_get_rgctx_klass (cfg, context_used,
9731 klass, MONO_RGCTX_INFO_STATIC_DATA);
9733 if (mini_is_gsharedvt_klass (cfg, klass)) {
9734 MonoInst *offset_ins;
9736 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9737 dreg = alloc_ireg_mp (cfg);
9738 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9739 } else if (field->offset == 0) {
9742 int addr_reg = mono_alloc_preg (cfg);
9743 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9745 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9746 MonoInst *iargs [2];
9748 g_assert (field->parent);
9749 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9750 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9751 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9753 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9755 CHECK_TYPELOAD (klass);
9757 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9758 if (!(g_slist_find (class_inits, vtable))) {
9759 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9760 if (cfg->verbose_level > 2)
9761 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9762 class_inits = g_slist_prepend (class_inits, vtable);
9765 if (cfg->run_cctors) {
9767 /* This makes so that inline cannot trigger */
9768 /* .cctors: too many apps depend on them */
9769 /* running with a specific order... */
9770 if (! vtable->initialized)
9771 INLINE_FAILURE ("class init");
9772 ex = mono_runtime_class_init_full (vtable, FALSE);
9774 set_exception_object (cfg, ex);
9775 goto exception_exit;
9779 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9781 if (cfg->compile_aot)
9782 EMIT_NEW_SFLDACONST (cfg, ins, field);
9784 EMIT_NEW_PCONST (cfg, ins, addr);
9786 MonoInst *iargs [1];
9787 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9788 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9792 /* Generate IR to do the actual load/store operation */
9794 if (op == CEE_LDSFLDA) {
9795 ins->klass = mono_class_from_mono_type (ftype);
9796 ins->type = STACK_PTR;
9798 } else if (op == CEE_STSFLD) {
9801 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9802 store->flags |= ins_flag;
9804 gboolean is_const = FALSE;
9805 MonoVTable *vtable = NULL;
9806 gpointer addr = NULL;
9808 if (!context_used) {
9809 vtable = mono_class_vtable (cfg->domain, klass);
9810 CHECK_TYPELOAD (klass);
9812 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9813 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9814 int ro_type = ftype->type;
9816 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9817 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9818 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9821 GSHAREDVT_FAILURE (op);
9823 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9826 case MONO_TYPE_BOOLEAN:
9828 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9832 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9835 case MONO_TYPE_CHAR:
9837 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9841 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9846 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9850 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9856 case MONO_TYPE_FNPTR:
9857 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9858 type_to_eval_stack_type ((cfg), field->type, *sp);
9861 case MONO_TYPE_STRING:
9862 case MONO_TYPE_OBJECT:
9863 case MONO_TYPE_CLASS:
9864 case MONO_TYPE_SZARRAY:
9865 case MONO_TYPE_ARRAY:
9866 if (!mono_gc_is_moving ()) {
9867 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9868 type_to_eval_stack_type ((cfg), field->type, *sp);
9876 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9881 case MONO_TYPE_VALUETYPE:
9891 CHECK_STACK_OVF (1);
9893 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9894 load->flags |= ins_flag;
9907 token = read32 (ip + 1);
9908 klass = mini_get_class (method, token, generic_context);
9909 CHECK_TYPELOAD (klass);
9910 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9911 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9912 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9913 generic_class_is_reference_type (cfg, klass)) {
9914 /* insert call to write barrier */
9915 emit_write_barrier (cfg, sp [0], sp [1], -1);
9927 const char *data_ptr;
9929 guint32 field_token;
9935 token = read32 (ip + 1);
9937 klass = mini_get_class (method, token, generic_context);
9938 CHECK_TYPELOAD (klass);
9940 context_used = mini_class_check_context_used (cfg, klass);
9942 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9943 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9944 ins->sreg1 = sp [0]->dreg;
9945 ins->type = STACK_I4;
9946 ins->dreg = alloc_ireg (cfg);
9947 MONO_ADD_INS (cfg->cbb, ins);
9948 *sp = mono_decompose_opcode (cfg, ins);
9953 MonoClass *array_class = mono_array_class_get (klass, 1);
9954 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
9956 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9959 args [0] = emit_get_rgctx_klass (cfg, context_used,
9960 array_class, MONO_RGCTX_INFO_VTABLE);
9965 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9967 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9969 if (cfg->opt & MONO_OPT_SHARED) {
9970 /* Decompose now to avoid problems with references to the domainvar */
9971 MonoInst *iargs [3];
9973 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9974 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9977 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9979 /* Decompose later since it is needed by abcrem */
9980 MonoClass *array_type = mono_array_class_get (klass, 1);
9981 mono_class_vtable (cfg->domain, array_type);
9982 CHECK_TYPELOAD (array_type);
9984 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9985 ins->dreg = alloc_ireg_ref (cfg);
9986 ins->sreg1 = sp [0]->dreg;
9987 ins->inst_newa_class = klass;
9988 ins->type = STACK_OBJ;
9989 ins->klass = array_type;
9990 MONO_ADD_INS (cfg->cbb, ins);
9991 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9992 cfg->cbb->has_array_access = TRUE;
9994 /* Needed so mono_emit_load_get_addr () gets called */
9995 mono_get_got_var (cfg);
10005 * we inline/optimize the initialization sequence if possible.
10006 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10007 * for small sizes open code the memcpy
10008 * ensure the rva field is big enough
10010 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10011 MonoMethod *memcpy_method = get_memcpy_method ();
10012 MonoInst *iargs [3];
10013 int add_reg = alloc_ireg_mp (cfg);
10015 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10016 if (cfg->compile_aot) {
10017 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10019 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10021 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10022 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10031 if (sp [0]->type != STACK_OBJ)
10034 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10035 ins->dreg = alloc_preg (cfg);
10036 ins->sreg1 = sp [0]->dreg;
10037 ins->type = STACK_I4;
10038 /* This flag will be inherited by the decomposition */
10039 ins->flags |= MONO_INST_FAULT;
10040 MONO_ADD_INS (cfg->cbb, ins);
10041 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10042 cfg->cbb->has_array_access = TRUE;
10050 if (sp [0]->type != STACK_OBJ)
10053 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10055 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10056 CHECK_TYPELOAD (klass);
10057 /* we need to make sure that this array is exactly the type it needs
10058 * to be for correctness. the wrappers are lax with their usage
10059 * so we need to ignore them here
10061 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10062 MonoClass *array_class = mono_array_class_get (klass, 1);
10063 mini_emit_check_array_type (cfg, sp [0], array_class);
10064 CHECK_TYPELOAD (array_class);
10068 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10073 case CEE_LDELEM_I1:
10074 case CEE_LDELEM_U1:
10075 case CEE_LDELEM_I2:
10076 case CEE_LDELEM_U2:
10077 case CEE_LDELEM_I4:
10078 case CEE_LDELEM_U4:
10079 case CEE_LDELEM_I8:
10081 case CEE_LDELEM_R4:
10082 case CEE_LDELEM_R8:
10083 case CEE_LDELEM_REF: {
10089 if (*ip == CEE_LDELEM) {
10091 token = read32 (ip + 1);
10092 klass = mini_get_class (method, token, generic_context);
10093 CHECK_TYPELOAD (klass);
10094 mono_class_init (klass);
10097 klass = array_access_to_klass (*ip);
10099 if (sp [0]->type != STACK_OBJ)
10102 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10104 if (mini_is_gsharedvt_klass (cfg, klass)) {
10105 // FIXME-VT: OP_ICONST optimization
10106 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10107 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10108 ins->opcode = OP_LOADV_MEMBASE;
10109 } else if (sp [1]->opcode == OP_ICONST) {
10110 int array_reg = sp [0]->dreg;
10111 int index_reg = sp [1]->dreg;
10112 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10114 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10115 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10117 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10121 if (*ip == CEE_LDELEM)
10128 case CEE_STELEM_I1:
10129 case CEE_STELEM_I2:
10130 case CEE_STELEM_I4:
10131 case CEE_STELEM_I8:
10132 case CEE_STELEM_R4:
10133 case CEE_STELEM_R8:
10134 case CEE_STELEM_REF:
10139 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10141 if (*ip == CEE_STELEM) {
10143 token = read32 (ip + 1);
10144 klass = mini_get_class (method, token, generic_context);
10145 CHECK_TYPELOAD (klass);
10146 mono_class_init (klass);
10149 klass = array_access_to_klass (*ip);
10151 if (sp [0]->type != STACK_OBJ)
10154 emit_array_store (cfg, klass, sp, TRUE);
10156 if (*ip == CEE_STELEM)
10163 case CEE_CKFINITE: {
10167 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10168 ins->sreg1 = sp [0]->dreg;
10169 ins->dreg = alloc_freg (cfg);
10170 ins->type = STACK_R8;
10171 MONO_ADD_INS (bblock, ins);
10173 *sp++ = mono_decompose_opcode (cfg, ins);
10178 case CEE_REFANYVAL: {
10179 MonoInst *src_var, *src;
10181 int klass_reg = alloc_preg (cfg);
10182 int dreg = alloc_preg (cfg);
10184 GSHAREDVT_FAILURE (*ip);
10187 MONO_INST_NEW (cfg, ins, *ip);
10190 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10191 CHECK_TYPELOAD (klass);
10192 mono_class_init (klass);
10194 context_used = mini_class_check_context_used (cfg, klass);
10197 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10199 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10200 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10203 if (context_used) {
10204 MonoInst *klass_ins;
10206 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10207 klass, MONO_RGCTX_INFO_KLASS);
10210 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10211 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10213 mini_emit_class_check (cfg, klass_reg, klass);
10215 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10216 ins->type = STACK_MP;
10221 case CEE_MKREFANY: {
10222 MonoInst *loc, *addr;
10224 GSHAREDVT_FAILURE (*ip);
10227 MONO_INST_NEW (cfg, ins, *ip);
10230 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10231 CHECK_TYPELOAD (klass);
10232 mono_class_init (klass);
10234 context_used = mini_class_check_context_used (cfg, klass);
10236 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10237 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10239 if (context_used) {
10240 MonoInst *const_ins;
10241 int type_reg = alloc_preg (cfg);
10243 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10246 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10247 } else if (cfg->compile_aot) {
10248 int const_reg = alloc_preg (cfg);
10249 int type_reg = alloc_preg (cfg);
10251 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10252 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10253 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10254 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10256 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10257 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10261 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10262 ins->type = STACK_VTYPE;
10263 ins->klass = mono_defaults.typed_reference_class;
10268 case CEE_LDTOKEN: {
10270 MonoClass *handle_class;
10272 CHECK_STACK_OVF (1);
10275 n = read32 (ip + 1);
10277 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10278 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10279 handle = mono_method_get_wrapper_data (method, n);
10280 handle_class = mono_method_get_wrapper_data (method, n + 1);
10281 if (handle_class == mono_defaults.typehandle_class)
10282 handle = &((MonoClass*)handle)->byval_arg;
10285 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10289 mono_class_init (handle_class);
10290 if (cfg->generic_sharing_context) {
10291 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10292 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10293 /* This case handles ldtoken
10294 of an open type, like for
10297 } else if (handle_class == mono_defaults.typehandle_class) {
10298 /* If we get a MONO_TYPE_CLASS
10299 then we need to provide the
10301 instantiation of it. */
10302 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10305 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10306 } else if (handle_class == mono_defaults.fieldhandle_class)
10307 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10308 else if (handle_class == mono_defaults.methodhandle_class)
10309 context_used = mini_method_check_context_used (cfg, handle);
10311 g_assert_not_reached ();
10314 if ((cfg->opt & MONO_OPT_SHARED) &&
10315 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10316 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10317 MonoInst *addr, *vtvar, *iargs [3];
10318 int method_context_used;
10320 method_context_used = mini_method_check_context_used (cfg, method);
10322 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10324 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10325 EMIT_NEW_ICONST (cfg, iargs [1], n);
10326 if (method_context_used) {
10327 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10328 method, MONO_RGCTX_INFO_METHOD);
10329 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10331 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10332 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10334 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10336 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10338 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10340 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10341 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10342 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10343 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10344 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10345 MonoClass *tclass = mono_class_from_mono_type (handle);
10347 mono_class_init (tclass);
10348 if (context_used) {
10349 ins = emit_get_rgctx_klass (cfg, context_used,
10350 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10351 } else if (cfg->compile_aot) {
10352 if (method->wrapper_type) {
10353 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10354 /* Special case for static synchronized wrappers */
10355 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10357 /* FIXME: n is not a normal token */
10358 cfg->disable_aot = TRUE;
10359 EMIT_NEW_PCONST (cfg, ins, NULL);
10362 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10365 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10367 ins->type = STACK_OBJ;
10368 ins->klass = cmethod->klass;
10371 MonoInst *addr, *vtvar;
10373 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10375 if (context_used) {
10376 if (handle_class == mono_defaults.typehandle_class) {
10377 ins = emit_get_rgctx_klass (cfg, context_used,
10378 mono_class_from_mono_type (handle),
10379 MONO_RGCTX_INFO_TYPE);
10380 } else if (handle_class == mono_defaults.methodhandle_class) {
10381 ins = emit_get_rgctx_method (cfg, context_used,
10382 handle, MONO_RGCTX_INFO_METHOD);
10383 } else if (handle_class == mono_defaults.fieldhandle_class) {
10384 ins = emit_get_rgctx_field (cfg, context_used,
10385 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10387 g_assert_not_reached ();
10389 } else if (cfg->compile_aot) {
10390 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10392 EMIT_NEW_PCONST (cfg, ins, handle);
10394 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10396 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10406 MONO_INST_NEW (cfg, ins, OP_THROW);
10408 ins->sreg1 = sp [0]->dreg;
10410 bblock->out_of_line = TRUE;
10411 MONO_ADD_INS (bblock, ins);
10412 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10413 MONO_ADD_INS (bblock, ins);
10416 link_bblock (cfg, bblock, end_bblock);
10417 start_new_bblock = 1;
10419 case CEE_ENDFINALLY:
10420 /* mono_save_seq_point_info () depends on this */
10421 if (sp != stack_start)
10422 emit_seq_point (cfg, method, ip, FALSE);
10423 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10424 MONO_ADD_INS (bblock, ins);
10426 start_new_bblock = 1;
10429 * Control will leave the method so empty the stack, otherwise
10430 * the next basic block will start with a nonempty stack.
10432 while (sp != stack_start) {
10437 case CEE_LEAVE_S: {
10440 if (*ip == CEE_LEAVE) {
10442 target = ip + 5 + (gint32)read32(ip + 1);
10445 target = ip + 2 + (signed char)(ip [1]);
10448 /* empty the stack */
10449 while (sp != stack_start) {
10454 * If this leave statement is in a catch block, check for a
10455 * pending exception, and rethrow it if necessary.
10456 * We avoid doing this in runtime invoke wrappers, since those are called
10457 * by native code which excepts the wrapper to catch all exceptions.
10459 for (i = 0; i < header->num_clauses; ++i) {
10460 MonoExceptionClause *clause = &header->clauses [i];
10463 * Use <= in the final comparison to handle clauses with multiple
10464 * leave statements, like in bug #78024.
10465 * The ordering of the exception clauses guarantees that we find the
10466 * innermost clause.
10468 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10470 MonoBasicBlock *dont_throw;
10475 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10478 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10480 NEW_BBLOCK (cfg, dont_throw);
10483 * Currently, we always rethrow the abort exception, despite the
10484 * fact that this is not correct. See thread6.cs for an example.
10485 * But propagating the abort exception is more important than
10486 * getting the sematics right.
10488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10490 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10492 MONO_START_BB (cfg, dont_throw);
10497 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10499 MonoExceptionClause *clause;
10501 for (tmp = handlers; tmp; tmp = tmp->next) {
10502 clause = tmp->data;
10503 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10505 link_bblock (cfg, bblock, tblock);
10506 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10507 ins->inst_target_bb = tblock;
10508 ins->inst_eh_block = clause;
10509 MONO_ADD_INS (bblock, ins);
10510 bblock->has_call_handler = 1;
10511 if (COMPILE_LLVM (cfg)) {
10512 MonoBasicBlock *target_bb;
10515 * Link the finally bblock with the target, since it will
10516 * conceptually branch there.
10517 * FIXME: Have to link the bblock containing the endfinally.
10519 GET_BBLOCK (cfg, target_bb, target);
10520 link_bblock (cfg, tblock, target_bb);
10523 g_list_free (handlers);
10526 MONO_INST_NEW (cfg, ins, OP_BR);
10527 MONO_ADD_INS (bblock, ins);
10528 GET_BBLOCK (cfg, tblock, target);
10529 link_bblock (cfg, bblock, tblock);
10530 ins->inst_target_bb = tblock;
10531 start_new_bblock = 1;
10533 if (*ip == CEE_LEAVE)
10542 * Mono specific opcodes
10544 case MONO_CUSTOM_PREFIX: {
10546 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10550 case CEE_MONO_ICALL: {
10552 MonoJitICallInfo *info;
10554 token = read32 (ip + 2);
10555 func = mono_method_get_wrapper_data (method, token);
10556 info = mono_find_jit_icall_by_addr (func);
10558 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10561 CHECK_STACK (info->sig->param_count);
10562 sp -= info->sig->param_count;
10564 ins = mono_emit_jit_icall (cfg, info->func, sp);
10565 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10569 inline_costs += 10 * num_calls++;
10573 case CEE_MONO_LDPTR: {
10576 CHECK_STACK_OVF (1);
10578 token = read32 (ip + 2);
10580 ptr = mono_method_get_wrapper_data (method, token);
10581 /* FIXME: Generalize this */
10582 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10583 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10588 EMIT_NEW_PCONST (cfg, ins, ptr);
10591 inline_costs += 10 * num_calls++;
10592 /* Can't embed random pointers into AOT code */
10593 cfg->disable_aot = 1;
10596 case CEE_MONO_JIT_ICALL_ADDR: {
10597 MonoJitICallInfo *callinfo;
10600 CHECK_STACK_OVF (1);
10602 token = read32 (ip + 2);
10604 ptr = mono_method_get_wrapper_data (method, token);
10605 callinfo = mono_find_jit_icall_by_addr (ptr);
10606 g_assert (callinfo);
10607 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10610 inline_costs += 10 * num_calls++;
10613 case CEE_MONO_ICALL_ADDR: {
10614 MonoMethod *cmethod;
10617 CHECK_STACK_OVF (1);
10619 token = read32 (ip + 2);
10621 cmethod = mono_method_get_wrapper_data (method, token);
10623 if (cfg->compile_aot) {
10624 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10626 ptr = mono_lookup_internal_call (cmethod);
10628 EMIT_NEW_PCONST (cfg, ins, ptr);
10634 case CEE_MONO_VTADDR: {
10635 MonoInst *src_var, *src;
10641 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10642 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10647 case CEE_MONO_NEWOBJ: {
10648 MonoInst *iargs [2];
10650 CHECK_STACK_OVF (1);
10652 token = read32 (ip + 2);
10653 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10654 mono_class_init (klass);
10655 NEW_DOMAINCONST (cfg, iargs [0]);
10656 MONO_ADD_INS (cfg->cbb, iargs [0]);
10657 NEW_CLASSCONST (cfg, iargs [1], klass);
10658 MONO_ADD_INS (cfg->cbb, iargs [1]);
10659 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10661 inline_costs += 10 * num_calls++;
10664 case CEE_MONO_OBJADDR:
10667 MONO_INST_NEW (cfg, ins, OP_MOVE);
10668 ins->dreg = alloc_ireg_mp (cfg);
10669 ins->sreg1 = sp [0]->dreg;
10670 ins->type = STACK_MP;
10671 MONO_ADD_INS (cfg->cbb, ins);
10675 case CEE_MONO_LDNATIVEOBJ:
10677 * Similar to LDOBJ, but instead load the unmanaged
10678 * representation of the vtype to the stack.
10683 token = read32 (ip + 2);
10684 klass = mono_method_get_wrapper_data (method, token);
10685 g_assert (klass->valuetype);
10686 mono_class_init (klass);
10689 MonoInst *src, *dest, *temp;
10692 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10693 temp->backend.is_pinvoke = 1;
10694 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10695 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10697 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10698 dest->type = STACK_VTYPE;
10699 dest->klass = klass;
10705 case CEE_MONO_RETOBJ: {
10707 * Same as RET, but return the native representation of a vtype
10710 g_assert (cfg->ret);
10711 g_assert (mono_method_signature (method)->pinvoke);
10716 token = read32 (ip + 2);
10717 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10719 if (!cfg->vret_addr) {
10720 g_assert (cfg->ret_var_is_local);
10722 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10724 EMIT_NEW_RETLOADA (cfg, ins);
10726 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10728 if (sp != stack_start)
10731 MONO_INST_NEW (cfg, ins, OP_BR);
10732 ins->inst_target_bb = end_bblock;
10733 MONO_ADD_INS (bblock, ins);
10734 link_bblock (cfg, bblock, end_bblock);
10735 start_new_bblock = 1;
10739 case CEE_MONO_CISINST:
10740 case CEE_MONO_CCASTCLASS: {
10745 token = read32 (ip + 2);
10746 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10747 if (ip [1] == CEE_MONO_CISINST)
10748 ins = handle_cisinst (cfg, klass, sp [0]);
10750 ins = handle_ccastclass (cfg, klass, sp [0]);
10756 case CEE_MONO_SAVE_LMF:
10757 case CEE_MONO_RESTORE_LMF:
10758 #ifdef MONO_ARCH_HAVE_LMF_OPS
10759 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10760 MONO_ADD_INS (bblock, ins);
10761 cfg->need_lmf_area = TRUE;
10765 case CEE_MONO_CLASSCONST:
10766 CHECK_STACK_OVF (1);
10768 token = read32 (ip + 2);
10769 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10772 inline_costs += 10 * num_calls++;
10774 case CEE_MONO_NOT_TAKEN:
10775 bblock->out_of_line = TRUE;
10779 CHECK_STACK_OVF (1);
10781 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10782 ins->dreg = alloc_preg (cfg);
10783 ins->inst_offset = (gint32)read32 (ip + 2);
10784 ins->type = STACK_PTR;
10785 MONO_ADD_INS (bblock, ins);
10789 case CEE_MONO_DYN_CALL: {
10790 MonoCallInst *call;
10792 /* It would be easier to call a trampoline, but that would put an
10793 * extra frame on the stack, confusing exception handling. So
10794 * implement it inline using an opcode for now.
10797 if (!cfg->dyn_call_var) {
10798 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10799 /* prevent it from being register allocated */
10800 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10803 /* Has to use a call inst since it local regalloc expects it */
10804 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10805 ins = (MonoInst*)call;
10807 ins->sreg1 = sp [0]->dreg;
10808 ins->sreg2 = sp [1]->dreg;
10809 MONO_ADD_INS (bblock, ins);
10811 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10812 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10816 inline_costs += 10 * num_calls++;
10820 case CEE_MONO_MEMORY_BARRIER: {
10822 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10826 case CEE_MONO_JIT_ATTACH: {
10827 MonoInst *args [16];
10828 MonoInst *ad_ins, *lmf_ins;
10829 MonoBasicBlock *next_bb = NULL;
10831 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10833 EMIT_NEW_PCONST (cfg, ins, NULL);
10834 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10840 ad_ins = mono_get_domain_intrinsic (cfg);
10841 lmf_ins = mono_get_lmf_intrinsic (cfg);
10844 #ifdef MONO_ARCH_HAVE_TLS_GET
10845 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10846 NEW_BBLOCK (cfg, next_bb);
10848 MONO_ADD_INS (cfg->cbb, ad_ins);
10849 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10852 MONO_ADD_INS (cfg->cbb, lmf_ins);
10853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10854 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10858 if (cfg->compile_aot) {
10859 /* AOT code is only used in the root domain */
10860 EMIT_NEW_PCONST (cfg, args [0], NULL);
10862 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10864 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10865 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10868 MONO_START_BB (cfg, next_bb);
10874 case CEE_MONO_JIT_DETACH: {
10875 MonoInst *args [16];
10877 /* Restore the original domain */
10878 dreg = alloc_ireg (cfg);
10879 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10880 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10885 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10891 case CEE_PREFIX1: {
10894 case CEE_ARGLIST: {
10895 /* somewhat similar to LDTOKEN */
10896 MonoInst *addr, *vtvar;
10897 CHECK_STACK_OVF (1);
10898 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10900 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10901 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10903 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10904 ins->type = STACK_VTYPE;
10905 ins->klass = mono_defaults.argumenthandle_class;
10918 * The following transforms:
10919 * CEE_CEQ into OP_CEQ
10920 * CEE_CGT into OP_CGT
10921 * CEE_CGT_UN into OP_CGT_UN
10922 * CEE_CLT into OP_CLT
10923 * CEE_CLT_UN into OP_CLT_UN
10925 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10927 MONO_INST_NEW (cfg, ins, cmp->opcode);
10929 cmp->sreg1 = sp [0]->dreg;
10930 cmp->sreg2 = sp [1]->dreg;
10931 type_from_op (cmp, sp [0], sp [1]);
10933 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10934 cmp->opcode = OP_LCOMPARE;
10935 else if (sp [0]->type == STACK_R8)
10936 cmp->opcode = OP_FCOMPARE;
10938 cmp->opcode = OP_ICOMPARE;
10939 MONO_ADD_INS (bblock, cmp);
10940 ins->type = STACK_I4;
10941 ins->dreg = alloc_dreg (cfg, ins->type);
10942 type_from_op (ins, sp [0], sp [1]);
10944 if (cmp->opcode == OP_FCOMPARE) {
10946 * The backends expect the fceq opcodes to do the
10949 cmp->opcode = OP_NOP;
10950 ins->sreg1 = cmp->sreg1;
10951 ins->sreg2 = cmp->sreg2;
10953 MONO_ADD_INS (bblock, ins);
10959 MonoInst *argconst;
10960 MonoMethod *cil_method;
10962 CHECK_STACK_OVF (1);
10964 n = read32 (ip + 2);
10965 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10966 if (!cmethod || mono_loader_get_last_error ())
10968 mono_class_init (cmethod->klass);
10970 mono_save_token_info (cfg, image, n, cmethod);
10972 context_used = mini_method_check_context_used (cfg, cmethod);
10974 cil_method = cmethod;
10975 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10976 METHOD_ACCESS_FAILURE;
10978 if (mono_security_cas_enabled ()) {
10979 if (check_linkdemand (cfg, method, cmethod))
10980 INLINE_FAILURE ("linkdemand");
10981 CHECK_CFG_EXCEPTION;
10982 } else if (mono_security_core_clr_enabled ()) {
10983 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10987 * Optimize the common case of ldftn+delegate creation
10989 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10990 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10991 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10992 MonoInst *target_ins;
10993 MonoMethod *invoke;
10994 int invoke_context_used;
10996 invoke = mono_get_delegate_invoke (ctor_method->klass);
10997 if (!invoke || !mono_method_signature (invoke))
11000 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11002 target_ins = sp [-1];
11004 if (mono_security_core_clr_enabled ())
11005 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11007 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11008 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11009 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11011 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11015 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11016 /* FIXME: SGEN support */
11017 if (invoke_context_used == 0) {
11019 if (cfg->verbose_level > 3)
11020 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11022 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11023 CHECK_CFG_EXCEPTION;
11032 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11033 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11037 inline_costs += 10 * num_calls++;
11040 case CEE_LDVIRTFTN: {
11041 MonoInst *args [2];
11045 n = read32 (ip + 2);
11046 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11047 if (!cmethod || mono_loader_get_last_error ())
11049 mono_class_init (cmethod->klass);
11051 context_used = mini_method_check_context_used (cfg, cmethod);
11053 if (mono_security_cas_enabled ()) {
11054 if (check_linkdemand (cfg, method, cmethod))
11055 INLINE_FAILURE ("linkdemand");
11056 CHECK_CFG_EXCEPTION;
11057 } else if (mono_security_core_clr_enabled ()) {
11058 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11064 args [1] = emit_get_rgctx_method (cfg, context_used,
11065 cmethod, MONO_RGCTX_INFO_METHOD);
11068 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11070 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11073 inline_costs += 10 * num_calls++;
11077 CHECK_STACK_OVF (1);
11079 n = read16 (ip + 2);
11081 EMIT_NEW_ARGLOAD (cfg, ins, n);
11086 CHECK_STACK_OVF (1);
11088 n = read16 (ip + 2);
11090 NEW_ARGLOADA (cfg, ins, n);
11091 MONO_ADD_INS (cfg->cbb, ins);
11099 n = read16 (ip + 2);
11101 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11103 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11107 CHECK_STACK_OVF (1);
11109 n = read16 (ip + 2);
11111 EMIT_NEW_LOCLOAD (cfg, ins, n);
11116 unsigned char *tmp_ip;
11117 CHECK_STACK_OVF (1);
11119 n = read16 (ip + 2);
11122 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11128 EMIT_NEW_LOCLOADA (cfg, ins, n);
11137 n = read16 (ip + 2);
11139 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11141 emit_stloc_ir (cfg, sp, header, n);
11148 if (sp != stack_start)
11150 if (cfg->method != method)
11152 * Inlining this into a loop in a parent could lead to
11153 * stack overflows which is different behavior than the
11154 * non-inlined case, thus disable inlining in this case.
11156 goto inline_failure;
11158 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11159 ins->dreg = alloc_preg (cfg);
11160 ins->sreg1 = sp [0]->dreg;
11161 ins->type = STACK_PTR;
11162 MONO_ADD_INS (cfg->cbb, ins);
11164 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11166 ins->flags |= MONO_INST_INIT;
11171 case CEE_ENDFILTER: {
11172 MonoExceptionClause *clause, *nearest;
11173 int cc, nearest_num;
11177 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11179 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11180 ins->sreg1 = (*sp)->dreg;
11181 MONO_ADD_INS (bblock, ins);
11182 start_new_bblock = 1;
11187 for (cc = 0; cc < header->num_clauses; ++cc) {
11188 clause = &header->clauses [cc];
11189 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11190 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11191 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11196 g_assert (nearest);
11197 if ((ip - header->code) != nearest->handler_offset)
11202 case CEE_UNALIGNED_:
11203 ins_flag |= MONO_INST_UNALIGNED;
11204 /* FIXME: record alignment? we can assume 1 for now */
11208 case CEE_VOLATILE_:
11209 ins_flag |= MONO_INST_VOLATILE;
11213 ins_flag |= MONO_INST_TAILCALL;
11214 cfg->flags |= MONO_CFG_HAS_TAIL;
11215 /* Can't inline tail calls at this time */
11216 inline_costs += 100000;
11223 token = read32 (ip + 2);
11224 klass = mini_get_class (method, token, generic_context);
11225 CHECK_TYPELOAD (klass);
11226 if (generic_class_is_reference_type (cfg, klass))
11227 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11229 mini_emit_initobj (cfg, *sp, NULL, klass);
11233 case CEE_CONSTRAINED_:
11235 token = read32 (ip + 2);
11236 constrained_call = mini_get_class (method, token, generic_context);
11237 CHECK_TYPELOAD (constrained_call);
11241 case CEE_INITBLK: {
11242 MonoInst *iargs [3];
11246 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11247 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11248 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11249 /* emit_memset only works when val == 0 */
11250 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11252 iargs [0] = sp [0];
11253 iargs [1] = sp [1];
11254 iargs [2] = sp [2];
11255 if (ip [1] == CEE_CPBLK) {
11256 MonoMethod *memcpy_method = get_memcpy_method ();
11257 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11259 MonoMethod *memset_method = get_memset_method ();
11260 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11270 ins_flag |= MONO_INST_NOTYPECHECK;
11272 ins_flag |= MONO_INST_NORANGECHECK;
11273 /* we ignore the no-nullcheck for now since we
11274 * really do it explicitly only when doing callvirt->call
11278 case CEE_RETHROW: {
11280 int handler_offset = -1;
11282 for (i = 0; i < header->num_clauses; ++i) {
11283 MonoExceptionClause *clause = &header->clauses [i];
11284 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11285 handler_offset = clause->handler_offset;
11290 bblock->flags |= BB_EXCEPTION_UNSAFE;
11292 g_assert (handler_offset != -1);
11294 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11295 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11296 ins->sreg1 = load->dreg;
11297 MONO_ADD_INS (bblock, ins);
11299 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11300 MONO_ADD_INS (bblock, ins);
11303 link_bblock (cfg, bblock, end_bblock);
11304 start_new_bblock = 1;
11312 GSHAREDVT_FAILURE (*ip);
11314 CHECK_STACK_OVF (1);
11316 token = read32 (ip + 2);
11317 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11318 MonoType *type = mono_type_create_from_typespec (image, token);
11319 val = mono_type_size (type, &ialign);
11321 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11322 CHECK_TYPELOAD (klass);
11323 mono_class_init (klass);
11324 val = mono_type_size (&klass->byval_arg, &ialign);
11326 EMIT_NEW_ICONST (cfg, ins, val);
11331 case CEE_REFANYTYPE: {
11332 MonoInst *src_var, *src;
11334 GSHAREDVT_FAILURE (*ip);
11340 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11342 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11343 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11349 case CEE_READONLY_:
11362 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11372 g_warning ("opcode 0x%02x not handled", *ip);
11376 if (start_new_bblock != 1)
11379 bblock->cil_length = ip - bblock->cil_code;
11380 if (bblock->next_bb) {
11381 /* This could already be set because of inlining, #693905 */
11382 MonoBasicBlock *bb = bblock;
11384 while (bb->next_bb)
11386 bb->next_bb = end_bblock;
11388 bblock->next_bb = end_bblock;
11391 if (cfg->method == method && cfg->domainvar) {
11393 MonoInst *get_domain;
11395 cfg->cbb = init_localsbb;
11397 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11398 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11401 get_domain->dreg = alloc_preg (cfg);
11402 MONO_ADD_INS (cfg->cbb, get_domain);
11404 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11405 MONO_ADD_INS (cfg->cbb, store);
11408 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11409 if (cfg->compile_aot)
11410 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11411 mono_get_got_var (cfg);
11414 if (cfg->method == method && cfg->got_var)
11415 mono_emit_load_got_addr (cfg);
11420 cfg->cbb = init_localsbb;
11422 for (i = 0; i < header->num_locals; ++i) {
11423 MonoType *ptype = header->locals [i];
11424 int t = ptype->type;
11425 dreg = cfg->locals [i]->dreg;
11427 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11428 t = mono_class_enum_basetype (ptype->data.klass)->type;
11429 if (ptype->byref) {
11430 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11431 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11432 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11433 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11434 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11435 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11436 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11437 ins->type = STACK_R8;
11438 ins->inst_p0 = (void*)&r8_0;
11439 ins->dreg = alloc_dreg (cfg, STACK_R8);
11440 MONO_ADD_INS (init_localsbb, ins);
11441 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11442 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11443 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11444 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11445 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11446 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11448 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11453 if (cfg->init_ref_vars && cfg->method == method) {
11454 /* Emit initialization for ref vars */
11455 // FIXME: Avoid duplication initialization for IL locals.
11456 for (i = 0; i < cfg->num_varinfo; ++i) {
11457 MonoInst *ins = cfg->varinfo [i];
11459 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11460 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11465 MonoBasicBlock *bb;
11468 * Make seq points at backward branch targets interruptable.
11470 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11471 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11472 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11475 /* Add a sequence point for method entry/exit events */
11477 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11478 MONO_ADD_INS (init_localsbb, ins);
11479 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11480 MONO_ADD_INS (cfg->bb_exit, ins);
11484 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11485 * the code they refer to was dead (#11880).
11487 if (sym_seq_points) {
11488 for (i = 0; i < header->code_size; ++i) {
11489 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11492 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11493 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11500 if (cfg->method == method) {
11501 MonoBasicBlock *bb;
11502 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11503 bb->region = mono_find_block_region (cfg, bb->real_offset);
11505 mono_create_spvar_for_region (cfg, bb->region);
11506 if (cfg->verbose_level > 2)
11507 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11511 g_slist_free (class_inits);
11512 dont_inline = g_list_remove (dont_inline, method);
11514 if (inline_costs < 0) {
11517 /* Method is too large */
11518 mname = mono_method_full_name (method, TRUE);
11519 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11520 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11522 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11523 mono_basic_block_free (original_bb);
11527 if ((cfg->verbose_level > 2) && (cfg->method == method))
11528 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11530 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11531 mono_basic_block_free (original_bb);
11532 return inline_costs;
11535 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11542 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11546 set_exception_type_from_invalid_il (cfg, method, ip);
11550 g_slist_free (class_inits);
11551 mono_basic_block_free (original_bb);
11552 dont_inline = g_list_remove (dont_inline, method);
11553 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * used when the value being stored is known to be an immediate constant.
 * Opcodes with no immediate form hit g_assert_not_reached ().
 * NOTE(review): this extraction is missing the return-type line, the
 * switch header and the closing braces of the original — confirm against
 * the full mono/mini/method-to-ir.c before relying on exact structure.
 */
11558 store_membase_reg_to_store_membase_imm (int opcode)
11561 	case OP_STORE_MEMBASE_REG:
11562 		return OP_STORE_MEMBASE_IMM;
11563 	case OP_STOREI1_MEMBASE_REG:
11564 		return OP_STOREI1_MEMBASE_IMM;
11565 	case OP_STOREI2_MEMBASE_REG:
11566 		return OP_STOREI2_MEMBASE_IMM;
11567 	case OP_STOREI4_MEMBASE_REG:
11568 		return OP_STOREI4_MEMBASE_IMM;
11569 	case OP_STOREI8_MEMBASE_REG:
11570 		return OP_STOREI8_MEMBASE_IMM;
	/* no immediate form exists for this store opcode */
11572 		g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand (_IMM) variant of OPCODE, used by the
 * front end to fold a constant second operand directly into the
 * instruction instead of materializing it in a register.
 * Covers 32-bit ALU ops, 64-bit ALU ops, compares, membase stores,
 * some x86/amd64-specific ops and (last visible line) LOCALLOC.
 * NOTE(review): most "case OP_*:" labels were stripped by this
 * extraction — only the return statements survive.  Reconstruct the
 * label/return pairing from the full source; do not infer it from the
 * ordering here alone.
 */
11579 mono_op_to_op_imm (int opcode)
	/* 32-bit integer ALU / shift ops */
11583 		return OP_IADD_IMM;
11585 		return OP_ISUB_IMM;
11587 		return OP_IDIV_IMM;
11589 		return OP_IDIV_UN_IMM;
11591 		return OP_IREM_IMM;
11593 		return OP_IREM_UN_IMM;
11595 		return OP_IMUL_IMM;
11597 		return OP_IAND_IMM;
11601 		return OP_IXOR_IMM;
11603 		return OP_ISHL_IMM;
11605 		return OP_ISHR_IMM;
11607 		return OP_ISHR_UN_IMM;
	/* 64-bit integer ALU / shift ops */
11610 		return OP_LADD_IMM;
11612 		return OP_LSUB_IMM;
11614 		return OP_LAND_IMM;
11618 		return OP_LXOR_IMM;
11620 		return OP_LSHL_IMM;
11622 		return OP_LSHR_IMM;
11624 		return OP_LSHR_UN_IMM;
	/* compares */
11627 		return OP_COMPARE_IMM;
11629 		return OP_ICOMPARE_IMM;
11631 		return OP_LCOMPARE_IMM;
	/* membase stores with a constant source */
11633 	case OP_STORE_MEMBASE_REG:
11634 		return OP_STORE_MEMBASE_IMM;
11635 	case OP_STOREI1_MEMBASE_REG:
11636 		return OP_STOREI1_MEMBASE_IMM;
11637 	case OP_STOREI2_MEMBASE_REG:
11638 		return OP_STOREI2_MEMBASE_IMM;
11639 	case OP_STOREI4_MEMBASE_REG:
11640 		return OP_STOREI4_MEMBASE_IMM;
	/* architecture-specific immediate forms */
11642 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11644 		return OP_X86_PUSH_IMM;
11645 	case OP_X86_COMPARE_MEMBASE_REG:
11646 		return OP_X86_COMPARE_MEMBASE_IMM;
11648 #if defined(TARGET_AMD64)
11649 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
11650 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11652 	case OP_VOIDCALL_REG:
11653 		return OP_VOIDCALL;
11661 		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * typed OP_LOAD*_MEMBASE JIT IR opcode.  CEE_LDIND_REF (visible below)
 * and the pointer-sized variant both map to plain OP_LOAD_MEMBASE;
 * unknown opcodes hit g_assert_not_reached ().
 * NOTE(review): this extraction dropped most "case CEE_LDIND_*:" labels;
 * only CEE_LDIND_REF survives.  Verify the full label set against the
 * original file.
 */
11668 ldind_to_load_membase (int opcode)
11672 		return OP_LOADI1_MEMBASE;
11674 		return OP_LOADU1_MEMBASE;
11676 		return OP_LOADI2_MEMBASE;
11678 		return OP_LOADU2_MEMBASE;
11680 		return OP_LOADI4_MEMBASE;
11682 		return OP_LOADU4_MEMBASE;
11684 		return OP_LOAD_MEMBASE;
11685 	case CEE_LDIND_REF:
	/* object references use the pointer-sized load */
11686 		return OP_LOAD_MEMBASE;
11688 		return OP_LOADI8_MEMBASE;
11690 		return OP_LOADR4_MEMBASE;
11692 		return OP_LOADR8_MEMBASE;
	/* not a CEE_LDIND_* opcode */
11694 		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * typed OP_STORE*_MEMBASE_REG JIT IR opcode.  CEE_STIND_REF (visible
 * below) maps to the pointer-sized OP_STORE_MEMBASE_REG; unknown
 * opcodes hit g_assert_not_reached ().
 * NOTE(review): most "case CEE_STIND_*:" labels were stripped by this
 * extraction — confirm the label/return pairing against the full source.
 */
11701 stind_to_store_membase (int opcode)
11705 		return OP_STOREI1_MEMBASE_REG;
11707 		return OP_STOREI2_MEMBASE_REG;
11709 		return OP_STOREI4_MEMBASE_REG;
11711 	case CEE_STIND_REF:
	/* object references use the pointer-sized store */
11712 		return OP_STORE_MEMBASE_REG;
11714 		return OP_STOREI8_MEMBASE_REG;
11716 		return OP_STORER4_MEMBASE_REG;
11718 		return OP_STORER8_MEMBASE_REG;
	/* not a CEE_STIND_* opcode */
11720 		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to an absolute-address load
 * (OP_LOAD*_MEM) where the target supports it.  Only x86/amd64 have the
 * _MEM forms here; the 8-byte variant is additionally gated on a 64-bit
 * register size.
 */
11727 mono_load_membase_to_load_mem (int opcode)
11729 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11730 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11732 case OP_LOAD_MEMBASE:
11733 return OP_LOAD_MEM;
11734 case OP_LOADU1_MEMBASE:
11735 return OP_LOADU1_MEM;
11736 case OP_LOADU2_MEMBASE:
11737 return OP_LOADU2_MEM;
11738 case OP_LOADI4_MEMBASE:
11739 return OP_LOADI4_MEM;
11740 case OP_LOADU4_MEMBASE:
11741 return OP_LOADU4_MEM;
11742 #if SIZEOF_REGISTER == 8
11743 case OP_LOADI8_MEMBASE:
11744 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode of a destination variable and an ALU opcode, return
 * an x86/amd64 read-modify-write _MEMBASE opcode that operates directly on the
 * variable's stack slot, so the spill pass can fuse a load+op+store sequence
 * into one instruction.  The early guard rejects store widths the fused forms
 * cannot handle (amd64 additionally accepts 8-byte stores).
 * NOTE(review): listing is elided — the case labels and the default return
 * (presumably -1, given how callers test the result) are not visible here.
 */
11753 op_to_op_dest_membase (int store_opcode, int opcode)
11755 #if defined(TARGET_X86)
11756 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11761 return OP_X86_ADD_MEMBASE_REG;
11763 return OP_X86_SUB_MEMBASE_REG;
11765 return OP_X86_AND_MEMBASE_REG;
11767 return OP_X86_OR_MEMBASE_REG;
11769 return OP_X86_XOR_MEMBASE_REG;
11772 return OP_X86_ADD_MEMBASE_IMM;
11775 return OP_X86_SUB_MEMBASE_IMM;
11778 return OP_X86_AND_MEMBASE_IMM;
11781 return OP_X86_OR_MEMBASE_IMM;
11784 return OP_X86_XOR_MEMBASE_IMM;
11790 #if defined(TARGET_AMD64)
11791 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
11796 return OP_X86_ADD_MEMBASE_REG;
11798 return OP_X86_SUB_MEMBASE_REG;
11800 return OP_X86_AND_MEMBASE_REG;
11802 return OP_X86_OR_MEMBASE_REG;
11804 return OP_X86_XOR_MEMBASE_REG;
11806 return OP_X86_ADD_MEMBASE_IMM;
11808 return OP_X86_SUB_MEMBASE_IMM;
11810 return OP_X86_AND_MEMBASE_IMM;
11812 return OP_X86_OR_MEMBASE_IMM;
11814 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants below. */
11816 return OP_AMD64_ADD_MEMBASE_REG;
11818 return OP_AMD64_SUB_MEMBASE_REG;
11820 return OP_AMD64_AND_MEMBASE_REG;
11822 return OP_AMD64_OR_MEMBASE_REG;
11824 return OP_AMD64_XOR_MEMBASE_REG;
11827 return OP_AMD64_ADD_MEMBASE_IMM;
11830 return OP_AMD64_SUB_MEMBASE_IMM;
11833 return OP_AMD64_AND_MEMBASE_IMM;
11836 return OP_AMD64_OR_MEMBASE_IMM;
11839 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1-byte store: on x86/amd64,
 * a SETcc can write its result directly to the variable's stack slot
 * (OP_X86_SET*_MEMBASE).  Only applies when the destination store is a
 * 1-byte store (OP_STOREI1_MEMBASE_REG).
 * NOTE(review): elided listing — the case labels (presumably OP_ICEQ/OP_ICNEQ
 * or similar) and default return are not visible; confirm against full source.
 */
11849 op_to_op_store_membase (int store_opcode, int opcode)
11851 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11854 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11855 return OP_X86_SETEQ_MEMBASE;
11857 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11858 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the load opcode of a variable and a consuming opcode, return a fused
 * opcode that reads sreg1 directly from the variable's stack slot (push /
 * compare _MEMBASE forms on x86/amd64), saving the explicit load.  Guards
 * restrict fusing to loads whose width matches what the fused instruction
 * reads; under ILP32 (x32), OP_LOADI8_MEMBASE is excluded from the
 * pointer-sized paths.
 * NOTE(review): elided listing — switch heads, some case labels, and the
 * default return are not visible here.
 */
11866 op_to_op_src1_membase (int load_opcode, int opcode)
11869 /* FIXME: This has sign extension issues */
11871 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11872 return OP_X86_COMPARE_MEMBASE8_IMM;
11875 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11880 return OP_X86_PUSH_MEMBASE;
11881 case OP_COMPARE_IMM:
11882 case OP_ICOMPARE_IMM:
11883 return OP_X86_COMPARE_MEMBASE_IMM;
11886 return OP_X86_COMPARE_MEMBASE_REG;
11890 #ifdef TARGET_AMD64
11891 /* FIXME: This has sign extension issues */
11893 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11894 return OP_X86_COMPARE_MEMBASE8_IMM;
11899 #ifdef __mono_ilp32__
11900 if (load_opcode == OP_LOADI8_MEMBASE)
11902 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11904 return OP_X86_PUSH_MEMBASE;
11906 /* FIXME: This only works for 32 bit immediates
11907 case OP_COMPARE_IMM:
11908 case OP_LCOMPARE_IMM:
11909 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11910 return OP_AMD64_COMPARE_MEMBASE_IMM;
11912 case OP_ICOMPARE_IMM:
11913 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11914 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11918 #ifdef __mono_ilp32__
11919 if (load_opcode == OP_LOAD_MEMBASE)
11920 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11921 if (load_opcode == OP_LOADI8_MEMBASE)
11923 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11925 return OP_AMD64_COMPARE_MEMBASE_REG;
11928 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11929 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase for the second source operand:
 * return a REG_MEMBASE form whose sreg2 is read directly from the variable's
 * stack slot (compare/add/sub/and/or/xor on x86/amd64).  The amd64 half
 * dispatches on the load width — 4-byte loads select the 32-bit (OP_X86_*)
 * forms, 8-byte loads the 64-bit (OP_AMD64_*) forms, with ILP32 adjusting
 * which widths count as pointer-sized.
 * NOTE(review): elided listing — switch heads and default return not visible.
 */
11938 op_to_op_src2_membase (int load_opcode, int opcode)
11941 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11947 return OP_X86_COMPARE_REG_MEMBASE;
11949 return OP_X86_ADD_REG_MEMBASE;
11951 return OP_X86_SUB_REG_MEMBASE;
11953 return OP_X86_AND_REG_MEMBASE;
11955 return OP_X86_OR_REG_MEMBASE;
11957 return OP_X86_XOR_REG_MEMBASE;
11961 #ifdef TARGET_AMD64
11962 #ifdef __mono_ilp32__
11963 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11965 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11969 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11971 return OP_X86_ADD_REG_MEMBASE;
11973 return OP_X86_SUB_REG_MEMBASE;
11975 return OP_X86_AND_REG_MEMBASE;
11977 return OP_X86_OR_REG_MEMBASE;
11979 return OP_X86_XOR_REG_MEMBASE;
11981 #ifdef __mono_ilp32__
11982 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11984 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
11989 return OP_AMD64_COMPARE_REG_MEMBASE;
11991 return OP_AMD64_ADD_REG_MEMBASE;
11993 return OP_AMD64_SUB_REG_MEMBASE;
11995 return OP_AMD64_AND_REG_MEMBASE;
11997 return OP_AMD64_OR_REG_MEMBASE;
11999 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that are
 * emulated in software on this target (long shifts on 32-bit registers,
 * mul/div where MONO_ARCH_EMULATE_* is defined), since those have no real
 * immediate machine form.  Delegates to mono_op_to_op_imm () otherwise.
 * NOTE(review): elided listing — the filtered case labels are not visible.
 */
12008 mono_op_to_op_imm_noemul (int opcode)
12011 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12017 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12024 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12029 return mono_op_to_op_imm (opcode);
12034 * mono_handle_global_vregs:
12036 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12040 mono_handle_global_vregs (MonoCompile *cfg)
12042 gint32 *vreg_to_bb;
12043 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it,
 * or -1 once the vreg has been seen in more than one bblock.
 * NOTE(review): suspected allocation bug below — the element size is
 * sizeof (gint32*) (pointer size) rather than sizeof (gint32), and the
 * "+ 1" binds to the byte count, not the element count; it was presumably
 * meant as sizeof (gint32) * (cfg->next_vreg + 1).  Over-allocates on
 * 64-bit, but under-allocates the intended extra slot — confirm and fix
 * in the full source.
 */
12046 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12048 #ifdef MONO_ARCH_SIMD_INTRINSICS
12049 if (cfg->uses_simd_intrinsics)
12050 mono_simd_simplify_indirection (cfg);
12053 /* Find local vregs used in more than one bb */
12054 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12055 MonoInst *ins = bb->code;
12056 int block_num = bb->block_num;
12058 if (cfg->verbose_level > 2)
12059 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12062 for (; ins; ins = ins->next) {
12063 const char *spec = INS_INFO (ins->opcode);
12064 int regtype = 0, regindex;
12067 if (G_UNLIKELY (cfg->verbose_level > 2))
12068 mono_print_ins (ins);
12070 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction. */
12072 for (regindex = 0; regindex < 4; regindex ++) {
12075 if (regindex == 0) {
12076 regtype = spec [MONO_INST_DEST];
12077 if (regtype == ' ')
12080 } else if (regindex == 1) {
12081 regtype = spec [MONO_INST_SRC1];
12082 if (regtype == ' ')
12085 } else if (regindex == 2) {
12086 regtype = spec [MONO_INST_SRC2];
12087 if (regtype == ' ')
12090 } else if (regindex == 3) {
12091 regtype = spec [MONO_INST_SRC3];
12092 if (regtype == ' ')
12097 #if SIZEOF_REGISTER == 4
12098 /* In the LLVM case, the long opcodes are not decomposed */
12099 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12101 * Since some instructions reference the original long vreg,
12102 * and some reference the two component vregs, it is quite hard
12103 * to determine when it needs to be global. So be conservative.
12105 if (!get_vreg_to_inst (cfg, vreg)) {
12106 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12108 if (cfg->verbose_level > 2)
12109 printf ("LONG VREG R%d made global.\n", vreg);
12113 * Make the component vregs volatile since the optimizations can
12114 * get confused otherwise.
12116 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12117 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12121 g_assert (vreg != -1);
12123 prev_bb = vreg_to_bb [vreg];
12124 if (prev_bb == 0) {
12125 /* 0 is a valid block num */
12126 vreg_to_bb [vreg] = block_num + 1;
12127 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them. */
12128 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12131 if (!get_vreg_to_inst (cfg, vreg)) {
12132 if (G_UNLIKELY (cfg->verbose_level > 2))
12133 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the regtype / klass of the defining ins. */
12137 if (vreg_is_ref (cfg, vreg))
12138 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12140 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12143 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12146 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12149 g_assert_not_reached ();
12156 /* Flag as having been used in more than one bb */
12157 vreg_to_bb [vreg] = -1;
12163 /* If a variable is used in only one bblock, convert it into a local vreg */
12164 for (i = 0; i < cfg->num_varinfo; i++) {
12165 MonoInst *var = cfg->varinfo [i];
12166 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12168 switch (var->type) {
12174 #if SIZEOF_REGISTER == 8
12177 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
12178 /* Enabling this screws up the fp stack on x86 */
12181 /* Arguments are implicitly global */
12182 /* Putting R4 vars into registers doesn't work currently */
12183 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
12185 * Make that the variable's liveness interval doesn't contain a call, since
12186 * that would cause the lvreg to be spilled, making the whole optimization
12189 /* This is too slow for JIT compilation */
12191 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12193 int def_index, call_index, ins_index;
12194 gboolean spilled = FALSE;
12199 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12200 const char *spec = INS_INFO (ins->opcode);
12202 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12203 def_index = ins_index;
/*
 * NOTE(review): the two clauses of this || are identical — the second
 * almost certainly should test MONO_INST_SRC2 / ins->sreg2, so uses via
 * sreg2 are currently never detected here.  Copy-paste bug; confirm and
 * fix in the full source.
 */
12205 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12206 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12207 if (call_index > def_index) {
12213 if (MONO_IS_CALL (ins))
12214 call_index = ins_index;
12224 if (G_UNLIKELY (cfg->verbose_level > 2))
12225 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12226 var->flags |= MONO_INST_IS_DEAD;
12227 cfg->vreg_to_inst [var->dreg] = NULL;
12234 * Compress the varinfo and vars tables so the liveness computation is faster and
12235 * takes up less space.
12238 for (i = 0; i < cfg->num_varinfo; ++i) {
12239 MonoInst *var = cfg->varinfo [i];
12240 if (pos < i && cfg->locals_start == i)
12241 cfg->locals_start = pos;
12242 if (!(var->flags & MONO_INST_IS_DEAD)) {
12244 cfg->varinfo [pos] = cfg->varinfo [i];
12245 cfg->varinfo [pos]->inst_c0 = pos;
12246 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12247 cfg->vars [pos].idx = pos;
12248 #if SIZEOF_REGISTER == 4
12249 if (cfg->varinfo [pos]->type == STACK_I8) {
12250 /* Modify the two component vars too */
12253 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12254 var1->inst_c0 = pos;
12255 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12256 var1->inst_c0 = pos;
12263 cfg->num_varinfo = pos;
12264 if (cfg->locals_start > cfg->num_varinfo)
12265 cfg->locals_start = cfg->num_varinfo;
12269 * mono_spill_global_vars:
12271 * Generate spill code for variables which are not allocated to registers,
12272 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12273 * code is generated which could be optimized by the local optimization passes.
12276 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12278 MonoBasicBlock *bb;
12280 int orig_next_vreg;
12281 guint32 *vreg_to_lvreg;
12283 guint32 i, lvregs_len;
12284 gboolean dest_has_lvreg = FALSE;
/* Indexed by the regtype chars ('i'/'l'/'f'/'x') used in ins specs. */
12285 guint32 stacktypes [128];
12286 MonoInst **live_range_start, **live_range_end;
12287 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12289 *need_local_opts = FALSE;
12291 memset (spec2, 0, sizeof (spec2));
12293 /* FIXME: Move this function to mini.c */
12294 stacktypes ['i'] = STACK_PTR;
12295 stacktypes ['l'] = STACK_I8;
12296 stacktypes ['f'] = STACK_R8;
12297 #ifdef MONO_ARCH_SIMD_INTRINSICS
12298 stacktypes ['x'] = STACK_VTYPE;
12301 #if SIZEOF_REGISTER == 4
12302 /* Create MonoInsts for longs */
12303 for (i = 0; i < cfg->num_varinfo; i++) {
12304 MonoInst *ins = cfg->varinfo [i];
12306 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12307 switch (ins->type) {
12312 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12315 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit component vregs (dreg+1, dreg+2) stack slots
 * overlapping the 64-bit parent's slot. */
12317 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12319 tree->opcode = OP_REGOFFSET;
12320 tree->inst_basereg = ins->inst_basereg;
12321 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12323 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12325 tree->opcode = OP_REGOFFSET;
12326 tree->inst_basereg = ins->inst_basereg;
12327 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12337 if (cfg->compute_gc_maps) {
12338 /* registers need liveness info even for !non refs */
12339 for (i = 0; i < cfg->num_varinfo; i++) {
12340 MonoInst *ins = cfg->varinfo [i];
12342 if (ins->opcode == OP_REGVAR)
12343 ins->flags |= MONO_INST_GC_TRACK;
12347 /* FIXME: widening and truncation */
12350 * As an optimization, when a variable allocated to the stack is first loaded into
12351 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12352 * the variable again.
12354 orig_next_vreg = cfg->next_vreg;
12355 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* NOTE(review): hard cap of 1024 cached lvregs per bblock; enforced only by
 * g_assert (lvregs_len < 1024) below — confirm this bound suffices. */
12356 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12360 * These arrays contain the first and last instructions accessing a given
12362 * Since we emit bblocks in the same order we process them here, and we
12363 * don't split live ranges, these will precisely describe the live range of
12364 * the variable, i.e. the instruction range where a valid value can be found
12365 * in the variables location.
12366 * The live range is computed using the liveness info computed by the liveness pass.
12367 * We can't use vmv->range, since that is an abstract live range, and we need
12368 * one which is instruction precise.
12369 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12371 /* FIXME: Only do this if debugging info is requested */
12372 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12373 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12374 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12375 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12377 /* Add spill loads/stores */
12378 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12381 if (cfg->verbose_level > 2)
12382 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12384 /* Clear vreg_to_lvreg array */
12385 for (i = 0; i < lvregs_len; i++)
12386 vreg_to_lvreg [lvregs [i]] = 0;
12390 MONO_BB_FOR_EACH_INS (bb, ins) {
12391 const char *spec = INS_INFO (ins->opcode);
12392 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12393 gboolean store, no_lvreg;
12394 int sregs [MONO_MAX_SRC_REGS];
12396 if (G_UNLIKELY (cfg->verbose_level > 2))
12397 mono_print_ins (ins);
12399 if (ins->opcode == OP_NOP)
12403 * We handle LDADDR here as well, since it can only be decomposed
12404 * when variable addresses are known.
12406 if (ins->opcode == OP_LDADDR) {
12407 MonoInst *var = ins->inst_p0;
12409 if (var->opcode == OP_VTARG_ADDR) {
12410 /* Happens on SPARC/S390 where vtypes are passed by reference */
12411 MonoInst *vtaddr = var->inst_left;
12412 if (vtaddr->opcode == OP_REGVAR) {
12413 ins->opcode = OP_MOVE;
12414 ins->sreg1 = vtaddr->dreg;
12416 else if (var->inst_left->opcode == OP_REGOFFSET) {
12417 ins->opcode = OP_LOAD_MEMBASE;
12418 ins->inst_basereg = vtaddr->inst_basereg;
12419 ins->inst_offset = vtaddr->inst_offset;
12423 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack variable = frame base + offset. */
12425 ins->opcode = OP_ADD_IMM;
12426 ins->sreg1 = var->inst_basereg;
12427 ins->inst_imm = var->inst_offset;
12430 *need_local_opts = TRUE;
12431 spec = INS_INFO (ins->opcode);
12434 if (ins->opcode < MONO_CEE_LAST) {
12435 mono_print_ins (ins);
12436 g_assert_not_reached ();
12440 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12444 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below
 * treats the base register as a source; swapped back near the end. */
12445 tmp_reg = ins->dreg;
12446 ins->dreg = ins->sreg2;
12447 ins->sreg2 = tmp_reg;
12450 spec2 [MONO_INST_DEST] = ' ';
12451 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12452 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12453 spec2 [MONO_INST_SRC3] = ' ';
12455 } else if (MONO_IS_STORE_MEMINDEX (ins))
12456 g_assert_not_reached ();
12461 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12462 printf ("\t %.3s %d", spec, ins->dreg);
12463 num_sregs = mono_inst_get_src_registers (ins, sregs);
12464 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12465 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
12472 regtype = spec [MONO_INST_DEST];
12473 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12476 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12477 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12478 MonoInst *store_ins;
12480 MonoInst *def_ins = ins;
12481 int dreg = ins->dreg; /* The original vreg */
12483 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12485 if (var->opcode == OP_REGVAR) {
12486 ins->dreg = var->dreg;
12487 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12489 * Instead of emitting a load+store, use a _membase opcode.
12491 g_assert (var->opcode == OP_REGOFFSET);
12492 if (ins->opcode == OP_MOVE) {
12496 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12497 ins->inst_basereg = var->inst_basereg;
12498 ins->inst_offset = var->inst_offset;
12501 spec = INS_INFO (ins->opcode);
12505 g_assert (var->opcode == OP_REGOFFSET);
12507 prev_dreg = ins->dreg;
12509 /* Invalidate any previous lvreg for this vreg */
12510 vreg_to_lvreg [ins->dreg] = 0;
12514 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12516 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg; a spill store follows. */
12519 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12521 #if SIZEOF_REGISTER != 8
12522 if (regtype == 'l') {
12523 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12524 mono_bblock_insert_after_ins (bb, ins, store_ins);
12525 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12526 mono_bblock_insert_after_ins (bb, ins, store_ins);
12527 def_ins = store_ins;
12532 g_assert (store_opcode != OP_STOREV_MEMBASE);
12534 /* Try to fuse the store into the instruction itself */
12535 /* FIXME: Add more instructions */
12536 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12537 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12538 ins->inst_imm = ins->inst_c0;
12539 ins->inst_destbasereg = var->inst_basereg;
12540 ins->inst_offset = var->inst_offset;
12541 spec = INS_INFO (ins->opcode);
12542 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12543 ins->opcode = store_opcode;
12544 ins->inst_destbasereg = var->inst_basereg;
12545 ins->inst_offset = var->inst_offset;
/* Converted to a store: swap dreg/sreg2 as for other stores. */
12549 tmp_reg = ins->dreg;
12550 ins->dreg = ins->sreg2;
12551 ins->sreg2 = tmp_reg;
12554 spec2 [MONO_INST_DEST] = ' ';
12555 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12556 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12557 spec2 [MONO_INST_SRC3] = ' ';
12559 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12560 // FIXME: The backends expect the base reg to be in inst_basereg
12561 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12563 ins->inst_basereg = var->inst_basereg;
12564 ins->inst_offset = var->inst_offset;
12565 spec = INS_INFO (ins->opcode);
12567 /* printf ("INS: "); mono_print_ins (ins); */
12568 /* Create a store instruction */
12569 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12571 /* Insert it after the instruction */
12572 mono_bblock_insert_after_ins (bb, ins, store_ins);
12574 def_ins = store_ins;
12577 * We can't assign ins->dreg to var->dreg here, since the
12578 * sregs could use it. So set a flag, and do it after
12581 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12582 dest_has_lvreg = TRUE;
12587 if (def_ins && !live_range_start [dreg]) {
12588 live_range_start [dreg] = def_ins;
12589 live_range_start_bb [dreg] = bb;
12592 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12595 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12596 tmp->inst_c1 = dreg;
12597 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/***************/
12604 num_sregs = mono_inst_get_src_registers (ins, sregs);
12605 for (srcindex = 0; srcindex < 3; ++srcindex) {
12606 regtype = spec [MONO_INST_SRC1 + srcindex];
12607 sreg = sregs [srcindex];
12609 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12610 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12611 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12612 MonoInst *use_ins = ins;
12613 MonoInst *load_ins;
12614 guint32 load_opcode;
12616 if (var->opcode == OP_REGVAR) {
12617 sregs [srcindex] = var->dreg;
12618 //mono_inst_set_src_registers (ins, sregs);
12619 live_range_end [sreg] = use_ins;
12620 live_range_end_bb [sreg] = bb;
12622 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12625 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12626 /* var->dreg is a hreg */
12627 tmp->inst_c1 = sreg;
12628 mono_bblock_insert_after_ins (bb, ins, tmp);
12634 g_assert (var->opcode == OP_REGOFFSET);
12636 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12638 g_assert (load_opcode != OP_LOADV_MEMBASE);
12640 if (vreg_to_lvreg [sreg]) {
12641 g_assert (vreg_to_lvreg [sreg] != -1);
12643 /* The variable is already loaded to an lvreg */
12644 if (G_UNLIKELY (cfg->verbose_level > 2))
12645 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12646 sregs [srcindex] = vreg_to_lvreg [sreg];
12647 //mono_inst_set_src_registers (ins, sregs);
12651 /* Try to fuse the load into the instruction */
12652 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12653 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12654 sregs [0] = var->inst_basereg;
12655 //mono_inst_set_src_registers (ins, sregs);
12656 ins->inst_offset = var->inst_offset;
12657 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12658 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12659 sregs [1] = var->inst_basereg;
12660 //mono_inst_set_src_registers (ins, sregs);
12661 ins->inst_offset = var->inst_offset;
12663 if (MONO_IS_REAL_MOVE (ins)) {
12664 ins->opcode = OP_NOP;
12667 //printf ("%d ", srcindex); mono_print_ins (ins);
12669 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12671 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12672 if (var->dreg == prev_dreg) {
12674 * sreg refers to the value loaded by the load
12675 * emitted below, but we need to use ins->dreg
12676 * since it refers to the store emitted earlier.
12680 g_assert (sreg != -1);
12681 vreg_to_lvreg [var->dreg] = sreg;
12682 g_assert (lvregs_len < 1024);
12683 lvregs [lvregs_len ++] = var->dreg;
12687 sregs [srcindex] = sreg;
12688 //mono_inst_set_src_registers (ins, sregs);
12690 #if SIZEOF_REGISTER != 8
12691 if (regtype == 'l') {
12692 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12693 mono_bblock_insert_before_ins (bb, ins, load_ins);
12694 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12695 mono_bblock_insert_before_ins (bb, ins, load_ins);
12696 use_ins = load_ins;
12701 #if SIZEOF_REGISTER == 4
12702 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12704 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12705 mono_bblock_insert_before_ins (bb, ins, load_ins);
12706 use_ins = load_ins;
12710 if (var->dreg < orig_next_vreg) {
12711 live_range_end [var->dreg] = use_ins;
12712 live_range_end_bb [var->dreg] = bb;
12715 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12718 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12719 tmp->inst_c1 = var->dreg;
12720 mono_bblock_insert_after_ins (bb, ins, tmp);
12724 mono_inst_set_src_registers (ins, sregs);
12726 if (dest_has_lvreg) {
12727 g_assert (ins->dreg != -1);
12728 vreg_to_lvreg [prev_dreg] = ins->dreg;
12729 g_assert (lvregs_len < 1024);
12730 lvregs [lvregs_len ++] = prev_dreg;
12731 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
12735 tmp_reg = ins->dreg;
12736 ins->dreg = ins->sreg2;
12737 ins->sreg2 = tmp_reg;
12740 if (MONO_IS_CALL (ins)) {
/* A call clobbers the lvregs: forget all cached loads. */
12741 /* Clear vreg_to_lvreg array */
12742 for (i = 0; i < lvregs_len; i++)
12743 vreg_to_lvreg [lvregs [i]] = 0;
12745 } else if (ins->opcode == OP_NOP) {
12747 MONO_INST_NULLIFY_SREGS (ins);
12750 if (cfg->verbose_level > 2)
12751 mono_print_ins_index (1, ins);
12754 /* Extend the live range based on the liveness info */
/* NOTE(review): the guard checks bb->live_out_set but the first test below
 * reads bb->live_in_set — confirm live_in_set is always non-NULL whenever
 * live_out_set is, or this can dereference NULL. */
12755 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12756 for (i = 0; i < cfg->num_varinfo; i ++) {
12757 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12759 if (vreg_is_volatile (cfg, vi->vreg))
12760 /* The liveness info is incomplete */
12763 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12764 /* Live from at least the first ins of this bb */
12765 live_range_start [vi->vreg] = bb->code;
12766 live_range_start_bb [vi->vreg] = bb;
12769 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12770 /* Live at least until the last ins of this bb */
12771 live_range_end [vi->vreg] = bb->last_ins;
12772 live_range_end_bb [vi->vreg] = bb;
12778 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12780 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12781 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12783 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12784 for (i = 0; i < cfg->num_varinfo; ++i) {
12785 int vreg = MONO_VARINFO (cfg, i)->vreg;
12788 if (live_range_start [vreg]) {
12789 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12791 ins->inst_c1 = vreg;
12792 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12794 if (live_range_end [vreg]) {
12795 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12797 ins->inst_c1 = vreg;
12798 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12799 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12801 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays are heap-allocated (g_new0), unlike the mempool
 * allocations above, so they must be freed here. */
12807 g_free (live_range_start);
12808 g_free (live_range_end);
12809 g_free (live_range_start_bb);
12810 g_free (live_range_end_bb);
12815 * - use 'iadd' instead of 'int_add'
12816 * - handling ovf opcodes: decompose in method_to_ir.
12817 * - unify iregs/fregs
12818 * -> partly done, the missing parts are:
12819 * - a more complete unification would involve unifying the hregs as well, so
12820 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12821 * would no longer map to the machine hregs, so the code generators would need to
12822 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12823 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12824 * fp/non-fp branches speeds it up by about 15%.
12825 * - use sext/zext opcodes instead of shifts
12827 * - get rid of TEMPLOADs if possible and use vregs instead
12828 * - clean up usage of OP_P/OP_ opcodes
12829 * - cleanup usage of DUMMY_USE
12830 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12832 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12833 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12834 * - make sure handle_stack_args () is called before the branch is emitted
12835 * - when the new IR is done, get rid of all unused stuff
12836 * - COMPARE/BEQ as separate instructions or unify them ?
12837 * - keeping them separate allows specialized compare instructions like
12838 * compare_imm, compare_membase
12839 * - most back ends unify fp compare+branch, fp compare+ceq
12840 * - integrate mono_save_args into inline_method
12841 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
12842 * - handle long shift opts on 32 bit platforms somehow: they require
12843 * 3 sregs (2 for arg1 and 1 for arg2)
12844 * - make byref a 'normal' type.
12845 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12846 * variable if needed.
12847 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12848 * like inline_method.
12849 * - remove inlining restrictions
12850 * - fix LNEG and enable cfold of INEG
12851 * - generalize x86 optimizations like ldelema as a peephole optimization
12852 * - add store_mem_imm for amd64
12853 * - optimize the loading of the interruption flag in the managed->native wrappers
12854 * - avoid special handling of OP_NOP in passes
12855 * - move code inserting instructions into one function/macro.
12856 * - try a coalescing phase after liveness analysis
12857 * - add float -> vreg conversion + local optimizations on !x86
12858 * - figure out how to handle decomposed branches during optimizations, ie.
12859 * compare+branch, op_jump_table+op_br etc.
12860 * - promote RuntimeXHandles to vregs
12861 * - vtype cleanups:
12862 * - add a NEW_VARLOADA_VREG macro
12863 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12864 * accessing vtype fields.
12865 * - get rid of I8CONST on 64 bit platforms
12866 * - dealing with the increase in code size due to branches created during opcode
12868 * - use extended basic blocks
12869 * - all parts of the JIT
12870 * - handle_global_vregs () && local regalloc
12871 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12872 * - sources of increase in code size:
12875 * - isinst and castclass
12876 * - lvregs not allocated to global registers even if used multiple times
12877 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12879 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12880 * - add all micro optimizations from the old JIT
12881 * - put tree optimizations into the deadce pass
12882 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12883 * specific function.
12884 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12885 * fcompare + branchCC.
12886 * - create a helper function for allocating a stack slot, taking into account
12887 * MONO_CFG_HAS_SPILLUP.
12889 * - merge the ia64 switch changes.
12890 * - optimize mono_regstate2_alloc_int/float.
12891 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12892 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12893 * parts of the tree could be separated by other instructions, killing the tree
12894 * arguments, or stores killing loads etc. Also, should we fold loads into other
12895 * instructions if the result of the load is used multiple times ?
12896 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12897 * - LAST MERGE: 108395.
12898 * - when returning vtypes in registers, generate IR and append it to the end of the
12899 * last bb instead of doing it in the epilog.
12900 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12908 - When to decompose opcodes:
12909 - earlier: this makes some optimizations hard to implement, since the low level IR
12910 no longer contains the necessary information. But it is easier to do.
12911 - later: harder to implement, enables more optimizations.
12912 - Branches inside bblocks:
12913 - created when decomposing complex opcodes.
12914 - branches to another bblock: harmless, but not tracked by the branch
12915 optimizations, so need to branch to a label at the start of the bblock.
12916 - branches to inside the same bblock: very problematic, trips up the local
12917 reg allocator. Can be fixed by splitting the current bblock, but that is a
12918 complex operation, since some local vregs can become global vregs etc.
12919 - Local/global vregs:
12920 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12921 local register allocator.
12922 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12923 structure, created by mono_create_var (). Assigned to hregs or the stack by
12924 the global register allocator.
12925 - When to do optimizations like alu->alu_imm:
12926 - earlier -> saves work later on since the IR will be smaller/simpler
12927 - later -> can work on more instructions
12928 - Handling of valuetypes:
12929 - When a vtype is pushed on the stack, a new temporary is created, an
12930 instruction computing its address (LDADDR) is emitted and pushed on
12931 the stack. Need to optimize cases when the vtype is used immediately as in
12932 argument passing, stloc etc.
12933 - Instead of the to_end stuff in the old JIT, simply call the function handling
12934 the values on the stack before emitting the last instruction of the bb.
12937 #endif /* DISABLE_JIT */