2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inlining heuristics: cost weight assigned to a branch, and the maximum
 * IL length a callee may have to be considered for inlining. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE: give up inlining the current callee and jump to the
 * inline_failure label. The condition (cfg->method != method) holds only while
 * emitting an inlined body; wrapper methods are excluded from the diagnostic.
 * NOTE(review): the macro body is truncated in this excerpt (closing
 * "} while (0)" not visible) -- confirm against the full source.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* CHECK_CFG_EXCEPTION: bail out of IL-to-IR conversion as soon as a pending
 * exception has been recorded on the MonoCompile.
 * NOTE(review): body truncated in this excerpt. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* METHOD_ACCESS_FAILURE: record a MethodAccessException-style compile failure
 * (full names of caller and callee in the message) and jump to exception_exit.
 * The g_strdup_printf result is owned by cfg->exception_message. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* FIELD_ACCESS_FAILURE: same pattern as METHOD_ACCESS_FAILURE, but for an
 * inaccessible field reference. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* GENERIC_SHARING_FAILURE: when compiling with a generic sharing context,
 * mark the compilation as MONO_EXCEPTION_GENERIC_SHARING_FAILED for the given
 * opcode and jump to exception_exit; outside a sharing context it is a no-op.
 * NOTE(review): macro bodies in this region are truncated in this excerpt. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* GSHAREDVT_FAILURE: same idea for gsharedvt (value-type generic sharing)
 * compilations; records a detailed message including file/line of the
 * failing opcode before bailing out. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* OUT_OF_MEMORY_FAILURE: record an OOM condition on the cfg and abort. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 /* Determine whether 'ins' represents a load of the 'this' argument */
125 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in
 * this file. */
127 static int ldind_to_load_membase (int opcode);
128 static int stind_to_store_membase (int opcode);
130 int mono_op_to_op_imm (int opcode);
131 int mono_op_to_op_imm_noemul (int opcode);
133 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
135 /* helper methods signatures */
/* Cached icall signatures for the various runtime trampolines; created once
 * in mono_create_helper_signatures () below. */
136 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
137 static MonoMethodSignature *helper_sig_domain_get = NULL;
138 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
140 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
141 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
142 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
145 * Instruction metadata
/* Per-opcode metadata tables generated by expanding mini-ops.h twice with
 * different MINI_OP/MINI_OP3 definitions: first to record dest/src register
 * kinds, then to record the source-register count per opcode. */
153 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
154 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
160 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
165 /* keep in sync with the enum in mini.h */
168 #include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
173 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
174 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
176 * This should contain the index of the last sreg + 1. This is not the same
177 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
179 const gint8 ins_sreg_counts[] = {
180 #include "mini-ops.h"
/* MONO_INIT_VARINFO: initialize a MonoMethodVar's liveness bookkeeping.
 * NOTE(review): body truncated in this excerpt. */
185 #define MONO_INIT_VARINFO(vi,id) do { \
186 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy up to three source registers from REGS into INS. REGS must have at
 * least 3 entries. */
192 mono_inst_set_src_registers (MonoInst *ins, int *regs)
194 ins->sreg1 = regs [0];
195 ins->sreg2 = regs [1];
196 ins->sreg3 = regs [2];
/* Thin public wrappers over the internal virtual-register allocators
 * (alloc_ireg/alloc_freg/alloc_preg/alloc_dreg), one per register class. */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_freg (MonoCompile *cfg)
208 return alloc_freg (cfg);
212 mono_alloc_preg (MonoCompile *cfg)
214 return alloc_preg (cfg);
218 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
220 return alloc_dreg (cfg, stack_type);
224 * mono_alloc_ireg_ref:
226 * Allocate an IREG, and mark it as holding a GC ref.
229 mono_alloc_ireg_ref (MonoCompile *cfg)
231 return alloc_ireg_ref (cfg);
235 * mono_alloc_ireg_mp:
237 * Allocate an IREG, and mark it as holding a managed pointer.
240 mono_alloc_ireg_mp (MonoCompile *cfg)
242 return alloc_ireg_mp (cfg);
246 * mono_alloc_ireg_copy:
248 * Allocate an IREG with the same GC type as VREG.
251 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
253 if (vreg_is_ref (cfg, vreg))
254 return alloc_ireg_ref (cfg);
255 else if (vreg_is_mp (cfg, vreg))
256 return alloc_ireg_mp (cfg);
258 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode appropriate for its register class
 * (integer/pointer/float/vtype). Enums recurse on their base type and
 * generic instances recurse on the container's byval type.
 * NOTE(review): large parts of this switch are missing from this excerpt
 * (return statements and several case labels) -- consult the full source. */
262 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
268 switch (type->type) {
271 case MONO_TYPE_BOOLEAN:
283 case MONO_TYPE_FNPTR:
285 case MONO_TYPE_CLASS:
286 case MONO_TYPE_STRING:
287 case MONO_TYPE_OBJECT:
288 case MONO_TYPE_SZARRAY:
289 case MONO_TYPE_ARRAY:
293 #if SIZEOF_REGISTER == 8
302 case MONO_TYPE_VALUETYPE:
303 if (type->data.klass->enumtype) {
/* Enum: retry with the underlying integral type. */
304 type = mono_class_enum_basetype (type->data.klass);
307 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
310 case MONO_TYPE_TYPEDBYREF:
312 case MONO_TYPE_GENERICINST:
313 type = &type->data.generic_class->container_class->byval_arg;
317 g_assert (cfg->generic_sharing_context);
318 if (mini_type_var_is_vt (cfg, type))
323 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump one basic block -- its in/out edges (block number and
 * depth-first number) followed by every instruction in the block. */
329 mono_print_bb (MonoBasicBlock *bb, const char *msg)
334 printf ("\n%s %d: [IN: ", msg, bb->block_num);
335 for (i = 0; i < bb->in_count; ++i)
336 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
338 for (i = 0; i < bb->out_count; ++i)
339 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
341 for (tree = bb->code; tree; tree = tree->next)
342 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached trampoline/icall signatures declared
 * above. Signature strings are "<ret> <args...>" in icall-signature notation. */
346 mono_create_helper_signatures (void)
348 helper_sig_domain_get = mono_create_icall_signature ("ptr");
349 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
350 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
351 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
352 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
353 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
354 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
358 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
359 * foo<T> (int i) { ldarg.0; box T; }
/* UNVERIFIED: handle IL that failed verification. Under gsharedvt this
 * triggers a fall back to compiling the concrete instantiation; otherwise it
 * optionally breaks into the debugger.
 * NOTE(review): tail of the macro is truncated in this excerpt. */
361 #define UNVERIFIED do { \
362 if (cfg->gsharedvt) { \
363 if (cfg->verbose_level > 2) \
364 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
365 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
366 goto exception_exit; \
368 if (mini_get_debug_options ()->break_on_unverified) \
/* LOAD_ERROR / TYPE_LOAD_ERROR: abort on a (type) load failure, optionally
 * breaking into the debugger first; TYPE_LOAD_ERROR also stashes the failing
 * class in cfg->exception_ptr. */
374 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
376 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* GET_BBLOCK: fetch (or lazily create and register) the basic block that
 * starts at IL address 'ip', validating that 'ip' lies inside the method body. */
378 #define GET_BBLOCK(cfg,tblock,ip) do { \
379 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
381 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
382 NEW_BBLOCK (cfg, (tblock)); \
383 (tblock)->cil_code = (ip); \
384 ADD_BBLOCK (cfg, (tblock)); \
388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86/amd64 LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh managed-pointer register and
 * append it to the current bblock. */
389 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
390 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
391 (dest)->dreg = alloc_ireg_mp ((cfg)); \
392 (dest)->sreg1 = (sr1); \
393 (dest)->sreg2 = (sr2); \
394 (dest)->inst_imm = (imm); \
395 (dest)->backend.shift_amount = (shift); \
396 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 #if SIZEOF_REGISTER == 8
/* ADD_WIDEN_OP (64-bit only): when mixing a native-pointer operand with an
 * I4 operand, sign-extend the I4 side first and rewire the instruction to use
 * the widened register. On 32-bit targets this is a no-op (see below). */
401 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
402 /* FIXME: Need to add many more cases */ \
403 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
405 int dr = alloc_preg (cfg); \
406 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
407 (ins)->sreg2 = widen->dreg; \
411 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP: pop the two top stack values (sp[0], sp[1]), build the binary
 * instruction 'op', let type_from_op () pick the type-specific opcode, widen
 * mixed 32/64-bit operands if needed, and push the (possibly decomposed)
 * result back on the eval stack.
 * NOTE(review): pieces of this macro (stack-pointer adjustment, closing
 * while(0)) are not visible in this excerpt. */
414 #define ADD_BINOP(op) do { \
415 MONO_INST_NEW (cfg, ins, (op)); \
417 ins->sreg1 = sp [0]->dreg; \
418 ins->sreg2 = sp [1]->dreg; \
419 type_from_op (ins, sp [0], sp [1]); \
421 /* Have to insert a widening op */ \
422 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
423 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
424 MONO_ADD_INS ((cfg)->cbb, (ins)); \
425 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for a single-operand opcode. */
428 #define ADD_UNOP(op) do { \
429 MONO_INST_NEW (cfg, ins, (op)); \
431 ins->sreg1 = sp [0]->dreg; \
432 type_from_op (ins, sp [0], NULL); \
434 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
435 MONO_ADD_INS ((cfg)->cbb, (ins)); \
436 *sp++ = mono_decompose_opcode (cfg, ins); \
/* ADD_BINCOND: emit a two-operand conditional branch -- an OP_COMPARE of the
 * two top stack values followed by the branch instruction 'ins'. Resolves the
 * true target from 'target' and the false target either from the supplied
 * next_block or from the fall-through ip, linking CFG edges for both; spills
 * any remaining eval-stack entries before ending the bblock.
 * NOTE(review): several lines of this macro are missing in this excerpt. */
439 #define ADD_BINCOND(next_block) do { \
442 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
443 cmp->sreg1 = sp [0]->dreg; \
444 cmp->sreg2 = sp [1]->dreg; \
445 type_from_op (cmp, sp [0], sp [1]); \
447 type_from_op (ins, sp [0], sp [1]); \
448 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
449 GET_BBLOCK (cfg, tblock, target); \
450 link_bblock (cfg, bblock, tblock); \
451 ins->inst_true_bb = tblock; \
452 if ((next_block)) { \
453 link_bblock (cfg, bblock, (next_block)); \
454 ins->inst_false_bb = (next_block); \
455 start_new_bblock = 1; \
457 GET_BBLOCK (cfg, tblock, ip); \
458 link_bblock (cfg, bblock, tblock); \
459 ins->inst_false_bb = tblock; \
460 start_new_bblock = 2; \
462 if (sp != stack_start) { \
463 handle_stack_args (cfg, stack_start, sp - stack_start); \
464 CHECK_UNVERIFIABLE (cfg); \
466 MONO_ADD_INS (bblock, cmp); \
467 MONO_ADD_INS (bblock, ins); \
471 * link_bblock: Links two basic blocks
473 * links two basic blocks in the control flow graph, the 'from'
474 * argument is the starting block and the 'to' argument is the block
475 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping the update if
 * the edge already exists. Edge arrays are grown by allocating a fresh
 * (count + 1)-element array from the cfg mempool and copying the old entries.
 * NOTE(review): verbosity guards and the array-swap lines are not visible in
 * this excerpt. */
478 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
480 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit vs. IL offsets). */
484 if (from->cil_code) {
486 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
488 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
491 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
493 printf ("edge from entry to exit\n");
/* Skip if the out-edge is already present. */
498 for (i = 0; i < from->out_count; ++i) {
499 if (to == from->out_bb [i]) {
505 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
506 for (i = 0; i < from->out_count; ++i) {
507 newa [i] = from->out_bb [i];
/* Mirror the same logic for the in-edge on 'to'. */
515 for (i = 0; i < to->in_count; ++i) {
516 if (from == to->in_bb [i]) {
522 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
523 for (i = 0; i < to->in_count; ++i) {
524 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
533 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
535 link_bblock (cfg, from, to);
539 * mono_find_block_region:
541 * We mark each basic block with a region ID. We use that to avoid BB
542 * optimizations when blocks are in different regions.
545 * A region token that encodes where this region is, and information
546 * about the clause owner for this block.
548 * The region encodes the try/catch/filter clause that owns this block
549 * as well as the type. -1 is a special value that represents a block
550 * that is in none of try/catch/filter.
/* Token layout (as visible below): ((clause_index + 1) << 8) OR'ed with a
 * MONO_REGION_* kind and the clause flags. Filter ranges are checked first
 * because a filter block lies outside the clause's try/handler ranges. */
553 mono_find_block_region (MonoCompile *cfg, int offset)
555 MonoMethodHeader *header = cfg->header;
556 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
562 (offset < (clause->handler_offset)))
563 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
565 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
567 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
568 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
569 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else inside a handler is a catch region. */
571 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not the handler. */
574 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
575 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind 'type' whose protected range contains
 * 'ip' but not 'target' -- i.e. the handlers (e.g. finally blocks) that must
 * run when control transfers from ip to target. Returns a GList of clauses.
 * NOTE(review): the declaration/initialization of 'res' and the return are
 * not visible in this excerpt. */
582 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
584 MonoMethodHeader *header = cfg->header;
585 MonoExceptionClause *clause;
589 for (i = 0; i < header->num_clauses; ++i) {
590 clause = &header->clauses [i];
591 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
592 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
593 if (clause->flags == type)
594 res = g_list_append (res, clause);
/* Look up (or lazily create) the stack-pointer variable associated with an
 * exception region; created vars are made MONO_INST_VOLATILE so the register
 * allocator leaves them on the stack. Cached in cfg->spvars keyed by region. */
601 mono_create_spvar_for_region (MonoCompile *cfg, int region)
605 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_VOLATILE;
613 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception-object variable for an IL offset, or NULL if none. */
617 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
619 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Same lazy-create pattern as mono_create_spvar_for_region, but for the
 * exception-object variable of a handler at 'offset' (cfg->exvars). */
623 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
627 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
632 /* prevent it from being register allocated */
633 var->flags |= MONO_INST_VOLATILE;
635 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
641 * Returns the type used in the eval stack when @type is loaded.
642 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (a STACK_* value) and inst->klass for the value produced by
 * loading 'type'. Enums and generic instances recurse on the underlying type.
 * NOTE(review): several case labels and break statements are not visible in
 * this excerpt -- the groupings below reflect only the visible lines. */
645 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
649 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
651 inst->type = STACK_MP;
656 switch (type->type) {
658 inst->type = STACK_INV;
662 case MONO_TYPE_BOOLEAN:
668 inst->type = STACK_I4;
673 case MONO_TYPE_FNPTR:
674 inst->type = STACK_PTR;
676 case MONO_TYPE_CLASS:
677 case MONO_TYPE_STRING:
678 case MONO_TYPE_OBJECT:
679 case MONO_TYPE_SZARRAY:
680 case MONO_TYPE_ARRAY:
681 inst->type = STACK_OBJ;
685 inst->type = STACK_I8;
689 inst->type = STACK_R8;
691 case MONO_TYPE_VALUETYPE:
692 if (type->data.klass->enumtype) {
693 type = mono_class_enum_basetype (type->data.klass);
697 inst->type = STACK_VTYPE;
700 case MONO_TYPE_TYPEDBYREF:
701 inst->klass = mono_defaults.typed_reference_class;
702 inst->type = STACK_VTYPE;
704 case MONO_TYPE_GENERICINST:
705 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing; gsharedvt vars are
 * treated as vtypes, other shared vars as object references. */
709 g_assert (cfg->generic_sharing_context);
710 if (mini_is_gsharedvt_type (cfg, type)) {
711 g_assert (cfg->gsharedvt);
712 inst->type = STACK_VTYPE;
714 inst->type = STACK_OBJ;
718 g_error ("unknown type 0x%02x in eval stack type", type->type);
723 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop, indexed by [lhs STACK_*][rhs STACK_*];
 * STACK_INV marks an invalid operand combination. */
726 bin_num_table [STACK_MAX] [STACK_MAX] = {
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
730 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand STACK_* kind (neg_table). */
739 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
742 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor/rem...). */
744 bin_int_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values > 1 encode
 * special-cased but questionable combinations per the row comments). */
756 bin_comp_table [STACK_MAX] [STACK_MAX] = {
757 /* Inv i L p F & O vt */
759 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
760 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
761 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
762 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
763 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
764 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
765 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
768 /* reduce the size of this table */
/* Result type of shift ops, indexed by [value type][shift-amount type]. */
770 shift_table [STACK_MAX] [STACK_MAX] = {
771 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
772 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
773 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
774 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
782 * Tables to map from the non-specific opcode to the matching
783 * type-specific opcode.
/* Each *_op_map entry is an offset added to the generic CEE_* opcode to get
 * the typed OP_I*/OP_L*/OP_P*/OP_F* variant, indexed by STACK_* kind. */
785 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
787 binops_op_map [STACK_MAX] = {
788 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
791 /* handles from CEE_NEG to CEE_CONV_U8 */
793 unops_op_map [STACK_MAX] = {
794 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
797 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
799 ovfops_op_map [STACK_MAX] = {
800 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
803 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
805 ovf2ops_op_map [STACK_MAX] = {
806 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
809 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
811 ovf3ops_op_map [STACK_MAX] = {
812 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
815 /* handles from CEE_BEQ to CEE_BLT_UN */
817 beqops_op_map [STACK_MAX] = {
818 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
821 /* handles from CEE_CEQ to CEE_CLT_UN */
823 ceqops_op_map [STACK_MAX] = {
824 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
828 * Sets ins->type (the type on the eval stack) according to the
829 * type of the opcode and the arguments to it.
830 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
832 * FIXME: this function sets ins->type unconditionally in some cases, but
833 * it should set it to invalid for some types (a conv.x on an object)
/* Central opcode-typing routine: derives ins->type from the operand types via
 * the tables above and rewrites ins->opcode from the generic CEE_* form to the
 * size-specific OP_* form (via the *_op_map offset tables).
 * NOTE(review): many case labels and break statements are missing from this
 * excerpt; the visible lines below are a partial view of the switch. */
836 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
838 switch (ins->opcode) {
/* Arithmetic binops: type from bin_num_table, opcode from binops_op_map. */
845 /* FIXME: check unverifiable args for STACK_MP */
846 ins->type = bin_num_table [src1->type] [src2->type];
847 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops. */
854 ins->type = bin_int_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
860 ins->type = shift_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Compare: choose L/F/I variant from the first operand's width. */
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
868 ins->opcode = OP_LCOMPARE;
869 else if (src1->type == STACK_R8)
870 ins->opcode = OP_FCOMPARE;
872 ins->opcode = OP_ICOMPARE;
874 case OP_ICOMPARE_IMM:
875 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
876 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
877 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (CEE_BEQ..): pick the typed branch opcode. */
889 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt family: result is I4 when the comparison is valid. */
892 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
893 ins->opcode += ceqops_op_map [src1->type];
899 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
900 ins->opcode += ceqops_op_map [src1->type];
/* neg: result type from neg_table. */
904 ins->type = neg_table [src1->type];
905 ins->opcode += unops_op_map [ins->type];
/* not: only integral/pointer operands are valid. */
908 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
909 ins->type = src1->type;
911 ins->type = STACK_INV;
912 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/U1/I2/U2/I4/U4 produce an I4. */
918 ins->type = STACK_I4;
919 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned-to-float conversion. */
922 ins->type = STACK_R8;
923 switch (src1->type) {
926 ins->opcode = OP_ICONV_TO_R_UN;
929 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
933 case CEE_CONV_OVF_I1:
934 case CEE_CONV_OVF_U1:
935 case CEE_CONV_OVF_I2:
936 case CEE_CONV_OVF_U2:
937 case CEE_CONV_OVF_I4:
938 case CEE_CONV_OVF_U4:
939 ins->type = STACK_I4;
940 ins->opcode += ovf3ops_op_map [src1->type];
942 case CEE_CONV_OVF_I_UN:
943 case CEE_CONV_OVF_U_UN:
944 ins->type = STACK_PTR;
945 ins->opcode += ovf2ops_op_map [src1->type];
947 case CEE_CONV_OVF_I1_UN:
948 case CEE_CONV_OVF_I2_UN:
949 case CEE_CONV_OVF_I4_UN:
950 case CEE_CONV_OVF_U1_UN:
951 case CEE_CONV_OVF_U2_UN:
952 case CEE_CONV_OVF_U4_UN:
953 ins->type = STACK_I4;
954 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u / conv.i: native-size conversion; the opcode depends on the
 * source kind and, for I8 sources, on the pointer width. */
957 ins->type = STACK_PTR;
958 switch (src1->type) {
960 ins->opcode = OP_ICONV_TO_U;
964 #if SIZEOF_VOID_P == 8
965 ins->opcode = OP_LCONV_TO_U;
967 ins->opcode = OP_MOVE;
971 ins->opcode = OP_LCONV_TO_U;
974 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit results. */
980 ins->type = STACK_I8;
981 ins->opcode += unops_op_map [src1->type];
983 case CEE_CONV_OVF_I8:
984 case CEE_CONV_OVF_U8:
985 ins->type = STACK_I8;
986 ins->opcode += ovf3ops_op_map [src1->type];
988 case CEE_CONV_OVF_U8_UN:
989 case CEE_CONV_OVF_I8_UN:
990 ins->type = STACK_I8;
991 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
995 ins->type = STACK_R8;
996 ins->opcode += unops_op_map [src1->type];
999 ins->type = STACK_R8;
1003 ins->type = STACK_I4;
1004 ins->opcode += ovfops_op_map [src1->type];
1007 case CEE_CONV_OVF_I:
1008 case CEE_CONV_OVF_U:
1009 ins->type = STACK_PTR;
1010 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1013 case CEE_ADD_OVF_UN:
1015 case CEE_MUL_OVF_UN:
1017 case CEE_SUB_OVF_UN:
1018 ins->type = bin_num_table [src1->type] [src2->type];
1019 ins->opcode += ovfops_op_map [src1->type];
1020 if (ins->type == STACK_R8)
1021 ins->type = STACK_INV;
/* Memory loads: result type follows the load width. */
1023 case OP_LOAD_MEMBASE:
1024 ins->type = STACK_PTR;
1026 case OP_LOADI1_MEMBASE:
1027 case OP_LOADU1_MEMBASE:
1028 case OP_LOADI2_MEMBASE:
1029 case OP_LOADU2_MEMBASE:
1030 case OP_LOADI4_MEMBASE:
1031 case OP_LOADU4_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI8_MEMBASE:
1035 ins->type = STACK_I8;
1037 case OP_LOADR4_MEMBASE:
1038 case OP_LOADR8_MEMBASE:
1039 ins->type = STACK_R8;
1042 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack default to object as their klass. */
1046 if (ins->type == STACK_MP)
1047 ins->klass = mono_defaults.object_class;
/* Fragment of a MONO_TYPE_* -> STACK_* lookup table (ldind_type or similar --
 * the declaration line is not visible in this excerpt). */
1052 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1058 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the eval-stack values in 'args' are compatible with the
 * parameter types of 'sig'.
 * NOTE(review): most of the body (return statements, case labels) is missing
 * from this excerpt; only the control-flow skeleton is visible. */
1063 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1067 switch (args->type) {
1077 for (i = 0; i < sig->param_count; ++i) {
1078 switch (args [i].type) {
1082 if (!sig->params [i]->byref)
1086 if (sig->params [i]->byref)
1088 switch (sig->params [i]->type) {
1089 case MONO_TYPE_CLASS:
1090 case MONO_TYPE_STRING:
1091 case MONO_TYPE_OBJECT:
1092 case MONO_TYPE_SZARRAY:
1093 case MONO_TYPE_ARRAY:
1100 if (sig->params [i]->byref)
1102 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1111 /*if (!param_table [args [i].type] [sig->params [i]->type])
1119 * When we need a pointer to the current domain many times in a method, we
1120 * call mono_domain_get() once and we store the result in a local variable.
1121 * This function returns the variable that represents the MonoDomain*.
1123 inline static MonoInst *
/* Lazily create and cache the per-method domain variable on the cfg. */
1124 mono_get_domainvar (MonoCompile *cfg)
1126 if (!cfg->domainvar)
1127 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 return cfg->domainvar;
1132 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on architectures that
 * define MONO_ARCH_NEED_GOT_VAR and when compiling AOT.
 * NOTE(review): the non-AOT and #else paths are truncated in this excerpt. */
1136 mono_get_got_var (MonoCompile *cfg)
1138 #ifdef MONO_ARCH_NEED_GOT_VAR
1139 if (!cfg->compile_aot)
1141 if (!cfg->got_var) {
1142 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1144 return cfg->got_var;
/* Lazily create the runtime-generic-context (rgctx) variable; requires a
 * generic sharing context and keeps the var stack-allocated. */
1151 mono_get_vtable_var (MonoCompile *cfg)
1153 g_assert (cfg->generic_sharing_context);
1155 if (!cfg->rgctx_var) {
1156 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1157 /* force the var to be stack allocated */
1158 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1161 return cfg->rgctx_var;
/* Map an instruction's eval-stack type (STACK_*) back to a MonoType*, using
 * ins->klass for managed pointers and value types. Aborts on unknown types. */
1165 type_from_stack_type (MonoInst *ins) {
1166 switch (ins->type) {
1167 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1168 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1169 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1170 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1172 return &ins->klass->this_arg;
1173 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1174 case STACK_VTYPE: return &ins->klass->byval_arg;
1176 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 static G_GNUC_UNUSED int
/* Map a MonoType to its STACK_* eval-stack kind, after stripping enum
 * wrappers via mono_type_get_underlying_type ().
 * NOTE(review): the return statements between these case labels are missing
 * from this excerpt -- the groupings are only partially visible. */
1182 type_to_stack_type (MonoType *t)
1184 t = mono_type_get_underlying_type (t);
1188 case MONO_TYPE_BOOLEAN:
1191 case MONO_TYPE_CHAR:
1198 case MONO_TYPE_FNPTR:
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1212 case MONO_TYPE_VALUETYPE:
1213 case MONO_TYPE_TYPEDBYREF:
1215 case MONO_TYPE_GENERICINST:
1216 if (mono_type_generic_inst_is_valuetype (t))
1222 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it accesses.
 * NOTE(review): the case labels preceding most return statements are missing
 * from this excerpt -- each return corresponds to one element-type opcode in
 * the full source. */
1229 array_access_to_klass (int opcode)
1233 return mono_defaults.byte_class;
1235 return mono_defaults.uint16_class;
1238 return mono_defaults.int_class;
1241 return mono_defaults.sbyte_class;
1244 return mono_defaults.int16_class;
1247 return mono_defaults.int32_class;
1249 return mono_defaults.uint32_class;
1252 return mono_defaults.int64_class;
1255 return mono_defaults.single_class;
1258 return mono_defaults.double_class;
1259 case CEE_LDELEM_REF:
1260 case CEE_STELEM_REF:
1261 return mono_defaults.object_class;
1263 g_assert_not_reached ();
1269 * We try to share variables when possible
/* Return a local variable used to carry an eval-stack value of ins->type
 * across basic blocks at stack slot 'slot'. Integer-class slots are cached
 * per (type, slot) in cfg->intvars so the same variable is reused; other
 * types always get a fresh variable. */
1272 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1277 /* inlining can result in deeper stacks */
1278 if (slot >= cfg->header->max_stack)
1279 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1281 pos = ins->type - 1 + slot * STACK_MAX;
1283 switch (ins->type) {
1290 if ((vnum = cfg->intvars [pos]))
1291 return cfg->varinfo [vnum];
1292 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1293 cfg->intvars [pos] = res->inst_c0;
1296 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so the AOT
 * compiler can later resolve the item from metadata alone. Skipped when a
 * generic context is active or the token has no metadata table. */
1302 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1305 * Don't use this if a generic_context is set, since that means AOT can't
1306 * look up the method using just the image+token.
1307 * table == 0 means this is a reference made from a wrapper.
1309 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1310 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1311 jump_info_token->image = image;
1312 jump_info_token->token = token;
1313 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1318 * This function is called to handle items that are left on the evaluation stack
1319 * at basic block boundaries. What happens is that we save the values to local variables
1320 * and we reload them later when first entering the target basic block (with the
1321 * handle_loaded_temps () function).
1322 * A single join point will use the same variables (stored in the array bb->out_stack or
1323 * bb->in_stack, if the basic block is before or after the join point).
1325 * This function needs to be called _before_ emitting the last instruction of
1326 * the bb (i.e. before emitting a branch).
1327 * If the stack merge fails at a join point, cfg->unverifiable is set.
1330 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1333 MonoBasicBlock *bb = cfg->cbb;
1334 MonoBasicBlock *outb;
1335 MonoInst *inst, **locals;
1340 if (cfg->verbose_level > 3)
1341 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick (or allocate) the variables used to spill the stack items. */
1342 if (!bb->out_scount) {
1343 bb->out_scount = count;
1344 //printf ("bblock %d has out:", bb->block_num);
/* First see if a successor already has an in_stack we can adopt. */
1346 for (i = 0; i < bb->out_count; ++i) {
1347 outb = bb->out_bb [i];
1348 /* exception handlers are linked, but they should not be considered for stack args */
1349 if (outb->flags & BB_EXCEPTION_HANDLER)
1351 //printf (" %d", outb->block_num);
1352 if (outb->in_stack) {
1354 bb->out_stack = outb->in_stack;
/* Otherwise allocate a fresh out_stack array from the compile mempool. */
1360 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1361 for (i = 0; i < count; ++i) {
1363 * try to reuse temps already allocated for this purpose, if they occupy the same
1364 * stack slot and if they are of the same type.
1365 * This won't cause conflicts since if 'local' is used to
1366 * store one of the values in the in_stack of a bblock, then
1367 * the same variable will be used for the same outgoing stack
1369 * This doesn't work when inlining methods, since the bblocks
1370 * in the inlined methods do not inherit their in_stack from
1371 * the bblock they are inlined to. See bug #58863 for an
1374 if (cfg->inlined_method)
1375 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1377 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate our out_stack as the in_stack of every successor. */
1382 for (i = 0; i < bb->out_count; ++i) {
1383 outb = bb->out_bb [i];
1384 /* exception handlers are linked, but they should not be considered for stack args */
1385 if (outb->flags & BB_EXCEPTION_HANDLER)
1387 if (outb->in_scount) {
1388 if (outb->in_scount != bb->out_scount) {
/* stack depth mismatch at a join point -> the IL is not verifiable */
1389 cfg->unverifiable = TRUE;
1392 continue; /* check they are the same locals */
1394 outb->in_scount = count;
1395 outb->in_stack = bb->out_stack;
1398 locals = bb->out_stack;
/* Phase 3: emit the stores which spill the stack items into the variables,
 * and replace the stack entries with loads of those variables. */
1400 for (i = 0; i < count; ++i) {
1401 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1402 inst->cil_code = sp [i]->cil_code;
1403 sp [i] = locals [i];
1404 if (cfg->verbose_level > 3)
1405 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1409 * It is possible that the out bblocks already have in_stack assigned, and
1410 * the in_stacks differ. In this case, we will store to all the different
1417 /* Find a bblock which has a different in_stack */
1419 while (bindex < bb->out_count) {
1420 outb = bb->out_bb [bindex];
1421 /* exception handlers are linked, but they should not be considered for stack args */
1422 if (outb->flags & BB_EXCEPTION_HANDLER) {
1426 if (outb->in_stack != locals) {
/* store the same values again, into this successor's own variables */
1427 for (i = 0; i < count; ++i) {
1428 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1429 inst->cil_code = sp [i]->cil_code;
1430 sp [i] = locals [i];
1431 if (cfg->verbose_level > 3)
1432 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1434 locals = outb->in_stack;
1443 /* Emit code which loads interface_offsets [klass->interface_id]
1444 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface-offsets entry for KLASS, given the
 * vtable pointer in VTABLE_REG. Under AOT the (already adjusted) interface
 * id is patched in at load time; under JIT a constant negative offset from
 * the vtable is used, since the offsets array lives just below the vtable.
 */
1447 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1449 if (cfg->compile_aot) {
1450 int ioffset_reg = alloc_preg (cfg);
1451 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is pre-scaled/negated by the AOT runtime, so a plain add works */
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT path: interface_id is a compile-time constant */
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Load into INTF_BIT_REG a nonzero value iff the interface bitmap found at
 * BASE_REG + OFFSET has the bit for KLASS's interface id set. Three code
 * paths: compressed bitmaps go through the mono_class_interface_match icall;
 * otherwise AOT computes the byte/bit position at runtime from a patched
 * interface id, while JIT bakes both into immediates.
 */
1463 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1465 int ibitmap_reg = alloc_preg (cfg);
1466 #ifdef COMPRESSED_INTERFACE_BITMAP
1468 MonoInst *res, *ins;
1469 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1470 MONO_ADD_INS (cfg->cbb, ins);
1472 if (cfg->compile_aot)
1473 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1475 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* the icall decodes the compressed bitmap and returns the match result */
1476 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1479 int ibitmap_byte_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1483 if (cfg->compile_aot) {
/* iid is only known at load time: compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted IR */
1484 int iid_reg = alloc_preg (cfg);
1485 int shifted_iid_reg = alloc_preg (cfg);
1486 int ibitmap_byte_address_reg = alloc_preg (cfg);
1487 int masked_iid_reg = alloc_preg (cfg);
1488 int iid_one_bit_reg = alloc_preg (cfg);
1489 int iid_bit_reg = alloc_preg (cfg);
1490 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1492 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1495 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1496 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1497 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: byte index and bit mask are compile-time constants */
1499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1506 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1507 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoClass.interface_bitmap. */
1510 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1512 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1516 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1517 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoVTable.interface_bitmap. */
1520 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1522 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1526 * Emit code which checks whether the interface id of @klass is smaller
1527 * than the value given by max_iid_reg.
/* If FALSE_TARGET is non-NULL a branch to it is emitted on failure;
 * otherwise an InvalidCastException is thrown.
 * NOTE(review): the if/else selecting between the two out-of-range actions
 * is elided in this listing. */
1530 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 if (cfg->compile_aot) {
1534 int iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1536 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1546 /* Same as above, but obtains max_iid from a vtable */
1548 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1549 MonoBasicBlock *false_target)
1551 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1554 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1557 /* Same as above, but obtains max_iid from a klass */
1559 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1560 MonoBasicBlock *false_target)
1562 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1565 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test: check that the class in KLASS_REG is
 * a subclass of KLASS by comparing the entry at depth (idepth - 1) of its
 * supertypes table, branching to TRUE_TARGET on match / FALSE_TARGET on a
 * depth mismatch. KLASS may be represented as a runtime MonoInst
 * (KLASS_INS), an AOT constant, or an immediate pointer.
 * NOTE(review): the if selecting the KLASS_INS branch at 1585 is elided in
 * this listing, as is part of the depth-check control flow.
 */
1569 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int idepth_reg = alloc_preg (cfg);
1572 int stypes_reg = alloc_preg (cfg);
1573 int stype = alloc_preg (cfg);
/* make sure klass->supertypes/idepth are initialized */
1575 mono_class_setup_supertypes (klass);
1577 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* deep hierarchies: verify the candidate's idepth is large enough first */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1586 } else if (cfg->compile_aot) {
1587 int const_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class MonoInst. */
1597 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in VTABLE_REG: range
 * check the interface id, then test the interface bitmap bit.
 * NOTE(review): the if/else choosing between the branch form (true/false
 * targets supplied) and the throwing form is elided in this listing.
 */
1603 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1605 int intf_reg = alloc_preg (cfg);
1607 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1608 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1613 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1617 * Variant of the above that takes a register to the class, not the vtable.
/* NOTE(review): as above, the if/else choosing branch-vs-throw is elided. */
1620 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1622 int intf_bit_reg = alloc_preg (cfg);
1624 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1625 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS, throwing
 * InvalidCastException on mismatch. The expected class can come from a
 * runtime MonoInst (KLASS_INST), an AOT class constant, or an immediate.
 * NOTE(review): the `if (klass_inst)` condition before 1637 is elided in
 * this listing.
 */
1634 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1638 } else if (cfg->compile_aot) {
1639 int const_reg = alloc_preg (cfg);
1640 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class MonoInst. */
1649 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1651 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS (AOT constant or immediate) and branch
 * to TARGET using the condition given by BRANCH_OP.
 */
1655 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1657 if (cfg->compile_aot) {
1658 int const_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses through it for
 * arrays of arrays. */
1668 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG (object in OBJ_REG)
 * against KLASS, throwing InvalidCastException on failure. Handles array
 * types (rank + element-class check, with special cases for enum-related
 * element types) and non-array types (supertypes-table check).
 * NOTE(review): this listing elides lines, including the outer
 * array/non-array dispatch and some closing braces.
 */
1671 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1674 int rank_reg = alloc_preg (cfg);
1675 int eclass_reg = alloc_preg (cfg);
1677 g_assert (!klass_inst);
/* array case: the rank must match exactly */
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1681 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1683 if (klass->cast_class == mono_defaults.object_class) {
1684 int parent_reg = alloc_preg (cfg);
/* an enum array can also be cast to object[]-compatible types */
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1686 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1687 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1688 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1689 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class) {
1692 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1693 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1694 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1696 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1697 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1700 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1701 /* Check that the object is a vector too */
1702 int bounds_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: walk-free subtype check via the supertypes table */
1708 int idepth_reg = alloc_preg (cfg);
1709 int stypes_reg = alloc_preg (cfg);
1710 int stype = alloc_preg (cfg);
1712 mono_class_setup_supertypes (klass);
1714 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1717 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1721 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no runtime class MonoInst. */
1726 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1728 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR which fills SIZE bytes at DESTREG + OFFSET with VAL (only 0 is
 * supported, see the assert). Uses single sized stores when the region is
 * small and aligned, otherwise a sequence of register stores, falling back
 * to byte stores for misaligned prefixes/suffixes.
 * NOTE(review): the switch/loop scaffolding and size/offset bookkeeping
 * lines are elided in this listing.
 */
1732 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1736 g_assert (val == 0);
1741 if ((size <= 4) && (size <= align)) {
1744 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1752 #if SIZEOF_REGISTER == 8
1754 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize the value once, then emit a store sequence */
1760 val_reg = alloc_preg (cfg);
1762 if (SIZEOF_REGISTER == 8)
1763 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1765 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1768 /* This could be optimized further if necessary */
/* byte stores for the misaligned prefix */
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1777 #if !NO_UNALIGNED_ACCESS
1778 if (SIZEOF_REGISTER == 8) {
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit IR copying SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET,
 * using the widest loads/stores the alignment allows (8/4/2/1 bytes), with
 * byte copies for misaligned prefixes and the tail.
 * NOTE(review): the loop scaffolding and size/offset bookkeeping lines are
 * elided in this listing.
 */
1810 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1817 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1818 g_assert (size < 10000);
1821 /* This could be optimized further if necessary */
/* byte copies for the misaligned prefix */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 #if !NO_UNALIGNED_ACCESS
1833 if (SIZEOF_REGISTER == 8) {
/* 8-byte copies while they fit */
1835 cur_reg = alloc_preg (cfg);
1836 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* then 4-, 2-, and 1-byte copies for the remainder */
1846 cur_reg = alloc_preg (cfg);
1847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing the value in SREG1 into the TLS slot identified by
 * TLS_KEY. AOT uses OP_TLS_SET_REG with a patched offset constant; JIT uses
 * OP_TLS_SET with the offset resolved at compile time.
 */
1872 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1876 if (cfg->compile_aot) {
/* the TLS offset is only known at load time -> emit an offset constant */
1877 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1878 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1880 ins->sreg2 = c->dreg;
1881 MONO_ADD_INS (cfg->cbb, ins);
1883 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1885 ins->inst_offset = mini_get_tls_offset (tls_key);
1886 MONO_ADD_INS (cfg->cbb, ins);
1893 * Emit IR to push the current LMF onto the LMF stack.
/* Two strategies: when cfg->lmf_ir_mono_lmf is set the current MonoLMF is
 * reachable directly from TLS and only previous_lmf needs linking;
 * otherwise the lmf_addr is fetched (intrinsic or icall), cached in
 * cfg->lmf_addr_var, and the new LMF is linked in front of *lmf_addr.
 * NOTE(review): some branch/brace lines are elided in this listing. */
1896 emit_push_lmf (MonoCompile *cfg)
1899 * Emit IR to push the LMF:
1900 * lmf_addr = <lmf_addr from tls>
1901 * lmf->lmf_addr = lmf_addr
1902 * lmf->prev_lmf = *lmf_addr
1905 int lmf_reg, prev_lmf_reg;
1906 MonoInst *ins, *lmf_ins;
1911 if (cfg->lmf_ir_mono_lmf) {
1912 /* Load current lmf */
1913 lmf_ins = mono_get_lmf_intrinsic (cfg);
1915 MONO_ADD_INS (cfg->cbb, lmf_ins);
1916 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1917 lmf_reg = ins->dreg;
1918 /* Save previous_lmf */
1919 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* publish the new LMF as the TLS current one */
1921 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1924 * Store lmf_addr in a variable, so it can be allocated to a global register.
1926 if (!cfg->lmf_addr_var)
1927 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1929 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1931 MONO_ADD_INS (cfg->cbb, lmf_ins);
/* fallback when no intrinsic is available */
1933 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1934 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1936 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1937 lmf_reg = ins->dreg;
1939 prev_lmf_reg = alloc_preg (cfg);
1940 /* Save previous_lmf */
1941 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1942 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = lmf */
1944 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1951 * Emit IR to pop the current LMF from the LMF stack.
/* Mirrors emit_push_lmf: either restore previous_lmf straight into the TLS
 * slot (lmf_ir_mono_lmf), or unlink through the cached lmf_addr variable.
 * NOTE(review): some branch/brace lines are elided in this listing. */
1954 emit_pop_lmf (MonoCompile *cfg)
1956 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1962 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1963 lmf_reg = ins->dreg;
1965 if (cfg->lmf_ir_mono_lmf) {
1966 /* Load previous_lmf */
1967 prev_lmf_reg = alloc_preg (cfg);
1968 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* restore previous_lmf as the TLS current one */
1970 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1973 * Emit IR to pop the LMF:
1974 * *(lmf->lmf_addr) = lmf->prev_lmf
1976 /* This could be called before emit_push_lmf () */
1977 if (!cfg->lmf_addr_var)
1978 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1979 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1981 prev_lmf_reg = alloc_preg (cfg);
1982 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1983 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return TYPE to the proper call opcode family
 * (CALL / VOIDCALL / LCALL / FCALL / VCALL), selecting the _REG variant for
 * indirect calls (CALLI) and the _MEMBASE variant for virtual calls (VIRT).
 * Enums and generic instances are resolved to their underlying type first.
 * NOTE(review): several case labels and the handle_enum goto target are
 * elided in this listing.
 */
1988 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1991 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1994 type = mini_get_basic_type_from_generic (gsctx, type);
1995 switch (type->type) {
1996 case MONO_TYPE_VOID:
1997 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2000 case MONO_TYPE_BOOLEAN:
2003 case MONO_TYPE_CHAR:
2006 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2010 case MONO_TYPE_FNPTR:
2011 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2020 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2023 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2024 case MONO_TYPE_VALUETYPE:
2025 if (type->data.klass->enumtype) {
/* enums are handled as their underlying integral type */
2026 type = mono_class_enum_basetype (type->data.klass);
2029 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2030 case MONO_TYPE_TYPEDBYREF:
2031 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2032 case MONO_TYPE_GENERICINST:
2033 type = &type->data.generic_class->container_class->byval_arg;
2036 case MONO_TYPE_MVAR:
/* gsharedvt-style type variables are returned like value types */
2038 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2040 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2046 * target_type_is_incompatible:
2047 * @cfg: MonoCompile context
2049 * Check that the item @arg on the evaluation stack can be stored
2050 * in the target type (can be a local, or field, etc).
2051 * The cfg arg can be used to check if we need verification or just
2054 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): some case labels and return statements are elided in this
 * listing. */
2057 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2059 MonoType *simple_type;
2062 if (target->byref) {
2063 /* FIXME: check that the pointed to types match */
2064 if (arg->type == STACK_MP)
2065 return arg->klass != mono_class_from_mono_type (target);
2066 if (arg->type == STACK_PTR)
/* resolve enums/typedefs to the underlying representation */
2071 simple_type = mono_type_get_underlying_type (target);
2072 switch (simple_type->type) {
2073 case MONO_TYPE_VOID:
2077 case MONO_TYPE_BOOLEAN:
2080 case MONO_TYPE_CHAR:
2083 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2087 /* STACK_MP is needed when setting pinned locals */
2088 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2093 case MONO_TYPE_FNPTR:
2095 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2096 * in native int. (#688008).
2098 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2101 case MONO_TYPE_CLASS:
2102 case MONO_TYPE_STRING:
2103 case MONO_TYPE_OBJECT:
2104 case MONO_TYPE_SZARRAY:
2105 case MONO_TYPE_ARRAY:
2106 if (arg->type != STACK_OBJ)
2108 /* FIXME: check type compatibility */
2112 if (arg->type != STACK_I8)
2117 if (arg->type != STACK_R8)
2120 case MONO_TYPE_VALUETYPE:
2121 if (arg->type != STACK_VTYPE)
/* value types must match the exact class, not just the stack type */
2123 klass = mono_class_from_mono_type (simple_type);
2124 if (klass != arg->klass)
2127 case MONO_TYPE_TYPEDBYREF:
2128 if (arg->type != STACK_VTYPE)
2130 klass = mono_class_from_mono_type (simple_type);
2131 if (klass != arg->klass)
2134 case MONO_TYPE_GENERICINST:
2135 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2136 if (arg->type != STACK_VTYPE)
2138 klass = mono_class_from_mono_type (simple_type);
2139 if (klass != arg->klass)
2143 if (arg->type != STACK_OBJ)
2145 /* FIXME: check type compatibility */
2149 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2150 g_assert (cfg->generic_sharing_context);
2151 if (mini_type_var_is_vt (cfg, simple_type)) {
2152 if (arg->type != STACK_VTYPE)
2155 if (arg->type != STACK_OBJ)
2160 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2166 * Prepare arguments for passing to a function call.
2167 * Return a non-zero value if the arguments can't be passed to the given
2169 * The type checks are not yet complete and some conversions may need
2170 * casts on 32 or 64 bit architectures.
2172 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): some case labels, return statements and the hasthis check
 * are elided in this listing. */
2175 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2177 MonoType *simple_type;
/* the implicit 'this' argument must be a reference or pointer */
2181 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2185 for (i = 0; i < sig->param_count; ++i) {
2186 if (sig->params [i]->byref) {
2187 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2191 simple_type = sig->params [i];
2192 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2194 switch (simple_type->type) {
2195 case MONO_TYPE_VOID:
2200 case MONO_TYPE_BOOLEAN:
2203 case MONO_TYPE_CHAR:
2206 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2212 case MONO_TYPE_FNPTR:
2213 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2216 case MONO_TYPE_CLASS:
2217 case MONO_TYPE_STRING:
2218 case MONO_TYPE_OBJECT:
2219 case MONO_TYPE_SZARRAY:
2220 case MONO_TYPE_ARRAY:
2221 if (args [i]->type != STACK_OBJ)
2226 if (args [i]->type != STACK_I8)
2231 if (args [i]->type != STACK_R8)
2234 case MONO_TYPE_VALUETYPE:
2235 if (simple_type->data.klass->enumtype) {
/* enums are checked as their underlying integral type */
2236 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2239 if (args [i]->type != STACK_VTYPE)
2242 case MONO_TYPE_TYPEDBYREF:
2243 if (args [i]->type != STACK_VTYPE)
2246 case MONO_TYPE_GENERICINST:
2247 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2250 case MONO_TYPE_MVAR:
2252 if (args [i]->type != STACK_VTYPE)
2256 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *_MEMBASE (virtual) call opcode to the corresponding direct call
 * opcode. Aborts on an unexpected opcode.
 * NOTE(review): the return statements for each case are elided in this
 * listing.
 */
2264 callvirt_to_call (int opcode)
2267 case OP_CALL_MEMBASE:
2269 case OP_VOIDCALL_MEMBASE:
2271 case OP_FCALL_MEMBASE:
2273 case OP_VCALL_MEMBASE:
2275 case OP_LCALL_MEMBASE:
2278 g_assert_not_reached ();
2284 #ifdef MONO_ARCH_HAVE_IMT
2285 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT argument (either the literal METHOD pointer or a
 * runtime value in IMT_ARG) to be passed to CALL. LLVM and architectures
 * with a dedicated MONO_ARCH_IMT_REG pass it in a register; otherwise the
 * arch back end decides via mono_arch_emit_imt_argument ().
 * NOTE(review): branch/#else lines selecting between these paths are
 * elided in this listing.
 */
2287 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2291 if (COMPILE_LLVM (cfg)) {
2292 method_reg = alloc_preg (cfg);
2295 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2296 } else if (cfg->compile_aot) {
2297 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2300 MONO_INST_NEW (cfg, ins, OP_PCONST);
2301 ins->inst_p0 = method;
2302 ins->dreg = method_reg;
2303 MONO_ADD_INS (cfg->cbb, ins);
2307 call->imt_arg_reg = method_reg;
2309 #ifdef MONO_ARCH_IMT_REG
2310 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2312 /* Need this to keep the IMT arg alive */
2313 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path */
2318 #ifdef MONO_ARCH_IMT_REG
2319 method_reg = alloc_preg (cfg);
2322 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2323 } else if (cfg->compile_aot) {
2324 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2327 MONO_INST_NEW (cfg, ins, OP_PCONST);
2328 ins->inst_p0 = method;
2329 ins->dreg = method_reg;
2330 MONO_ADD_INS (cfg->cbb, ins);
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* no IMT register: delegate entirely to the architecture back end */
2335 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from MP describing a patch of the given TYPE at
 * IP whose resolution target is TARGET.
 * NOTE(review): the assignments of ji->ip/ji->type are elided in this
 * listing.
 */
2340 static MonoJumpInfo *
2341 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2343 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2347 ji->data.target = target;
/* Return the generic-sharing context usage of KLASS, or 0 (implied) when no
 * generic sharing context is active for this compilation. */
2353 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2355 if (cfg->generic_sharing_context)
2356 return mono_class_check_context_used (klass);
/* Return the generic-sharing context usage of METHOD, or 0 (implied) when
 * no generic sharing context is active for this compilation. */
2362 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2364 if (cfg->generic_sharing_context)
2365 return mono_method_check_context_used (method);
2371 * check_method_sharing:
2373 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2376 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2378 gboolean pass_vtable = FALSE;
2379 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods of generic classes may need the vtable */
2381 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2382 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2383 gboolean sharable = FALSE;
2385 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2388 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2389 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2390 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2392 sharable = sharing_enabled && context_sharable;
2396 * Pass vtable iff target method might
2397 * be shared, which means that sharing
2398 * is enabled for its class and its
2399 * context is sharable (and it's not a
2402 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst set) get an mrgctx instead of a vtable */
2406 if (mini_method_get_context (cmethod) &&
2407 mini_method_get_context (cmethod)->method_inst) {
2408 g_assert (!pass_vtable);
2410 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2413 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2414 MonoGenericContext *context = mini_method_get_context (cmethod);
2415 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2417 if (sharing_enabled && context_sharable)
2419 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* write results through the optional out parameters */
2424 if (out_pass_vtable)
2425 *out_pass_vtable = pass_vtable;
2426 if (out_pass_mrgctx)
2427 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create and emit a MonoCallInst for SIG with arguments ARGS. CALLI
 * selects an indirect call, VIRTUAL a vtable call, TAIL a tail call, RGCTX
 * marks that an rgctx argument is passed, and UNBOX_TRAMPOLINE requests the
 * unbox trampoline for valuetype 'this'. Handles vtype returns via
 * OP_OUTARG_VTRETADDR and the soft-float r8->r4 argument conversion.
 * NOTE(review): several lines (sig_ret setup, tail-call branch structure,
 * some conditionals) are elided in this listing.
 */
2430 inline static MonoCallInst *
2431 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2432 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2436 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2441 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2443 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2446 call->signature = sig;
2447 call->rgctx_reg = rgctx;
2450 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2453 if (mini_type_is_vtype (cfg, sig_ret)) {
/* tail calls returning a vtype reuse the caller's vret_addr */
2454 call->vret_var = cfg->vret_addr;
2455 //g_assert_not_reached ();
2457 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2458 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2461 temp->backend.is_pinvoke = sig->pinvoke;
2464 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2465 * address of return value to increase optimization opportunities.
2466 * Before vtype decomposition, the dreg of the call ins itself represents the
2467 * fact the call modifies the return value. After decomposition, the call will
2468 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2469 * will be transformed into an LDADDR.
2471 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2472 loada->dreg = alloc_preg (cfg);
2473 loada->inst_p0 = temp;
2474 /* We reference the call too since call->dreg could change during optimization */
2475 loada->inst_p1 = call;
2476 MONO_ADD_INS (cfg->cbb, loada);
2478 call->inst.dreg = temp->dreg;
2480 call->vret_var = loada;
2481 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2482 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2484 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2485 if (COMPILE_SOFT_FLOAT (cfg)) {
2487 * If the call has a float argument, we would need to do an r8->r4 conversion using
2488 * an icall, but that cannot be done during the call sequence since it would clobber
2489 * the call registers + the stack. So we do it before emitting the call.
2491 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2493 MonoInst *in = call->args [i];
2495 if (i >= sig->hasthis)
2496 t = sig->params [i - sig->hasthis];
2498 t = &mono_defaults.int_class->byval_arg;
2499 t = mono_type_get_underlying_type (t);
2501 if (!t->byref && t->type == MONO_TYPE_R4) {
2502 MonoInst *iargs [1];
2506 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2508 /* The result will be in an int vreg */
2509 call->args [i] = conv;
2515 call->need_unbox_trampoline = unbox_trampoline;
/* let the back end (LLVM or native) lower the argument passing */
2518 if (COMPILE_LLVM (cfg))
2519 mono_llvm_emit_call (cfg, call);
2521 mono_arch_emit_call (cfg, call);
2523 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing-parameter area needed by any call */
2526 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2527 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument to CALL: in a dedicated register when the
 * architecture defines MONO_ARCH_RGCTX_REG, otherwise (presumably, the
 * #else branch is not visible in this extract) via rgctx_arg_reg.
 */
2533 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2535 #ifdef MONO_ARCH_RGCTX_REG
2536 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2537 cfg->uses_rgctx_reg = TRUE;
2538 call->rgctx_reg = TRUE;
2540 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. IMT_ARG and
 * RGCTX_ARG are optional; when RGCTX_ARG is given, it is first copied
 * into a fresh register so it survives until set_rgctx_arg wires it to
 * the call.
 */
2547 inline static MonoInst*
2548 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2554 rgctx_reg = mono_alloc_preg (cfg);
2555 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* calli == TRUE: the call target is the value of ADDR, placed in sreg1 below. */
2558 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2560 call->inst.sreg1 = addr->dreg;
2563 emit_imt_argument (cfg, call, NULL, imt_arg);
2565 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2568 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2570 return (MonoInst*)call;
2574 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2577 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2579 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual/tail/remoting) call to METHOD with signature
 * SIG. THIS being non-NULL selects virtual dispatch; IMT_ARG/RGCTX_ARG are
 * optional extra arguments. Handles: string ctor signature fixup, remoting
 * wrappers, delegate Invoke fast path, devirtualization of final/non-virtual
 * methods, and interface/generic-virtual dispatch through the IMT.
 *   NOTE(review): this extract is missing intermediate lines (braces,
 * #else/#endif, some declarations) — confirm branch structure against the
 * full file before editing.
 */
2582 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2583 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2585 #ifndef DISABLE_REMOTING
2586 gboolean might_be_remote = FALSE;
2588 gboolean virtual = this != NULL;
2589 gboolean enable_for_aot = TRUE;
2593 gboolean need_unbox_trampoline;
2596 sig = mono_method_signature (method);
/* Preserve the rgctx argument in its own register until set_rgctx_arg below. */
2599 rgctx_reg = mono_alloc_preg (cfg);
2600 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2603 if (method->string_ctor) {
2604 /* Create the real signature */
2605 /* FIXME: Cache these */
2606 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2607 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2612 context_used = mini_method_check_context_used (cfg, method);
2614 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) 'this' might cross a remoting boundary. */
2615 might_be_remote = this && sig->hasthis &&
2616 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2617 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2619 if (might_be_remote && context_used) {
2622 g_assert (cfg->generic_sharing_context);
2624 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2626 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2630 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2632 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2634 #ifndef DISABLE_REMOTING
2635 if (might_be_remote)
2636 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2639 call->method = method;
2640 call->inst.flags |= MONO_INST_HAS_METHOD;
2641 call->inst.inst_left = this;
2642 call->tail_call = tail;
2645 int vtable_reg, slot_reg, this_reg;
2648 this_reg = this->dreg;
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2650 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2651 MonoInst *dummy_use;
2653 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2655 /* Make a call to delegate->invoke_impl */
2656 call->inst.inst_basereg = this_reg;
2657 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2658 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2660 /* We must emit a dummy use here because the delegate trampoline will
2661 replace the 'this' argument with the delegate target making this activation
2662 no longer a root for the delegate.
2663 This is an issue for delegates that target collectible code such as dynamic
2664 methods of GC'able assemblies.
2666 For a test case look into #667921.
2668 FIXME: a dummy use is not the best way to do it as the local register allocator
2669 will put it on a caller save register and spil it around the call.
2670 Ideally, we would either put it on a callee save register or only do the store part.
2672 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2674 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, or final methods (no remoting-check wrapper). */
2677 if ((!cfg->compile_aot || enable_for_aot) &&
2678 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2679 (MONO_METHOD_IS_FINAL (method) &&
2680 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2681 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2683 * the method is not virtual, we just need to ensure this is not null
2684 * and then we can call the method directly.
2686 #ifndef DISABLE_REMOTING
2687 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2689 * The check above ensures method is not gshared, this is needed since
2690 * gshared methods can't have wrappers.
2692 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2696 if (!method->string_ctor)
2697 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2699 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2700 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2702 * the method is virtual, but we can statically dispatch since either
2703 * it's class or the method itself are sealed.
2704 * But first we need to ensure it's not a null reference.
2706 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2708 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on a null 'this'). */
2710 vtable_reg = alloc_preg (cfg);
2711 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2712 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2714 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots live at negative offsets from the vtable. */
2716 guint32 imt_slot = mono_method_get_imt_slot (method);
2717 emit_imt_argument (cfg, call, call->method, imt_arg);
2718 slot_reg = vtable_reg;
2719 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2722 if (slot_reg == -1) {
2723 slot_reg = alloc_preg (cfg);
2724 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2725 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2728 slot_reg = vtable_reg;
2729 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2730 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2731 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch with an IMT argument. */
2733 g_assert (mono_method_signature (method)->generic_param_count);
2734 emit_imt_argument (cfg, call, call->method, imt_arg);
2739 call->inst.sreg1 = slot_reg;
2740 call->inst.inst_offset = offset;
2741 call->virtual = TRUE;
2745 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2748 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2750 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD with its own signature
 * and no IMT/rgctx arguments.
 */
2754 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2756 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native address FUNC with signature SIG
 * (not calli, not virtual, no tail call, no rgctx/unbox handling).
 */
2760 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2767 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2770 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2772 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for FUNC; the actual call
 * target is the icall's wrapper, looked up by address.
 */
2776 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2778 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2782 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2786 * mono_emit_abs_call:
2788 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2790 inline static MonoInst*
2791 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2792 MonoMethodSignature *sig, MonoInst **args)
2794 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2798 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2801 if (cfg->abs_patches == NULL)
2802 cfg->abs_patches = g_hash_table_new (NULL, NULL); /* lazily created; keyed by the patch info itself */
2803 g_hash_table_insert (cfg->abs_patches, ji, ji);
2804 ins = mono_emit_native_call (cfg, ji, sig, args); /* ji stands in for the address until patched */
2805 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen a small-integer call result to full register width. Needed for
 * pinvoke (and LLVM) returns because native code might leave the upper
 * bits of sub-register-sized integers uninitialized. Returns the widened
 * instruction (presumably INS unchanged when no widening applies — the
 * surrounding lines are not visible in this extract).
 */
2810 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2812 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2813 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2817 * Native code might return non register sized integers
2818 * without initializing the upper bits.
2820 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2821 case OP_LOADI1_MEMBASE:
2822 widen_op = OP_ICONV_TO_I1;
2824 case OP_LOADU1_MEMBASE:
2825 widen_op = OP_ICONV_TO_U1;
2827 case OP_LOADI2_MEMBASE:
2828 widen_op = OP_ICONV_TO_I2;
2830 case OP_LOADU2_MEMBASE:
2831 widen_op = OP_ICONV_TO_U2;
2837 if (widen_op != -1) {
2838 int dreg = alloc_preg (cfg);
2841 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2842 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3-arg) helper, cached in a static.
 * Aborts with g_error if the running corlib does not provide it.
 */
2852 get_memcpy_method (void)
2854 static MonoMethod *memcpy_method = NULL;
2855 if (!memcpy_method) {
2856 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2858 g_error ("Old corlib found. Install a new one");
2860 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set a bit in *WB_BITMAP for every pointer-sized slot of
 * KLASS (starting at byte OFFSET) that holds a GC reference. Static
 * fields are skipped; embedded structs with references recurse. The
 * caller is responsible for KLASS fitting in the 32-slot bitmap.
 */
2864 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2866 MonoClassField *field;
2867 gpointer iter = NULL;
2869 while ((field = mono_class_get_fields (klass, &iter))) {
2872 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header; strip it. */
2874 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2875 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2876 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2877 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2879 MonoClass *field_class = mono_class_from_mono_type (field->type);
2880 if (field_class->has_references)
2881 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through pointer PTR.
 * Prefers, in order: an arch-specific card-table barrier opcode, an
 * inline card-table mark (shift + optional mask + store of 1 into the
 * card byte), and finally a call to the generic managed write barrier.
 * No-op when write barriers are disabled for this compile.
 */
2887 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2889 int card_table_shift_bits;
2890 gpointer card_table_mask;
2892 MonoInst *dummy_use;
2893 int nursery_shift_bits;
2894 size_t nursery_size;
2895 gboolean has_card_table_wb = FALSE;
2897 if (!cfg->gen_write_barriers)
2900 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2902 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2904 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2905 has_card_table_wb = TRUE;
/* Arch barrier opcode is not usable under AOT or LLVM; fall through to the inline/call forms there. */
2908 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2911 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2912 wbarrier->sreg1 = ptr->dreg;
2913 wbarrier->sreg2 = value->dreg;
2914 MONO_ADD_INS (cfg->cbb, wbarrier);
2915 } else if (card_table) {
2916 int offset_reg = alloc_preg (cfg);
2917 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift (optionally masked), then mark card_table[index] = 1. */
2920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2921 if (card_table_mask)
2922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2924 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2925 * IMM's larger than 32bits.
2927 if (cfg->compile_aot) {
2928 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2930 MONO_INST_NEW (cfg, ins, OP_PCONST);
2931 ins->inst_p0 = card_table;
2932 ins->dreg = card_reg;
2933 MONO_ADD_INS (cfg->cbb, ins);
2936 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2937 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2939 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2940 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for liveness purposes. */
2943 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of SIZE bytes of KLASS from
 * iargs[1] to iargs[0]. Small copies are unrolled as pointer-sized
 * load/store pairs with per-slot barriers (driven by the bitmap from
 * create_write_barrier_bitmap); larger copies fall back to the
 * mono_gc_wbarrier_value_copy_bitmap icall. Bails out (presumably
 * returning FALSE to the caller — the return statements are not visible
 * in this extract) when alignment or size make the fast path invalid.
 */
2947 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2949 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2950 unsigned need_wb = 0;
2955 /*types with references can't have alignment smaller than sizeof(void*) */
2956 if (align < SIZEOF_VOID_P)
2959 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2960 if (size > 32 * SIZEOF_VOID_P)
2963 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2965 /* We don't unroll more than 5 stores to avoid code bloat. */
2966 if (size > 5 * SIZEOF_VOID_P) {
2967 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2968 size += (SIZEOF_VOID_P - 1);
2969 size &= ~(SIZEOF_VOID_P - 1);
2971 EMIT_NEW_ICONST (cfg, iargs [2], size);
2972 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2973 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2977 destreg = iargs [0]->dreg;
2978 srcreg = iargs [1]->dreg;
2981 dest_ptr_reg = alloc_preg (cfg);
2982 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks forward through the destination as slots are copied. */
2985 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2987 while (size >= SIZEOF_VOID_P) {
2988 MonoInst *load_inst;
2989 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2990 load_inst->dreg = tmp_reg;
2991 load_inst->inst_basereg = srcreg;
2992 load_inst->inst_offset = offset;
2993 MONO_ADD_INS (cfg->cbb, load_inst);
2995 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard lines elided in this extract). */
2998 emit_write_barrier (cfg, iargs [0], load_inst);
3000 offset += SIZEOF_VOID_P;
3001 size -= SIZEOF_VOID_P;
3004 /*tmp += sizeof (void*)*/
3005 if (size >= SIZEOF_VOID_P) {
3006 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3007 MONO_ADD_INS (cfg->cbb, iargs [0]);
3011 /* Those cannot be references since size < sizeof (void*) */
3013 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3014 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3020 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3021 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3027 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3037 * Emit code to copy a valuetype of type @klass whose address is stored in
3038 * @src->dreg to memory whose address is stored at @dest->dreg.
3041 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3043 MonoInst *iargs [4];
3044 int context_used, n;
3046 MonoMethod *memcpy_method;
3047 MonoInst *size_ins = NULL; /* non-NULL only for gsharedvt: size known at runtime, not JIT time */
3048 MonoInst *memcpy_ins = NULL; /* gsharedvt: runtime-resolved memcpy helper */
3052 * This check breaks with spilled vars... need to handle it during verification anyway.
3053 * g_assert (klass && klass == src->klass && klass == dest->klass);
3056 if (mini_is_gsharedvt_klass (cfg, klass)) {
3058 context_used = mini_class_check_context_used (cfg, klass);
3059 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3060 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3064 n = mono_class_native_size (klass, &align);
3066 n = mono_class_value_size (klass, &align);
3068 /* if native is true there should be no references in the struct */
3069 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3070 /* Avoid barriers when storing to the stack */
3071 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3072 (dest->opcode == OP_LDADDR))) {
3078 context_used = mini_class_check_context_used (cfg, klass);
3080 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3081 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3083 } else if (context_used) {
3084 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3086 if (cfg->compile_aot) {
3087 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3089 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3090 mono_class_compute_gc_descriptor (klass); /* mono_value_copy needs the gc descriptor ready */
3095 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3097 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: plain memcpy, inlined when small enough. */
3102 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3103 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3104 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3109 iargs [2] = size_ins;
3111 EMIT_NEW_ICONST (cfg, iargs [2], n);
3113 memcpy_method = get_memcpy_method ();
3115 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3117 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3-arg) helper, cached in a static.
 * Aborts with g_error if the running corlib does not provide it.
 */
3122 get_memset_method (void)
3124 static MonoMethod *memset_method = NULL;
3125 if (!memset_method) {
3126 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3128 g_error ("Old corlib found. Install a new one");
3130 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at address
 * DEST->dreg (CIL initobj). gsharedvt types call a runtime-resolved
 * bzero helper with a runtime size; otherwise small types are inlined
 * via mini_emit_memset and larger ones call the managed memset helper.
 */
3134 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3136 MonoInst *iargs [3];
3137 int n, context_used;
3139 MonoMethod *memset_method;
3140 MonoInst *size_ins = NULL; /* gsharedvt: size only known at runtime */
3141 MonoInst *bzero_ins = NULL; /* gsharedvt: runtime-resolved bzero helper */
3142 static MonoMethod *bzero_method;
3144 /* FIXME: Optimize this for the case when dest is an LDADDR */
3146 mono_class_init (klass);
3147 if (mini_is_gsharedvt_klass (cfg, klass)) {
3148 context_used = mini_class_check_context_used (cfg, klass);
3149 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3150 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3152 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3153 g_assert (bzero_method);
3155 iargs [1] = size_ins;
3156 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3160 n = mono_class_value_size (klass, &align);
/* Small types: inline the memset instead of calling out. */
3162 if (n <= sizeof (gpointer) * 5) {
3163 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3166 memset_method = get_memset_method ();
3168 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3169 EMIT_NEW_ICONST (cfg, iargs [2], n);
3170 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD inside shared
 * code. The context source depends on how METHOD is shared: the mrgctx
 * argument variable for method-inflated generics, the vtable variable for
 * static/valuetype methods (dereferencing the mrgctx when present), or
 * 'this'->vtable for ordinary instance methods.
 */
3175 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3177 MonoInst *this = NULL;
3179 g_assert (cfg->generic_sharing_context);
/* Instance methods that don't need the method context read the rgctx off 'this'. */
3181 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3182 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3183 !method->klass->valuetype)
3184 EMIT_NEW_ARGLOAD (cfg, this, 0);
3186 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3187 MonoInst *mrgctx_loc, *mrgctx_var;
3190 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3192 mrgctx_loc = mono_get_vtable_var (cfg);
3193 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3196 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3197 MonoInst *vtable_loc, *vtable_var;
3201 vtable_loc = mono_get_vtable_var (cfg);
3202 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; load the vtable out of it. */
3204 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3205 MonoInst *mrgctx_var = vtable_var;
3208 vtable_reg = alloc_preg (cfg);
3209 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3210 vtable_var->type = STACK_PTR;
3218 vtable_reg = alloc_preg (cfg);
3219 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor: which method's
 * context to use, whether it is an mrgctx, and the wrapped patch
 * (PATCH_TYPE/PATCH_DATA) plus the kind of info to fetch (INFO_TYPE).
 */
3224 static MonoJumpInfoRgctxEntry *
3225 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3227 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3228 res->method = method;
3229 res->in_mrgctx = in_mrgctx;
3230 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3231 res->data->type = patch_type;
3232 res->data->data.target = patch_data;
3233 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * against the runtime generic context RGCTX.
 */
3238 static inline MonoInst*
3239 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3241 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that loads the RGCTX_TYPE info for KLASS from the runtime
 * generic context of the current method.
 */
3245 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3246 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3248 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3249 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3251 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR that loads the RGCTX_TYPE info for signature SIG from the
 * runtime generic context of the current method.
 */
3255 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3256 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3258 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3259 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3261 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR that loads the RGCTX_TYPE info for a gsharedvt call site
 * (described by SIG + CMETHOD) from the runtime generic context.
 */
3265 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3266 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3268 MonoJumpInfoGSharedVtCall *call_info;
3269 MonoJumpInfoRgctxEntry *entry;
3272 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3273 call_info->sig = sig;
3274 call_info->method = cmethod;
3276 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3277 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3279 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR that loads the gsharedvt per-method info (INFO) for CMETHOD
 * from the runtime generic context.
 */
3284 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3285 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3287 MonoJumpInfoRgctxEntry *entry;
3290 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3291 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3293 return emit_rgctx_fetch (cfg, rgctx, entry);
3297 * emit_get_rgctx_method:
3299 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3300 * normal constants, else emit a load from the rgctx.
3303 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3304 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3306 if (!context_used) {
3309 switch (rgctx_type) {
3310 case MONO_RGCTX_INFO_METHOD:
3311 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3313 case MONO_RGCTX_INFO_METHOD_RGCTX:
3314 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3317 g_assert_not_reached (); /* only the two info kinds above can be emitted as constants */
3320 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3321 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3323 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that loads the RGCTX_TYPE info for FIELD from the runtime
 * generic context of the current method.
 */
3328 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3329 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3331 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3332 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3334 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in the per-method gsharedvt info table for the
 * (DATA, RGCTX_TYPE) pair, reusing an existing matching entry when
 * possible (LOCAL_OFFSET entries are never shared) and appending a new
 * template otherwise.
 */
3338 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3340 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3341 MonoRuntimeGenericContextInfoTemplate *template;
3346 for (i = 0; i < info->entries->len; ++i) {
3347 MonoRuntimeGenericContextInfoTemplate *otemplate = g_ptr_array_index (info->entries, i);
3349 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3353 template = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate));
3354 template->info_type = rgctx_type;
3355 template->data = data;
3357 idx = info->entries->len;
3359 g_ptr_array_add (info->entries, template);
3365 * emit_get_gsharedvt_info:
3367 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3370 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3375 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3376 /* Load info->entries [idx] */
3377 dreg = alloc_preg (cfg);
3378 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: load gsharedvt info of kind RGCTX_TYPE for the
 * type of KLASS (keyed by its byval MonoType).
 */
3384 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3386 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3390 * On return the caller must check @klass for load errors.
3393 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3395 MonoInst *vtable_arg;
3399 context_used = mini_class_check_context_used (cfg, klass);
3402 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3403 klass, MONO_RGCTX_INFO_VTABLE);
3405 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3409 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM and non-LLVM use different trampoline signatures for the class-init call. */
3412 if (COMPILE_LLVM (cfg))
3413 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3415 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3416 #ifdef MONO_ARCH_VTABLE_REG
3417 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3418 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP when sequence points
 * are enabled and we are compiling METHOD itself (not an inlinee).
 * INTR_LOC marks it as an interruption location; NONEMPTY_STACK flags
 * sequence points where the IL evaluation stack is not empty.
 */
3425 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3429 if (cfg->gen_seq_points && cfg->method == method) {
3430 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3432 ins->flags |= MONO_INST_NONEMPTY_STACK;
3433 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   Under --debug=casts, emit code that records the source class (read
 * from OBJ_REG's vtable) and the target KLASS into the thread's JIT TLS
 * data, so a failing cast can report both types. When NULL_CHECK is set
 * a null OBJ_REG skips the recording. OUT_BBLOCK, if non-NULL, receives
 * the current basic block after the emitted branch.
 */
3438 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3440 if (mini_get_debug_options ()->better_cast_details) {
3441 int to_klass_reg = alloc_preg (cfg);
3442 int vtable_reg = alloc_preg (cfg);
3443 int klass_reg = alloc_preg (cfg);
3444 MonoBasicBlock *is_null_bb = NULL;
3448 NEW_BBLOCK (cfg, is_null_bb);
3450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3451 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3454 tls_get = mono_get_jit_tls_intrinsic (cfg);
3456 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3460 MONO_ADD_INS (cfg->cbb, tls_get);
3461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Stash the from/to classes in JIT TLS for the cast-failure message. */
3464 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3465 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3466 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3469 MONO_START_BB (cfg, is_null_bb);
3471 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-details TLS state recorded by save_cast_details once
 * the cast has succeeded. Only active under --debug=casts.
 */
3477 reset_cast_details (MonoCompile *cfg)
3479 /* Reset the variables holding the cast details */
3480 if (mini_get_debug_options ()->better_cast_details) {
3481 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3483 MONO_ADD_INS (cfg->cbb, tls_get);
3484 /* It is enough to reset the from field */
3485 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3490 * On return the caller must check @array_class for load errors
3493 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3495 int vtable_reg = alloc_preg (cfg)/* holds obj->vtable; compared against the expected vtable/class below */;
3498 context_used = mini_class_check_context_used (cfg, array_class);
3500 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3502 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3504 if (cfg->opt & MONO_OPT_SHARED) {
3505 int class_reg = alloc_preg (cfg);
3506 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3507 if (cfg->compile_aot) {
3508 int klass_reg = alloc_preg (cfg);
3509 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3514 } else if (context_used) {
3515 MonoInst *vtable_ins;
3517 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3518 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3520 if (cfg->compile_aot) {
3524 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3526 vt_reg = alloc_preg (cfg);
3527 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3528 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3531 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch from the compares above raises ArrayTypeMismatchException. */
3537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3539 reset_cast_details (cfg);
3543 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3544 * generic code is generated.
3547 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3549 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1)/* Nullable<T>.Unbox helper */;
3552 MonoInst *rgctx, *addr;
3554 /* FIXME: What if the class is shared? We might not
3555 have to get the address of the method from the
3557 addr = emit_get_rgctx_method (cfg, context_used, method,
3558 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3560 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3562 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3564 gboolean pass_vtable, pass_mrgctx;
3565 MonoInst *rgctx_arg = NULL;
3567 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3568 g_assert (!pass_mrgctx);
3571 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3574 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable)/* pass the vtable as the rgctx argument */;
3577 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in SP [0] to value type KLASS: a runtime type
 * check against the object's vtable (throwing InvalidCastException on mismatch),
 * then an ADD_IMM computing the address of the payload just past the MonoObject
 * header. The resulting instruction has type STACK_MP (managed pointer).
 */
3582 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3586 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3587 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3588 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3589 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3591 	obj_reg = sp [0]->dreg;
/* Faulting load: also acts as the null check on the object reference. */
3592 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3593 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3595 	/* FIXME: generics */
3596 	g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type: rank must be 0. */
3599 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3600 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3602 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3603 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class fetched from the RGCTX. */
3606 		MonoInst *element_class;
3608 		/* This assertion is from the unboxcast insn */
3609 		g_assert (klass->rank == 0);
3611 		element_class = emit_get_rgctx_klass (cfg, context_used,
3612 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3614 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3615 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: the element class is known at JIT time. */
3617 		save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3618 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3619 		reset_cast_details (cfg);
/* Compute the address of the unboxed payload (skip the MonoObject header). */
3622 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3623 	MONO_ADD_INS (cfg->cbb, add);
3624 	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt (generic-shared-by-value-type)
 * type whose concrete layout is only known at runtime. The box kind
 * (MONO_RGCTX_INFO_CLASS_BOX_TYPE) is fetched from the gsharedvt info and three
 * cases are handled via branches: vtype (1 skipped: fall-through), reference
 * type (value 1), and Nullable<T> (value 2). In all cases addr_reg ends up
 * holding the address the value can be loaded from; the loaded value is
 * produced at end_bb and *OUT_CBB is set to the current basic block on exit.
 */
3631 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3633 	MonoInst *addr, *klass_inst, *is_ref, *args[16];
3634 	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3638 	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime cast check: mono_object_castclass_unbox throws on type mismatch. */
3644 	args [1] = klass_inst;
3647 	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3649 	NEW_BBLOCK (cfg, is_ref_bb);
3650 	NEW_BBLOCK (cfg, is_nullable_bb);
3651 	NEW_BBLOCK (cfg, end_bb);
3652 	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* Box-type 1 == reference type, 2 == Nullable<T>; anything else is a plain vtype. */
3653 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3654 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3656 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3657 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3659 	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3660 	addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the payload lives directly past the MonoObject header. */
3664 	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3665 	MONO_ADD_INS (cfg->cbb, addr);
3667 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3670 	MONO_START_BB (cfg, is_ref_bb);
3672 	/* Save the ref to a temporary */
3673 	dreg = alloc_ireg (cfg);
3674 	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3675 	addr->dreg = addr_reg;
3676 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3677 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3680 	MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox () indirectly through a hand-built
 * signature, since the concrete method cannot be constructed at JIT time. */
3683 		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3684 		MonoInst *unbox_call;
3685 		MonoMethodSignature *unbox_sig;
3688 		var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3690 		unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3691 		unbox_sig->ret = &klass->byval_arg;
3692 		unbox_sig->param_count = 1;
3693 		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3694 		unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3696 		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3697 		addr->dreg = addr_reg;
3700 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3703 	MONO_START_BB (cfg, end_bb);
/* All paths join here: load the value through the computed address. */
3706 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3708 	*out_cbb = cfg->cbb;
3714 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an object of type KLASS (FOR_BOX distinguishes box
 * allocations for the GC's managed allocators). Picks the cheapest available
 * strategy: a GC managed allocator, a specialized mscorlib helper for AOT
 * out-of-line code, or the generic mono_object_new/mono_object_new_specific
 * icalls. With context_used, the klass/vtable argument comes from the RGCTX.
 * Returns NULL and sets the cfg exception on error.
 */
3717 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3719 	MonoInst *iargs [2];
/* Shared generic code path: allocation argument fetched through the RGCTX. */
3725 		MonoInst *iargs [2];
3727 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* MONO_OPT_SHARED uses (domain, klass) with mono_object_new; otherwise a vtable
 * suffices for mono_object_new_specific. */
3729 		if (cfg->opt & MONO_OPT_SHARED)
3730 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3732 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3733 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3735 		if (cfg->opt & MONO_OPT_SHARED) {
3736 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3738 			alloc_ftn = mono_object_new;
3741 			alloc_ftn = mono_object_new_specific;
3744 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3745 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3747 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
3750 	if (cfg->opt & MONO_OPT_SHARED) {
3751 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3752 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3754 		alloc_ftn = mono_object_new;
3755 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3756 		/* This happens often in argument checking code, eg. throw new FooException... */
3757 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3758 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3759 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3761 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3762 		MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException through the cfg. */
3766 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3767 			cfg->exception_ptr = klass;
3771 #ifndef MONO_CROSS_COMPILE
3772 		managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3775 		if (managed_alloc) {
3776 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3777 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3779 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw variants take the instance size in pointer-sized words as arg 0. */
3781 			guint32 lw = vtable->klass->instance_size;
3782 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3783 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3784 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3787 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3791 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3795 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL into an object of type KLASS. Handles three cases:
 * Nullable<T> (delegates to the managed Nullable<T>.Box () method, via an
 * RGCTX indirect call in shared code), gsharedvt classes (runtime dispatch on
 * the box kind: vtype / reference / nullable), and the common case (allocate
 * via handle_alloc, then store the value past the object header). *OUT_CBB is
 * updated to the basic block the emitted code ends in.
 * Returns NULL and sets the cfg exception on error.
 */
3798 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3800 	MonoInst *alloc, *ins;
3802 	*out_cbb = cfg->cbb;
3804 	if (mono_class_is_nullable (klass)) {
3805 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3808 			/* FIXME: What if the class is shared? We might not
3809 			   have to get the method address from the RGCTX. */
/* Shared generic code: call Nullable<T>.Box () indirectly through the RGCTX. */
3810 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3811 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3812 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3814 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, possibly passing the vtable as extra argument. */
3816 			gboolean pass_vtable, pass_mrgctx;
3817 			MonoInst *rgctx_arg = NULL;
3819 			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3820 			g_assert (!pass_mrgctx);
3823 				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3826 				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3829 			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3833 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3834 		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3835 		MonoInst *res, *is_ref, *src_var, *addr;
3838 		dreg = alloc_ireg (cfg);
3840 		NEW_BBLOCK (cfg, is_ref_bb);
3841 		NEW_BBLOCK (cfg, is_nullable_bb);
3842 		NEW_BBLOCK (cfg, end_bb);
/* Box-type 1 == reference type, 2 == Nullable<T>; fall-through is a plain vtype. */
3843 		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3844 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3845 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3847 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3848 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value into the payload. */
3851 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
3854 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3855 		ins->opcode = OP_STOREV_MEMBASE;
3857 		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3858 		res->type = STACK_OBJ;
3860 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3863 		MONO_START_BB (cfg, is_ref_bb);
3864 		addr_reg = alloc_ireg (cfg);
3866 		/* val is a vtype, so has to load the value manually */
3867 		src_var = get_vreg_to_inst (cfg, val->dreg);
3869 			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3870 		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3871 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3872 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3875 		MONO_START_BB (cfg, is_nullable_bb);
3878 			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3879 															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3881 			MonoMethodSignature *box_sig;
3884 			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3885 			 * construct that method at JIT time, so have to do things by hand.
3887 			box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3888 			box_sig->ret = &mono_defaults.object_class->byval_arg;
3889 			box_sig->param_count = 1;
3890 			box_sig->params [0] = &klass->byval_arg;
3891 			box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3892 			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3893 			res->type = STACK_OBJ;
3897 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3899 		MONO_START_BB (cfg, end_bb);
3901 		*out_cbb = cfg->cbb;
/* Common case: allocate, then store the value just past the MonoObject header. */
3905 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
3909 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, with CONTEXT_USED, an open
 * generic container) that has at least one variant/covariant type parameter
 * instantiated with a reference type. Used to decide whether an isinst/castclass
 * needs the slow, variance-aware managed check.
 */
3916 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3919 	MonoGenericContainer *container;
3920 	MonoGenericInst *ginst;
3922 	if (klass->generic_class) {
3923 		container = klass->generic_class->container_class->generic_container;
3924 		ginst = klass->generic_class->context.class_inst;
3925 	} else if (klass->generic_container && context_used) {
3926 		container = klass->generic_container;
3927 		ginst = container->context.class_inst;
/* Scan each type argument; only variant/covariant parameters matter. */
3932 	for (i = 0; i < container->type_argc; ++i) {
3934 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3936 		type = ginst->type_argv [i];
3937 		if (mini_type_is_reference (cfg, type))
3943 // FIXME: This doesn't work yet (class libs tests fail?)
3944 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3947 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' operation: check that SRC (an object reference)
 * is an instance of KLASS, throwing InvalidCastException otherwise. NULL
 * references pass the check (branch straight to is_null_bb). Complex cases
 * (variant generics, interfaces, arrays, nullable, sealed, type variables —
 * see is_complex_isinst) go through the cached managed castclass wrapper.
 * Returns NULL and sets the cfg exception on error.
 */
3950 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3952 	MonoBasicBlock *is_null_bb;
3953 	int obj_reg = src->dreg;
3954 	int vtable_reg = alloc_preg (cfg);
3955 	MonoInst *klass_inst = NULL;
/* Slow path: call the managed castclass-with-cache wrapper; the cache entry
 * pair (result, klass) is fetched through the RGCTX. */
3960 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3961 			MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3962 			MonoInst *cache_ins;
3964 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3969 			/* klass - it's the second element of the cache entry*/
3970 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3973 			args [2] = cache_ins;
3975 			return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3978 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3981 	NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always passes castclass. */
3983 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3984 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3986 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
3988 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3989 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3990 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3992 		int klass_reg = alloc_preg (cfg);
3994 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array class: a single pointer compare against the exact
 * class/vtable suffices. */
3996 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3997 			/* the remoting code is broken, access the class for now */
3998 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3999 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4001 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4002 					cfg->exception_ptr = klass;
4005 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4007 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4008 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4010 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the castclass helper. */
4012 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4013 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4017 	MONO_START_BB (cfg, is_null_bb);
4019 	reset_cast_details (cfg);
4025 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' operation: test whether SRC is an instance of
 * KLASS, producing the object reference itself on success and NULL (PCONST 0)
 * on failure — no exception is thrown. Complex cases go through the cached
 * managed isinst wrapper; otherwise inline checks are emitted per class kind
 * (interface, array, nullable, sealed, general hierarchy walk).
 * Returns NULL and sets the cfg exception on error.
 */
4028 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4031 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4032 	int obj_reg = src->dreg;
4033 	int vtable_reg = alloc_preg (cfg);
4034 	int res_reg = alloc_ireg_ref (cfg);
4035 	MonoInst *klass_inst = NULL;
/* Slow path: managed isinst-with-cache wrapper, cache entry via the RGCTX. */
4040 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4041 			MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4042 			MonoInst *cache_ins;
4044 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4049 			/* klass - it's the second element of the cache entry*/
4050 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4053 			args [2] = cache_ins;
4055 			return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4058 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4061 	NEW_BBLOCK (cfg, is_null_bb);
4062 	NEW_BBLOCK (cfg, false_bb);
4063 	NEW_BBLOCK (cfg, end_bb);
4065 	/* Do the assignment at the beginning, so the other assignment can be if converted */
4066 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4067 	ins->type = STACK_OBJ;
/* isinst on NULL yields NULL (the res_reg copy already holds it). */
4070 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4071 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4073 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4075 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4076 		g_assert (!context_used);
4077 		/* the is_null_bb target simply copies the input register to the output */
4078 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4080 		int klass_reg = alloc_preg (cfg);
/* Array classes: compare rank, then check the cast class of the elements. */
4083 			int rank_reg = alloc_preg (cfg);
4084 			int eclass_reg = alloc_preg (cfg);
4086 			g_assert (!context_used);
4087 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4088 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4089 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4090 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4091 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case object[]/Enum[]/enum[] element types per CLI array covariance rules. */
4092 			if (klass->cast_class == mono_defaults.object_class) {
4093 				int parent_reg = alloc_preg (cfg);
4094 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4095 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4096 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4097 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4098 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
4099 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4100 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4101 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4102 			} else if (klass->cast_class == mono_defaults.enum_class) {
4103 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4104 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4105 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4106 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4108 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4109 					/* Check that the object is a vector too */
4110 					int bounds_reg = alloc_preg (cfg);
4111 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4112 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4113 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4116 				/* the is_null_bb target simply copies the input register to the output */
4117 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4119 		} else if (mono_class_is_nullable (klass)) {
4120 			g_assert (!context_used);
4121 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4122 			/* the is_null_bb target simply copies the input register to the output */
4123 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed, non-array class: a single exact class/vtable compare. */
4125 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4126 				g_assert (!context_used);
4127 				/* the remoting code is broken, access the class for now */
4128 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4129 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4131 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4132 						cfg->exception_ptr = klass;
4135 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4137 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4138 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4140 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4141 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk via the isninst helper. */
4143 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4144 				/* the is_null_bb target simply copies the input register to the output */
4145 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4150 	MONO_START_BB (cfg, false_bb);
/* Failure: result is NULL. */
4152 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4153 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4155 	MONO_START_BB (cfg, is_null_bb);
4157 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant used with transparent
 * proxies. Produces an ICONST result (see the table below) rather than an
 * object reference. With DISABLE_REMOTING the proxy paths are compiled out
 * and hitting them is a hard error.
 */
4163 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4165 	/* This opcode takes as input an object reference and a class, and returns:
4166 	0) if the object is an instance of the class,
4167 	1) if the object is not instance of the class,
4168 	2) if the object is a proxy whose type cannot be determined */
4171 #ifndef DISABLE_REMOTING
4172 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4174 	MonoBasicBlock *true_bb, *false_bb, *end_bb;
4176 	int obj_reg = src->dreg;
4177 	int dreg = alloc_ireg (cfg);
4179 #ifndef DISABLE_REMOTING
4180 	int klass_reg = alloc_preg (cfg);
4183 	NEW_BBLOCK (cfg, true_bb);
4184 	NEW_BBLOCK (cfg, false_bb);
4185 	NEW_BBLOCK (cfg, end_bb);
4186 #ifndef DISABLE_REMOTING
4187 	NEW_BBLOCK (cfg, false2_bb);
4188 	NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is "not an instance" (result 1). */
4191 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4192 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4194 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4195 #ifndef DISABLE_REMOTING
4196 		NEW_BBLOCK (cfg, interface_fail_bb);
4199 		tmp_reg = alloc_preg (cfg);
4200 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4201 #ifndef DISABLE_REMOTING
/* Interface check failed: if the object is a transparent proxy with custom
 * type info missing, the answer is "can't tell" (result 2). */
4202 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4203 		MONO_START_BB (cfg, interface_fail_bb);
4204 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4206 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4208 		tmp_reg = alloc_preg (cfg);
4209 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4210 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4211 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4213 		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class. */
4216 #ifndef DISABLE_REMOTING
4217 		tmp_reg = alloc_preg (cfg);
4218 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4219 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* For a transparent proxy, test against the remote class's proxy_class,
 * unless custom type info is absent (then result 2 on failure). */
4221 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4222 		tmp_reg = alloc_preg (cfg);
4223 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4224 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4226 		tmp_reg = alloc_preg (cfg);
4227 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4228 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4229 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4231 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4232 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4234 		MONO_START_BB (cfg, no_proxy_bb);
4236 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4238 		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false -> 1, false2 -> 2, true -> 0. */
4242 	MONO_START_BB (cfg, false_bb);
4244 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4245 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4247 #ifndef DISABLE_REMOTING
4248 	MONO_START_BB (cfg, false2_bb);
4250 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4251 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4254 	MONO_START_BB (cfg, true_bb);
4256 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4258 	MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 value on the stack. */
4261 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4263 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant used with transparent
 * proxies. Like handle_cisinst it produces an ICONST result (see the table
 * below), but throws InvalidCastException instead of returning a failure
 * code. With DISABLE_REMOTING the proxy paths are compiled out.
 */
4269 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4271 	/* This opcode takes as input an object reference and a class, and returns:
4272 	0) if the object is an instance of the class,
4273 	1) if the object is a proxy whose type cannot be determined
4274 	an InvalidCastException exception is thrown otherwise*/
4277 #ifndef DISABLE_REMOTING
4278 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4280 	MonoBasicBlock *ok_result_bb;
4282 	int obj_reg = src->dreg;
4283 	int dreg = alloc_ireg (cfg);
4284 	int tmp_reg = alloc_preg (cfg);
4286 #ifndef DISABLE_REMOTING
4287 	int klass_reg = alloc_preg (cfg);
4288 	NEW_BBLOCK (cfg, end_bb);
4291 	NEW_BBLOCK (cfg, ok_result_bb);
/* NULL always passes castclass (result 0). */
4293 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4294 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4296 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4298 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4299 #ifndef DISABLE_REMOTING
4300 		NEW_BBLOCK (cfg, interface_fail_bb);
4302 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy without custom type info
 * may proceed (result 1); any other class throws. */
4303 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4304 		MONO_START_BB (cfg, interface_fail_bb);
4305 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4307 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4309 		tmp_reg = alloc_preg (cfg);
4310 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4311 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4312 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4314 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4315 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4317 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4318 		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4319 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface class. */
4322 #ifndef DISABLE_REMOTING
4323 		NEW_BBLOCK (cfg, no_proxy_bb);
4325 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4326 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* For a transparent proxy with custom type info, test against the remote
 * class's proxy_class; on failure the result is 1 (undeterminable). */
4327 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4329 		tmp_reg = alloc_preg (cfg);
4330 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4331 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4333 		tmp_reg = alloc_preg (cfg);
4334 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4335 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4336 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4338 		NEW_BBLOCK (cfg, fail_1_bb);
4340 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4342 		MONO_START_BB (cfg, fail_1_bb);
4344 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4345 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4347 		MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass throws. */
4349 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4351 		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4355 	MONO_START_BB (cfg, ok_result_bb);
4357 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4359 #ifndef DISABLE_REMOTING
4360 	MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 value on the stack. */
4364 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4366 	ins->type = STACK_I4;
4372 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that inlines the work of mono_delegate_ctor (): allocate a
 * delegate of type KLASS and fill in its target, method, method_code and
 * invoke_impl fields, with GC write barriers where enabled. The remaining
 * runtime checks are performed by the delegate trampoline installed as
 * invoke_impl. Returns the new delegate object instruction; returns NULL and
 * sets the cfg exception on error.
 */
4374 static G_GNUC_UNUSED MonoInst*
4375 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4379 	gpointer *trampoline;
4380 	MonoInst *obj, *method_ins, *tramp_ins;
4384 	obj = handle_alloc (cfg, klass, FALSE, 0);
4388 	/* Inline the contents of mono_delegate_ctor */
4390 	/* Set target field */
4391 	/* Optimize away setting of NULL target */
4392 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4393 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4394 		if (cfg->gen_write_barriers) {
4395 			dreg = alloc_preg (cfg);
4396 			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4397 			emit_write_barrier (cfg, ptr, target);
4401 	/* Set method field */
4402 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4403 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4404 	if (cfg->gen_write_barriers) {
4405 		dreg = alloc_preg (cfg);
4406 		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4407 		emit_write_barrier (cfg, ptr, method_ins);
4410 	 * To avoid looking up the compiled code belonging to the target method
4411 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4412 	 * store it, and we fill it after the method has been compiled.
4414 	if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4415 		MonoInst *code_slot_ins;
4418 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code slot is cached per-domain under the domain lock; allocate it on
 * first use for this method. */
4420 			domain = mono_domain_get ();
4421 			mono_domain_lock (domain);
4422 			if (!domain_jit_info (domain)->method_code_hash)
4423 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4424 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4426 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4427 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4429 			mono_domain_unlock (domain);
4431 			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4433 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4436 	/* Set invoke_impl field */
4437 	if (cfg->compile_aot) {
4438 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4440 		trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4441 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4443 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4445 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va () icall to allocate a
 * multi-dimensional array of the given RANK, taking the dimension arguments
 * from SP. The vararg convention disables LLVM compilation for this method.
 */
4451 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4453 	MonoJitICallInfo *info;
4455 	/* Need to register the icall so it gets an icall wrapper */
4456 	info = mono_get_array_new_va_icall (rank);
4458 	cfg->flags |= MONO_CFG_HAS_VARARGS;
4460 	/* mono_array_new_va () needs a vararg calling convention */
4461 	cfg->disable_llvm = TRUE;
4463 	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4464 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Insert an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var, plus a dummy use in the exit block so the variable
 * stays live for the whole method even when its real uses are only generated
 * by the back ends. No-op if there is no got_var or it was already allocated.
 */
4468 mono_emit_load_got_addr (MonoCompile *cfg)
4470 	MonoInst *getaddr, *dummy_use;
4472 	if (!cfg->got_var || cfg->got_var_allocated)
4475 	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4476 	getaddr->cil_code = cfg->header->code;
4477 	getaddr->dreg = cfg->got_var->dreg;
4479 	/* Add it to the start of the first bblock */
4480 	if (cfg->bb_entry->code) {
4481 		getaddr->next = cfg->bb_entry->code;
4482 		cfg->bb_entry->code = getaddr;
4485 		MONO_ADD_INS (cfg->bb_entry, getaddr);
4487 	cfg->got_var_allocated = TRUE;
4490 	 * Add a dummy use to keep the got_var alive, since real uses might
4491 	 * only be generated by the back ends.
4492 	 * Add it to end_bblock, so the variable's lifetime covers the whole
4494 	 * It would be better to make the usage of the got var explicit in all
4495 	 * cases when the backend needs it (i.e. calls, throw etc.), so this
4496 	 * wouldn't be needed.
4498 	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4499 	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit: read from the MONO_INLINELIMIT
 * environment variable on first use, else INLINE_LENGTH_LIMIT. */
4502 static int inline_limit;
4503 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * @cfg: compilation context of the caller
 * @method: candidate callee
 *
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects (among others): generic-sharing contexts, inline depth > 10,
 * NoInlining/Synchronized methods, MarshalByRef classes, bodies at or over
 * inline_limit (unless AggressiveInlining), methods whose class still needs
 * its cctor run, methods with declarative security, and (on soft-float
 * fallback targets) methods with R4 parameters or return.
 * NOTE(review): sampled dump — return statements and some lines are missing
 * between the numbered lines below.
 */
4506 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4508 MonoMethodHeaderSummary header;
4510 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4511 MonoMethodSignature *sig = mono_method_signature (method);
4515 if (cfg->generic_sharing_context)
4518 if (cfg->inline_depth > 10)
4521 #ifdef MONO_ARCH_HAVE_LMF_OPS
4522 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4523 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4524 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4529 if (!mono_method_get_header_summary (method, &header))
4532 /* runtime, icall and pinvoke are checked by the summary call */
4533 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4534 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4535 (mono_class_is_marshalbyref (method->klass)) ||
4539 /* also consider num_locals? */
4540 /* Do the size check early to avoid creating vtables */
4541 if (!inline_limit_inited) {
4542 if (g_getenv ("MONO_INLINELIMIT"))
4543 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4545 inline_limit = INLINE_LENGTH_LIMIT;
4546 inline_limit_inited = TRUE;
4548 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4552 * if we can initialize the class of the method right away, we do,
4553 * otherwise we don't allow inlining if the class needs initialization,
4554 * since it would mean inserting a call to mono_runtime_class_init()
4555 * inside the inlined code
4557 if (!(cfg->opt & MONO_OPT_SHARED)) {
4558 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4559 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4560 vtable = mono_class_vtable (cfg->domain, method->klass);
4563 mono_runtime_class_init (vtable);
4564 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4565 if (cfg->run_cctors && method->klass->has_cctor) {
4566 /* FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4567 if (!method->klass->runtime_info)
4568 /* No vtable created yet */
4570 vtable = mono_class_vtable (cfg->domain, method->klass);
4573 /* This makes it so that inlining cannot trigger */
4574 /* .cctors: too many apps depend on them */
4575 /* running with a specific order... */
4576 if (! vtable->initialized)
4578 mono_runtime_class_init (vtable);
4580 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4581 if (!method->klass->runtime_info)
4582 /* No vtable created yet */
4584 vtable = mono_class_vtable (cfg->domain, method->klass);
4587 if (!vtable->initialized)
4592 * If we're compiling for shared code
4593 * the cctor will need to be run at aot method load time, for example,
4594 * or at the end of the compilation of the inlining method.
4596 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4601 * CAS - do not inline methods with declarative security
4602 * Note: this has to be before any possible return TRUE;
4604 if (mono_security_method_has_declsec (method))
4607 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods taking or returning R4. */
4608 if (mono_arch_is_soft_float ()) {
4610 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4612 for (i = 0; i < sig->param_count; ++i)
4613 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 * Return whether a static-field access on KLASS from METHOD requires the
 * class constructor to be run first. When not compiling AOT, an already
 * initialized vtable means no cctor run is needed; BeforeFieldInit classes
 * and accesses from a static method of the same class are also exempt.
 */
4622 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4624 if (!cfg->compile_aot) {
4626 if (vtable->initialized)
4630 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4633 if (!mono_class_needs_cctor_run (klass, method))
4636 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4637 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * @arr: array instruction
 * @index: index instruction
 * @bcheck: whether to emit a bounds check
 *
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR (element class KLASS). Handles 64-bit index widening, an x86
 * LEA fast path for power-of-two element sizes, and gsharedvt classes
 * whose element size comes from the RGCTX at run time.
 */
4644 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4648 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4651 if (mini_is_gsharedvt_klass (cfg, klass)) {
4654 mono_class_init (klass);
4655 size = mono_class_array_element_size (klass);
4658 mult_reg = alloc_preg (cfg);
4659 array_reg = arr->dreg;
4660 index_reg = index->dreg;
4662 #if SIZEOF_REGISTER == 8
4663 /* The array reg is 64 bits but the index reg is only 32 */
4664 if (COMPILE_LLVM (cfg)) {
4666 index2_reg = index_reg;
4668 index2_reg = alloc_preg (cfg);
4669 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4672 if (index->type == STACK_I8) {
4673 index2_reg = alloc_preg (cfg);
4674 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4676 index2_reg = index_reg;
4681 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4683 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: a single LEA when the element size is a power of two. */
4684 if (size == 1 || size == 2 || size == 4 || size == 8) {
4685 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4687 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4688 ins->klass = mono_class_get_element_class (klass);
4689 ins->type = STACK_MP;
4695 add_reg = alloc_ireg_mp (cfg);
4698 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at run time, fetch it from the RGCTX. */
4701 g_assert (cfg->generic_sharing_context);
4702 context_used = mini_class_check_context_used (cfg, klass);
4703 g_assert (context_used);
4704 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4705 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4709 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4710 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4711 ins->klass = mono_class_get_element_class (klass);
4712 ins->type = STACK_MP;
4713 MONO_ADD_INS (cfg->cbb, ins);
4718 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the element address for a two-dimensional array:
 * loads the bounds descriptor, subtracts each dimension's lower bound,
 * range-checks both adjusted indices against the dimension lengths
 * (IndexOutOfRangeException on failure), then computes
 * ((idx1 * len2) + idx2) * element_size + offsetof(vector).
 * Only compiled when the target has real mul/div (uses OP_PMUL).
 */
4720 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4722 int bounds_reg = alloc_preg (cfg);
4723 int add_reg = alloc_ireg_mp (cfg);
4724 int mult_reg = alloc_preg (cfg);
4725 int mult2_reg = alloc_preg (cfg);
4726 int low1_reg = alloc_preg (cfg);
4727 int low2_reg = alloc_preg (cfg);
4728 int high1_reg = alloc_preg (cfg);
4729 int high2_reg = alloc_preg (cfg);
4730 int realidx1_reg = alloc_preg (cfg);
4731 int realidx2_reg = alloc_preg (cfg);
4732 int sum_reg = alloc_preg (cfg);
4733 int index1, index2, tmpreg;
4737 mono_class_init (klass);
4738 size = mono_class_array_element_size (klass);
4740 index1 = index_ins1->dreg;
4741 index2 = index_ins2->dreg;
4743 #if SIZEOF_REGISTER == 8
4744 /* The array reg is 64 bits but the index reg is only 32 */
4745 if (COMPILE_LLVM (cfg)) {
4748 tmpreg = alloc_preg (cfg);
4749 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4751 tmpreg = alloc_preg (cfg);
4752 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4756 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4760 /* range checking */
4761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4762 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4765 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4766 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4767 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4768 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4769 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4770 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension's bounds live at bounds[1], i.e. one MonoArrayBounds further. */
4772 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4773 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4774 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4776 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4777 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4778 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4780 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4781 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4783 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4784 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4786 ins->type = STACK_MP;
4788 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * @cmethod: the Get/Set/Address array accessor being compiled
 * @sp: stack slots: array, indices, (and the value for setters)
 * @is_set: TRUE for a setter (last signature param is the value)
 *
 * Dispatch element-address computation by rank: rank 1 and (with
 * MONO_OPT_INTRINS, on targets with real mul) rank 2 are inlined;
 * higher ranks call a marshalling helper (mono_marshal_get_array_address).
 */
4795 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4799 MonoMethod *addr_method;
4802 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4805 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4807 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4808 /* emit_ldelema_2 depends on OP_LMUL */
4809 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4810 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4814 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4815 addr_method = mono_marshal_get_array_address (rank, element_size);
4816 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
4821 static MonoBreakPolicy
4822 always_insert_breakpoint (MonoMethod *method)
4824 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; replaced via mono_set_break_policy (). */
4827 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4830 * mono_set_break_policy:
4831 * policy_callback: the new callback function
4833 * Allow embedders to decide whether to actually obey breakpoint instructions
4834 * (both break IL instructions and Debugger.Break () method calls), for example
4835 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4836 * untrusted or semi-trusted code.
4838 * @policy_callback will be called every time a break point instruction needs to
4839 * be inserted with the method argument being the method that calls Debugger.Break()
4840 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4841 * if it wants the breakpoint to not be effective in the given method.
4842 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always insert breakpoints). See the doc comment above. */
4845 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4847 if (policy_callback)
4848 break_policy_func = policy_callback;
4850 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:   (sic — name misspelling kept; callers use it)
 *
 * Consult the installed break policy for METHOD. ALWAYS => insert the
 * breakpoint; NEVER => skip it; ON_DBG is no longer supported (warns);
 * any other value warns about a bad callback return.
 */
4854 should_insert_brekpoint (MonoMethod *method) {
4855 switch (break_policy_func (method)) {
4856 case MONO_BREAK_POLICY_ALWAYS:
4858 case MONO_BREAK_POLICY_NEVER:
4860 case MONO_BREAK_POLICY_ON_DBG:
4861 g_warning ("mdb no longer supported");
4864 g_warning ("Incorrect value returned from break policy callback");
4869 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * @args: [0]=array, [1]=index, [2]=value address
 * @is_set: nonzero for SetGenericValueImpl, zero for Get
 *
 * Inline the copy between an array element and the address in args[2];
 * bounds checking is the caller's responsibility. Stores into reference
 * arrays also get a write barrier.
 */
4871 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4873 MonoInst *addr, *store, *load;
4874 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4876 /* the bounds check is already done by the callers */
4877 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4879 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4880 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4881 if (mini_type_is_reference (cfg, fsig->params [2]))
4882 emit_write_barrier (cfg, addr, load);
4884 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4885 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type (thin wrapper over
 * mini_type_is_reference on the class's byval type). */
4892 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4894 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * @sp: [0]=array, [1]=index, [2]=value
 * @safety_checks: whether to emit bounds/type checks
 *
 * Emit IR for a stelem. Reference-type stores (of non-null values) with
 * safety checks go through the virtual stelemref helper, which performs
 * the array covariance check. Otherwise: gsharedvt stores use a computed
 * address with OP_STOREV_MEMBASE; a constant index becomes a direct
 * membase store after an optional bounds check; the general case computes
 * the element address and stores, adding a write barrier for reference
 * element types.
 */
4898 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4900 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4901 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4902 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4903 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4904 MonoInst *iargs [3];
4907 mono_class_setup_vtable (obj_array);
4908 g_assert (helper->slot);
4910 if (sp [0]->type != STACK_OBJ)
4912 if (sp [2]->type != STACK_OBJ)
4919 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4923 if (mini_is_gsharedvt_klass (cfg, klass)) {
4926 // FIXME-VT: OP_ICONST optimization
4927 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4928 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4929 ins->opcode = OP_STOREV_MEMBASE;
4930 } else if (sp [1]->opcode == OP_ICONST) {
4931 int array_reg = sp [0]->dreg;
4932 int index_reg = sp [1]->dreg;
4933 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4936 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4937 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4939 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4940 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4941 if (generic_class_is_reference_type (cfg, klass))
4942 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 * Inline Array.UnsafeStore/UnsafeLoad: element type comes from params[2]
 * for stores or the return type for loads; stores reuse emit_array_store
 * with safety checks disabled, loads compute the address (no bounds check)
 * and load the element.
 */
4949 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4954 eklass = mono_class_from_mono_type (fsig->params [2]);
4956 eklass = mono_class_from_mono_type (fsig->ret);
4960 return emit_array_store (cfg, eklass, args, FALSE);
4962 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4963 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/* Try to replace a constructor call with an intrinsic; currently only
 * SIMD ctors (when MONO_OPT_SIMD is enabled). Returns NULL when no
 * intrinsic applies. */
4969 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4971 MonoInst *ins = NULL;
4972 #ifdef MONO_ARCH_SIMD_INTRINSICS
4973 if (cfg->opt & MONO_OPT_SIMD) {
4974 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND into the current bblock. */
4984 emit_memory_barrier (MonoCompile *cfg, int kind)
4986 MonoInst *ins = NULL;
4987 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4988 MONO_ADD_INS (cfg->cbb, ins);
4989 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 * Intrinsics usable when compiling with the LLVM backend: Math.Sin/Cos/
 * Sqrt/Abs(double) become single float ops, and (with MONO_OPT_CMOV)
 * Math.Min/Max on I4/U4/I8/U8 become min/max opcodes. Returns NULL when
 * nothing applies.
 */
4995 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4997 MonoInst *ins = NULL;
5000 /* The LLVM backend supports these intrinsics */
5001 if (cmethod->klass == mono_defaults.math_class) {
5002 if (strcmp (cmethod->name, "Sin") == 0) {
5004 } else if (strcmp (cmethod->name, "Cos") == 0) {
5006 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5008 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5013 MONO_INST_NEW (cfg, ins, opcode);
5014 ins->type = STACK_R8;
5015 ins->dreg = mono_alloc_freg (cfg);
5016 ins->sreg1 = args [0]->dreg;
5017 MONO_ADD_INS (cfg->cbb, ins);
5021 if (cfg->opt & MONO_OPT_CMOV) {
5022 if (strcmp (cmethod->name, "Min") == 0) {
5023 if (fsig->params [0]->type == MONO_TYPE_I4)
5025 if (fsig->params [0]->type == MONO_TYPE_U4)
5026 opcode = OP_IMIN_UN;
5027 else if (fsig->params [0]->type == MONO_TYPE_I8)
5029 else if (fsig->params [0]->type == MONO_TYPE_U8)
5030 opcode = OP_LMIN_UN;
5031 } else if (strcmp (cmethod->name, "Max") == 0) {
5032 if (fsig->params [0]->type == MONO_TYPE_I4)
5034 if (fsig->params [0]->type == MONO_TYPE_U4)
5035 opcode = OP_IMAX_UN;
5036 else if (fsig->params [0]->type == MONO_TYPE_I8)
5038 else if (fsig->params [0]->type == MONO_TYPE_U8)
5039 opcode = OP_LMAX_UN;
5044 MONO_INST_NEW (cfg, ins, opcode);
5045 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5046 ins->dreg = mono_alloc_ireg (cfg);
5047 ins->sreg1 = args [0]->dreg;
5048 ins->sreg2 = args [1]->dreg;
5049 MONO_ADD_INS (cfg->cbb, ins);
/* Intrinsics that remain valid under generic sharing: currently only
 * Array.UnsafeStore/UnsafeLoad. Returns NULL when nothing applies. */
5057 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5059 if (cmethod->klass == mono_defaults.array_class) {
5060 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5061 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5062 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5063 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 * Main intrinsics dispatcher: try to replace a call to CMETHOD with
 * inline IR. Handles String (get_Chars/get_Length/InternalSetChar),
 * Object (GetType/InternalGetHashCode/.ctor), Array (Get/SetGenericValueImpl,
 * GetLength/GetLowerBound(0), get_Rank, get_Length), RuntimeHelpers,
 * Thread, Monitor fast paths, Interlocked atomics, Debugger.Break,
 * Environment.get_IsRunningOnWindows, ObjC Selector.GetHandle (AOT),
 * SIMD intrinsics, LLVM-specific intrinsics, and finally defers to
 * mono_arch_emit_inst_for_method. Returns NULL if the call is not
 * replaced.
 * NOTE(review): sampled dump — returns, #else/#endif and some lines are
 * missing between the numbered lines below.
 */
5070 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5072 MonoInst *ins = NULL;
5074 static MonoClass *runtime_helpers_class = NULL;
5075 if (! runtime_helpers_class)
5076 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5077 "System.Runtime.CompilerServices", "RuntimeHelpers");
5079 if (cmethod->klass == mono_defaults.string_class) {
5080 if (strcmp (cmethod->name, "get_Chars") == 0) {
5081 int dreg = alloc_ireg (cfg);
5082 int index_reg = alloc_preg (cfg);
5083 int mult_reg = alloc_preg (cfg);
5084 int add_reg = alloc_preg (cfg);
5086 #if SIZEOF_REGISTER == 8
5087 /* The array reg is 64 bits but the index reg is only 32 */
5088 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5090 index_reg = args [1]->dreg;
5092 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5094 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5095 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5096 add_reg = ins->dreg;
5097 /* Avoid a warning */
5099 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5102 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5103 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5104 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5105 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5107 type_from_op (ins, NULL, NULL);
5109 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5110 int dreg = alloc_ireg (cfg);
5111 /* Decompose later to allow more optimizations */
5112 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5113 ins->type = STACK_I4;
5114 ins->flags |= MONO_INST_FAULT;
5115 cfg->cbb->has_array_access = TRUE;
5116 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5119 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5120 int mult_reg = alloc_preg (cfg);
5121 int add_reg = alloc_preg (cfg);
5123 /* The corlib functions check for oob already. */
5124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5125 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5126 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5127 return cfg->cbb->last_ins;
5130 } else if (cmethod->klass == mono_defaults.object_class) {
5132 if (strcmp (cmethod->name, "GetType") == 0) {
5133 int dreg = alloc_ireg_ref (cfg);
5134 int vt_reg = alloc_preg (cfg);
5135 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5136 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5137 type_from_op (ins, NULL, NULL);
5140 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash of the (non-moving) object address; invalid with a moving GC. */
5141 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5142 int dreg = alloc_ireg (cfg);
5143 int t1 = alloc_ireg (cfg);
5145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5146 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5147 ins->type = STACK_I4;
5151 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5152 MONO_INST_NEW (cfg, ins, OP_NOP);
5153 MONO_ADD_INS (cfg->cbb, ins);
5157 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
5158 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5159 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5161 #ifndef MONO_BIG_ARRAYS
5163 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5166 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5167 int dreg = alloc_ireg (cfg);
5168 int bounds_reg = alloc_ireg_mp (cfg);
5169 MonoBasicBlock *end_bb, *szarray_bb;
5170 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5172 NEW_BBLOCK (cfg, end_bb);
5173 NEW_BBLOCK (cfg, szarray_bb);
5175 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5176 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5179 /* Non-szarray case */
5181 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5182 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5184 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5185 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5186 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5187 MONO_START_BB (cfg, szarray_bb);
5190 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5191 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5193 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5194 MONO_START_BB (cfg, end_bb);
5196 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5197 ins->type = STACK_I4;
/* Only getters from here on. */
5203 if (cmethod->name [0] != 'g')
5206 if (strcmp (cmethod->name, "get_Rank") == 0) {
5207 int dreg = alloc_ireg (cfg);
5208 int vtable_reg = alloc_preg (cfg);
5209 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5210 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5211 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5212 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5213 type_from_op (ins, NULL, NULL);
5216 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5217 int dreg = alloc_ireg (cfg);
5219 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5220 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5221 type_from_op (ins, NULL, NULL);
5226 } else if (cmethod->klass == runtime_helpers_class) {
5228 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5229 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5233 } else if (cmethod->klass == mono_defaults.thread_class) {
5234 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5235 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5236 MONO_ADD_INS (cfg->cbb, ins);
5238 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5239 return emit_memory_barrier (cfg, FullBarrier);
5241 } else if (cmethod->klass == mono_defaults.monitor_class) {
5243 /* FIXME this should be integrated to the check below once we support the trampoline version */
5244 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5245 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5246 MonoMethod *fast_method = NULL;
5248 /* Avoid infinite recursion */
5249 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5252 fast_method = mono_monitor_get_fast_path (cmethod);
5256 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5260 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5261 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5264 if (COMPILE_LLVM (cfg)) {
5266 * Pass the argument normally, the LLVM backend will handle the
5267 * calling convention problems.
5269 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5271 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5272 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5273 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5274 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5277 return (MonoInst*)call;
5278 } else if (strcmp (cmethod->name, "Exit") == 0) {
5281 if (COMPILE_LLVM (cfg)) {
5282 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5284 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5285 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5286 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5287 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5290 return (MonoInst*)call;
5292 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5294 MonoMethod *fast_method = NULL;
5296 /* Avoid infinite recursion */
5297 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5298 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5299 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5302 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5303 strcmp (cmethod->name, "Exit") == 0)
5304 fast_method = mono_monitor_get_fast_path (cmethod);
5308 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5311 } else if (cmethod->klass->image == mono_defaults.corlib &&
5312 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5313 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5316 #if SIZEOF_REGISTER == 8
5317 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5318 /* 64 bit reads are already atomic */
5319 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5320 ins->dreg = mono_alloc_preg (cfg);
5321 ins->inst_basereg = args [0]->dreg;
5322 ins->inst_offset = 0;
5323 MONO_ADD_INS (cfg->cbb, ins);
5327 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1/-1. */
5328 if (strcmp (cmethod->name, "Increment") == 0) {
5329 MonoInst *ins_iconst;
5332 if (fsig->params [0]->type == MONO_TYPE_I4)
5333 opcode = OP_ATOMIC_ADD_NEW_I4;
5334 #if SIZEOF_REGISTER == 8
5335 else if (fsig->params [0]->type == MONO_TYPE_I8)
5336 opcode = OP_ATOMIC_ADD_NEW_I8;
5339 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5340 ins_iconst->inst_c0 = 1;
5341 ins_iconst->dreg = mono_alloc_ireg (cfg);
5342 MONO_ADD_INS (cfg->cbb, ins_iconst);
5344 MONO_INST_NEW (cfg, ins, opcode);
5345 ins->dreg = mono_alloc_ireg (cfg);
5346 ins->inst_basereg = args [0]->dreg;
5347 ins->inst_offset = 0;
5348 ins->sreg2 = ins_iconst->dreg;
5349 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5350 MONO_ADD_INS (cfg->cbb, ins);
5352 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5353 MonoInst *ins_iconst;
5356 if (fsig->params [0]->type == MONO_TYPE_I4)
5357 opcode = OP_ATOMIC_ADD_NEW_I4;
5358 #if SIZEOF_REGISTER == 8
5359 else if (fsig->params [0]->type == MONO_TYPE_I8)
5360 opcode = OP_ATOMIC_ADD_NEW_I8;
5363 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5364 ins_iconst->inst_c0 = -1;
5365 ins_iconst->dreg = mono_alloc_ireg (cfg);
5366 MONO_ADD_INS (cfg->cbb, ins_iconst);
5368 MONO_INST_NEW (cfg, ins, opcode);
5369 ins->dreg = mono_alloc_ireg (cfg);
5370 ins->inst_basereg = args [0]->dreg;
5371 ins->inst_offset = 0;
5372 ins->sreg2 = ins_iconst->dreg;
5373 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5374 MONO_ADD_INS (cfg->cbb, ins);
5376 } else if (strcmp (cmethod->name, "Add") == 0) {
5379 if (fsig->params [0]->type == MONO_TYPE_I4)
5380 opcode = OP_ATOMIC_ADD_NEW_I4;
5381 #if SIZEOF_REGISTER == 8
5382 else if (fsig->params [0]->type == MONO_TYPE_I8)
5383 opcode = OP_ATOMIC_ADD_NEW_I8;
5387 MONO_INST_NEW (cfg, ins, opcode);
5388 ins->dreg = mono_alloc_ireg (cfg);
5389 ins->inst_basereg = args [0]->dreg;
5390 ins->inst_offset = 0;
5391 ins->sreg2 = args [1]->dreg;
5392 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5393 MONO_ADD_INS (cfg->cbb, ins);
5396 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5398 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5399 if (strcmp (cmethod->name, "Exchange") == 0) {
5401 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5403 if (fsig->params [0]->type == MONO_TYPE_I4)
5404 opcode = OP_ATOMIC_EXCHANGE_I4;
5405 #if SIZEOF_REGISTER == 8
5406 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5407 (fsig->params [0]->type == MONO_TYPE_I))
5408 opcode = OP_ATOMIC_EXCHANGE_I8;
5410 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
5411 opcode = OP_ATOMIC_EXCHANGE_I4;
5416 MONO_INST_NEW (cfg, ins, opcode);
5417 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5418 ins->inst_basereg = args [0]->dreg;
5419 ins->inst_offset = 0;
5420 ins->sreg2 = args [1]->dreg;
5421 MONO_ADD_INS (cfg->cbb, ins);
5423 switch (fsig->params [0]->type) {
5425 ins->type = STACK_I4;
5429 ins->type = STACK_I8;
5431 case MONO_TYPE_OBJECT:
5432 ins->type = STACK_OBJ;
5435 g_assert_not_reached ();
5438 if (cfg->gen_write_barriers && is_ref)
5439 emit_write_barrier (cfg, args [0], args [1]);
5441 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5443 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5444 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5446 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5447 if (fsig->params [1]->type == MONO_TYPE_I4)
5449 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5450 size = sizeof (gpointer);
5451 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5454 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5455 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5456 ins->sreg1 = args [0]->dreg;
5457 ins->sreg2 = args [1]->dreg;
5458 ins->sreg3 = args [2]->dreg;
5459 ins->type = STACK_I4;
5460 MONO_ADD_INS (cfg->cbb, ins);
5461 } else if (size == 8) {
5462 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5463 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5464 ins->sreg1 = args [0]->dreg;
5465 ins->sreg2 = args [1]->dreg;
5466 ins->sreg3 = args [2]->dreg;
5467 ins->type = STACK_I8;
5468 MONO_ADD_INS (cfg->cbb, ins);
5470 /* g_assert_not_reached (); */
5472 if (cfg->gen_write_barriers && is_ref)
5473 emit_write_barrier (cfg, args [0], args [1]);
5475 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5477 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5478 ins = emit_memory_barrier (cfg, FullBarrier);
5482 } else if (cmethod->klass->image == mono_defaults.corlib) {
5483 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5484 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5485 if (should_insert_brekpoint (cfg->method)) {
5486 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5488 MONO_INST_NEW (cfg, ins, OP_NOP);
5489 MONO_ADD_INS (cfg->cbb, ins);
5493 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5494 && strcmp (cmethod->klass->name, "Environment") == 0) {
5496 EMIT_NEW_ICONST (cfg, ins, 1);
5498 EMIT_NEW_ICONST (cfg, ins, 0);
5502 } else if (cmethod->klass == mono_defaults.math_class) {
5504 * There is general branches code for Min/Max, but it does not work for
5506 * http://everything2.com/?node_id=1051618
/* ObjC Selector.GetHandle with a constant string, AOT only: resolve the
 * selector at load time instead of calling GetHandle. */
5508 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5509 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5511 MonoJumpInfoToken *ji;
5514 cfg->disable_llvm = TRUE;
5516 if (args [0]->opcode == OP_GOT_ENTRY) {
5517 pi = args [0]->inst_p1;
5518 g_assert (pi->opcode == OP_PATCH_INFO);
5519 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5522 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5523 ji = args [0]->inst_p0;
5526 NULLIFY_INS (args [0]);
5529 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5530 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5531 ins->dreg = mono_alloc_ireg (cfg);
5533 ins->inst_p0 = mono_string_to_utf8 (s);
5534 MONO_ADD_INS (cfg->cbb, ins);
5539 #ifdef MONO_ARCH_SIMD_INTRINSICS
5540 if (cfg->opt & MONO_OPT_SIMD) {
5541 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5547 if (COMPILE_LLVM (cfg)) {
5548 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
/* Last chance: architecture-specific intrinsics. */
5553 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5557 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 * Redirect selected runtime calls to better implementations. Currently:
 * String.InternalAllocateStr is replaced with the GC's managed allocator
 * (when allocation profiling and shared opts are off). Returns NULL when
 * no redirection applies.
 */
5560 inline static MonoInst*
5561 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5562 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5564 if (method->klass == mono_defaults.string_class) {
5565 /* managed string allocation support */
5566 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5567 MonoInst *iargs [2];
5568 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5569 MonoMethod *managed_alloc = NULL;
5571 g_assert (vtable); /* Should not fail since it's System.String */
5572 #ifndef MONO_CROSS_COMPILE
5573 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5577 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5578 iargs [1] = args [0];
5579 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments on the evaluation stack SP into freshly created
 * local variables, wiring them up as cfg->args [i] so the inlined callee's
 * ldarg/starg opcodes address them like normal arguments.
 */
5586 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5588 MonoInst *store, *temp;
5591 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The 'this' argument has no entry in sig->params; take its type from the stack. */
5592 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5595 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5596 * would be different than the MonoInst's used to represent arguments, and
5597 * the ldelema implementation can't deal with that.
5598 * Solution: When ldelema is used on an inline argument, create a var for
5599 * it, emit ldelema on that var, and emit the saving code below in
5600 * inline_method () if needed.
5602 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5603 cfg->args [i] = temp;
5604 /* This uses cfg->args [i] which is set by the preceding line */
5605 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5606 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids: when enabled, inlining can be restricted by environment
 * variables to methods whose full name starts with a given prefix.  Useful
 * for bisecting inliner-related miscompilations.
 */
5611 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5612 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5614 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/* Return whether CALLED_METHOD may be inlined, based on the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (prefix match). */
5616 check_inline_called_method_name_limit (MonoMethod *called_method)
/* Cached across calls; the env var is read only once. */
5619 static const char *limit = NULL;
5621 if (limit == NULL) {
5622 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5624 if (limit_string != NULL)
5625 limit = limit_string;
/* Empty limit means "no restriction" (handled in the missing else path). */
5630 if (limit [0] != '\0') {
5631 char *called_method_name = mono_method_full_name (called_method, TRUE);
5633 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5634 g_free (called_method_name);
5636 //return (strncmp_result <= 0);
5637 return (strncmp_result == 0);
5644 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/* Companion to check_inline_called_method_name_limit: restrict inlining to
 * call sites inside CALLER_METHOD when its full name matches the prefix in
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var. */
5646 check_inline_caller_method_name_limit (MonoMethod *caller_method)
/* Cached across calls; the env var is read only once. */
5649 static const char *limit = NULL;
5651 if (limit == NULL) {
5652 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5653 if (limit_string != NULL) {
5654 limit = limit_string;
/* Empty limit means "no restriction". */
5660 if (limit [0] != '\0') {
5661 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5663 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5664 g_free (caller_method_name);
5666 //return (strncmp_result <= 0);
5667 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that loads the default (zero) value of type RTYPE into DREG.
 * Used to give inline return variables a defined value on paths that never
 * set them (e.g. throw-only callees).
 */
5675 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared zero constant for the R4/R8 case; its address is stored in inst_p0. */
5677 static double r8_0 = 0.0;
5680 int t = rtype->type;
/* Enums are initialized like their underlying integral type. */
5682 if (t == MONO_TYPE_VALUETYPE && rtype->data.klass->enumtype)
5683 t = mono_class_enum_basetype (rtype->data.klass)->type;
5685 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5686 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5687 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5688 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5689 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5690 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5691 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5692 ins->type = STACK_R8;
5693 ins->inst_p0 = (void*)&r8_0;
5695 MONO_ADD_INS (cfg->cbb, ins);
/* Value types (incl. generic instances that are valuetypes) get VZERO. */
5696 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5697 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5698 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Type variables known (under gsharedvt) to be value types also get VZERO. */
5699 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5700 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is a reference/pointer: NULL. */
5702 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_init_local:
 *
 *   Emit IR initializing local variable LOCAL to the default value of TYPE.
 */
5707 emit_init_local (MonoCompile *cfg, int local, MonoType *type)
5709 MonoInst *var = cfg->locals [local];
/* Under soft-float the zero must go through a temp reg + explicit locstore
 * so the FP value is handled by the soft-float decomposition. */
5710 if (COMPILE_SOFT_FLOAT (cfg)) {
5712 int reg = alloc_dreg (cfg, var->type);
5713 emit_init_rvar (cfg, reg, type);
5714 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5716 emit_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current position (cfg->cbb).  The
 * callee's IL is translated via mono_method_to_ir () into fresh start/end
 * basic blocks; all the MonoCompile fields that translation clobbers are
 * saved first and restored afterwards.  If the computed inline cost is
 * acceptable (or INLINE_ALWAYS is set) the new blocks are linked/merged into
 * the caller's CFG; otherwise every change is rolled back.
 * RVAR, when the callee is non-void, is a local receiving the return value.
 */
5721 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5722 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5724 MonoInst *ins, *rvar = NULL;
5725 MonoMethodHeader *cheader;
5726 MonoBasicBlock *ebblock, *sbblock;
5728 MonoMethod *prev_inlined_method;
5729 MonoInst **prev_locals, **prev_args;
5730 MonoType **prev_arg_types;
5731 guint prev_real_offset;
5732 GHashTable *prev_cbb_hash;
5733 MonoBasicBlock **prev_cil_offset_to_bb;
5734 MonoBasicBlock *prev_cbb;
5735 unsigned char* prev_cil_start;
5736 guint32 prev_cil_offset_to_bb_len;
5737 MonoMethod *prev_current_method;
5738 MonoGenericContext *prev_generic_context;
5739 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5741 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven filters for debugging the inliner. */
5743 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5744 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5747 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5748 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5752 if (cfg->verbose_level > 2)
5753 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once. */
5755 if (!cmethod->inline_info) {
5756 cfg->stat_inlineable_methods++;
5757 cmethod->inline_info = 1;
5760 /* allocate local variables */
5761 cheader = mono_method_get_header (cmethod);
/* Header load failure: propagate the loader error only for mandatory
 * (inline_always) inlines, otherwise just give up quietly. */
5763 if (cheader == NULL || mono_loader_get_last_error ()) {
5764 MonoLoaderError *error = mono_loader_get_last_error ();
5767 mono_metadata_free_mh (cheader);
5768 if (inline_always && error)
5769 mono_cfg_set_exception (cfg, error->exception_type);
5771 mono_loader_clear_error ();
5775 /*Must verify before creating locals as it can cause the JIT to assert.*/
5776 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5777 mono_metadata_free_mh (cheader);
5781 /* allocate space to store the return value */
5782 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5783 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee's locals. */
5786 prev_locals = cfg->locals;
5787 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5788 for (i = 0; i < cheader->num_locals; ++i)
5789 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5791 /* allocate start and end blocks */
5792 /* This is needed so if the inline is aborted, we can clean up */
5793 NEW_BBLOCK (cfg, sbblock);
5794 sbblock->real_offset = real_offset;
5796 NEW_BBLOCK (cfg, ebblock);
5797 ebblock->block_num = cfg->num_bblocks++;
5798 ebblock->real_offset = real_offset;
/* Save every MonoCompile field that mono_method_to_ir () will overwrite;
 * they are restored in the same order below. */
5800 prev_args = cfg->args;
5801 prev_arg_types = cfg->arg_types;
5802 prev_inlined_method = cfg->inlined_method;
5803 cfg->inlined_method = cmethod;
5804 cfg->ret_var_set = FALSE;
5805 cfg->inline_depth ++;
5806 prev_real_offset = cfg->real_offset;
5807 prev_cbb_hash = cfg->cbb_hash;
5808 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5809 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5810 prev_cil_start = cfg->cil_start;
5811 prev_cbb = cfg->cbb;
5812 prev_current_method = cfg->current_method;
5813 prev_generic_context = cfg->generic_context;
5814 prev_ret_var_set = cfg->ret_var_set;
/* An inlined callvirt on an instance method still needs the null check. */
5816 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Translate the callee; costs < 0 means the inline was aborted. */
5819 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5821 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state (mirror of the saves above). */
5823 cfg->inlined_method = prev_inlined_method;
5824 cfg->real_offset = prev_real_offset;
5825 cfg->cbb_hash = prev_cbb_hash;
5826 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5827 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5828 cfg->cil_start = prev_cil_start;
5829 cfg->locals = prev_locals;
5830 cfg->args = prev_args;
5831 cfg->arg_types = prev_arg_types;
5832 cfg->current_method = prev_current_method;
5833 cfg->generic_context = prev_generic_context;
5834 cfg->ret_var_set = prev_ret_var_set;
5835 cfg->inline_depth --;
/* Accept the inline when cheap enough, or when the caller insists. */
5837 if ((costs >= 0 && costs < 60) || inline_always) {
5838 if (cfg->verbose_level > 2)
5839 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5841 cfg->stat_inlined_methods++;
5843 /* always add some code to avoid block split failures */
5844 MONO_INST_NEW (cfg, ins, OP_NOP);
5845 MONO_ADD_INS (prev_cbb, ins);
5847 prev_cbb->next_bb = sbblock;
5848 link_bblock (cfg, prev_cbb, sbblock);
5851 * Get rid of the begin and end bblocks if possible to aid local
5854 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5856 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5857 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5859 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5860 MonoBasicBlock *prev = ebblock->in_bb [0];
5861 mono_merge_basic_blocks (cfg, prev, ebblock);
5863 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5864 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5865 cfg->cbb = prev_cbb;
5869 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a defined value on predecessors that end in OP_NOT_REACHED. */
5875 for (i = 0; i < ebblock->in_count; ++i) {
5876 bb = ebblock->in_bb [i];
5878 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5881 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5891 * If the inlined method contains only a throw, then the ret var is not
5892 * set, so set it to a dummy value.
5895 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value (loaded from rvar) for the caller's stack. */
5897 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header ownership passes to cfg; freed when the compile finishes. */
5900 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: clear any partial failure state and discard the new blocks. */
5903 if (cfg->verbose_level > 2)
5904 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5905 cfg->exception_type = MONO_EXCEPTION_NONE;
5906 mono_loader_clear_error ();
5908 /* This gets rid of the newly added bblocks */
5909 cfg->cbb = prev_cbb;
5911 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5916 * Some of these comments may well be out-of-date.
5917 * Design decisions: we do a single pass over the IL code (and we do bblock
5918 * splitting/merging in the few cases when it's required: a back jump to an IL
5919 * address that was not already seen as bblock starting point).
5920 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5921 * Complex operations are decomposed in simpler ones right away. We need to let the
5922 * arch-specific code peek and poke inside this process somehow (except when the
5923 * optimizations can take advantage of the full semantic info of coarse opcodes).
5924 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5925 * MonoInst->opcode initially is the IL opcode or some simplification of that
5926 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5927 * opcode with value bigger than OP_LAST.
5928 * At this point the IR can be handed over to an interpreter, a dumb code generator
5929 * or to the optimizing code generator that will translate it to SSA form.
5931 * Profiling directed optimizations.
5932 * We may compile by default with few or no optimizations and instrument the code
5933 * or the user may indicate what methods to optimize the most either in a config file
5934 * or through repeated runs where the compiler applies offline the optimizations to
5935 * each method and then decides if it was worth it.
/*
 * IL-stream sanity checks used throughout mono_method_to_ir ().  Each macro
 * bails out of the translation loop via UNVERIFIED / LOAD_ERROR when the
 * method body is malformed (stack under/overflow, bad arg/local index,
 * truncated opcode, unloadable type).
 */
5938 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5939 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5940 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5941 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5942 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5943 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5944 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5945 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5947 /* offset from br.s -> br like opcodes */
5948 #define BIG_BRANCH_OFFSET 13
/* Return TRUE if the IL address IP belongs to basic block BB, i.e. no other
 * basic block starts at that offset. */
5951 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5953 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5955 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode, create a
 * MonoBasicBlock (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch, and mark blocks that end in a
 * throw as out-of-line so they are laid out cold.
 */
5959 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5961 unsigned char *ip = start;
5962 unsigned char *target;
5965 MonoBasicBlock *bblock;
5966 const MonoOpcode *opcode;
5969 cli_addr = ip - start;
5970 i = mono_opcode_value ((const guint8 **)&ip, end);
5973 opcode = &mono_opcodes [i];
/* Advance ip past the operand; only branch-like operands create bblocks. */
5974 switch (opcode->argument) {
5975 case MonoInlineNone:
5978 case MonoInlineString:
5979 case MonoInlineType:
5980 case MonoInlineField:
5981 case MonoInlineMethod:
5984 case MonoShortInlineR:
5991 case MonoShortInlineVar:
5992 case MonoShortInlineI:
/* 1-byte signed branch displacement, relative to the next instruction. */
5995 case MonoShortInlineBrTarget:
5996 target = start + cli_addr + 2 + (signed char)ip [1];
5997 GET_BBLOCK (cfg, bblock, target);
/* The fall-through instruction also starts a block. */
6000 GET_BBLOCK (cfg, bblock, ip);
/* 4-byte branch displacement. */
6002 case MonoInlineBrTarget:
6003 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6004 GET_BBLOCK (cfg, bblock, target);
6007 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 32-bit targets, all relative to the end of the instruction. */
6009 case MonoInlineSwitch: {
6010 guint32 n = read32 (ip + 1);
6013 cli_addr += 5 + 4 * n;
6014 target = start + cli_addr;
6015 GET_BBLOCK (cfg, bblock, target);
6017 for (j = 0; j < n; ++j) {
6018 target = start + cli_addr + (gint32)read32 (ip);
6019 GET_BBLOCK (cfg, bblock, target);
6029 g_assert_not_reached ();
/* Mark throw-ending blocks cold so the block layout moves them out of line. */
6032 if (i == CEE_THROW) {
6033 unsigned char *bb_start = ip - 1;
6035 /* Find the start of the bblock containing the throw */
6037 while ((bb_start >= start) && !bblock) {
6038 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6042 bblock->out_of_line = 1;
/* Resolve a method TOKEN in the context of M.  For wrapper methods the token
 * indexes the wrapper's own data table instead of image metadata; the result
 * may be an open constructed method (hence "allow_open"). */
6052 static inline MonoMethod *
6053 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6057 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6058 method = mono_method_get_wrapper_data (m, token);
6060 method = mono_class_inflate_generic_method (method, context);
6062 method = mono_get_method_full (m->klass->image, token, klass, context);
/* Like mini_get_method_allow_open (), but when not compiling shared code an
 * open constructed declaring type is rejected (handled in the elided body). */
6068 static inline MonoMethod *
6069 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6071 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6073 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/* Resolve a type TOKEN in the context of METHOD: wrapper data for wrappers,
 * image metadata otherwise; the class is initialized before returning. */
6079 static inline MonoClass*
6080 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6084 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6085 klass = mono_method_get_wrapper_data (method, token);
6087 klass = mono_class_inflate_generic_class (klass, context);
6089 klass = mono_class_get_full (method->klass->image, token, context);
6092 mono_class_init (klass);
/* Resolve a standalone-signature TOKEN: wrapper data (inflated through
 * CONTEXT) for wrappers, parsed from image metadata otherwise. */
6096 static inline MonoMethodSignature*
6097 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6099 MonoMethodSignature *fsig;
6101 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6104 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6106 fsig = mono_inflate_generic_signature (fsig, context, &error);
/* Inflating a wrapper-provided signature is not expected to fail. */
6108 g_assert (mono_error_ok (&error));
6111 fsig = mono_metadata_parse_signature (method->klass->image, token);
6117 * Returns TRUE if the JIT should abort inlining because "callee"
6118 * is influenced by security attributes.
6121 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate declarative security when inlining (cfg->method != caller). */
6125 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6129 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6130 if (result == MONO_JIT_SECURITY_OK)
/* ECMA "transparent" link demand: emit a runtime SecurityException throw
 * instead of failing the compile. */
6133 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6134 /* Generate code to throw a SecurityException before the actual call/link */
6135 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6138 NEW_ICONST (cfg, args [0], 4);
6139 NEW_METHODCONST (cfg, args [1], caller);
6140 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6141 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6142 /* don't hide previous results */
6143 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6144 cfg->exception_data = result;
/* Return (lazily resolving and caching) SecurityManager.ThrowException,
 * the managed helper used to raise a pre-built exception object. */
6152 throw_exception (void)
6154 static MonoMethod *method = NULL;
6157 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6158 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* Emit a call that throws the preallocated exception object EX at runtime. */
6165 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6167 MonoMethod *thrower = throw_exception ();
6170 EMIT_NEW_PCONST (cfg, args [0], ex);
6171 mono_emit_method_call (cfg, thrower, args, NULL);
6175 * Return the original method if a wrapper is specified. We can only access
6176 * the custom attributes from the original method.
6179 get_original_method (MonoMethod *method)
6181 if (method->wrapper_type == MONO_WRAPPER_NONE)
6184 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6185 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6188 /* in other cases we need to find the original method */
6189 return mono_marshal_method_from_wrapper (method);
/* CoreCLR security: if CALLER may not access FIELD, emit code that throws
 * the security exception at runtime instead of performing the access. */
6193 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6194 MonoBasicBlock *bblock, unsigned char *ip)
6196 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6197 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6199 emit_throw_exception (cfg, ex);
/* Same check for method calls: throw at runtime if CALLER may not call CALLEE. */
6203 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6204 MonoBasicBlock *bblock, unsigned char *ip)
6206 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6207 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6209 emit_throw_exception (cfg, ex);
6213 * Check that the IL instructions at ip are the array initialization
6214 * sequence and return the pointer to the data and the size.
6217 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6220 * newarr[System.Int32]
6222 * ldtoken field valuetype ...
6223 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match the exact byte pattern: dup; ldtoken <field>; call <method>.
 * ip[5] == 0x4 checks the ldtoken token's table byte (a Field token). */
6225 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6226 guint32 token = read32 (ip + 7);
6227 guint32 field_token = read32 (ip + 2);
6228 guint32 field_index = field_token & 0xffffff;
6230 const char *data_ptr;
6232 MonoMethod *cmethod;
6233 MonoClass *dummy_class;
6234 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6240 *out_field_token = field_token;
6242 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only RuntimeHelpers.InitializeArray from corlib qualifies. */
6245 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw layout matches the metadata blob qualify. */
6247 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6248 case MONO_TYPE_BOOLEAN:
6252 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6253 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6254 case MONO_TYPE_CHAR:
/* The blob must be at least as large as the array contents. */
6271 if (size > mono_type_size (field->type, &dummy_align))
6274 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6275 if (!method->klass->image->dynamic) {
6276 field_index = read32 (ip + 2) & 0xffffff;
6277 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6278 data_ptr = mono_image_rva_map (method->klass->image, rva);
6279 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6280 /* for aot code we do the lookup on load */
6281 if (aot && data_ptr)
6282 return GUINT_TO_POINTER (rva);
6284 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the data directly. */
6286 data_ptr = mono_field_get_data (field);
/* Record an InvalidProgramException on CFG, with a message naming METHOD and
 * disassembling the offending instruction at IP. */
6294 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6296 char *method_fname = mono_method_full_name (method, TRUE);
6298 MonoMethodHeader *header = mono_method_get_header (method);
6300 if (header->code_size == 0)
6301 method_code = g_strdup ("method body is empty.");
6303 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6304 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6305 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6306 g_free (method_fname);
6307 g_free (method_code);
/* The header is freed with the rest of the compile's headers. */
6308 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* Record a preallocated managed EXCEPTION object as the compile failure;
 * the object is GC-rooted since exception_ptr lives outside the managed heap. */
6312 set_exception_object (MonoCompile *cfg, MonoException *exception)
6314 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6315 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6316 cfg->exception_ptr = exception;
/* Emit a store of *SP into local N.  When the value on the stack is a fresh
 * constant and a plain reg-reg move would result, retarget the constant's
 * dreg to the local instead of emitting a separate store. */
6320 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6323 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6324 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6325 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6326 /* Optimize reg-reg moves away */
6328 * Can't optimize other opcodes, since sp[0] might point to
6329 * the last ins of a decomposed opcode.
6331 sp [0]->dreg = (cfg)->locals [n]->dreg;
6333 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6338 * ldloca inhibits many optimizations so try to get rid of it in common
/* Peephole: recognize "ldloca N; initobj <type>" and replace the pair with a
 * direct default-value initialization of the local, avoiding the
 * address-taken flag on the local.  Returns the advanced ip on a match. */
6341 static inline unsigned char *
6342 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6352 local = read16 (ip + 2);
/* Both instructions must be in the same basic block for this to be safe. */
6356 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6357 /* From the INITOBJ case */
6358 token = read32 (ip + 2);
6359 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6360 CHECK_TYPELOAD (klass);
6361 type = &klass->byval_arg;
6362 emit_init_local (cfg, local, type);
/* Walk up the parent chain: return whether CLASS derives from
 * System.Exception. */
6370 is_exception_class (MonoClass *class)
6373 if (class == mono_defaults.exception_class)
6375 class = class->parent;
6381 * is_jit_optimizer_disabled:
6383 * Determine whether M's assembly has a DebuggableAttribute with the
6384 * IsJITOptimizerDisabled flag set.
6387 is_jit_optimizer_disabled (MonoMethod *m)
6389 MonoAssembly *ass = m->klass->image->assembly;
6390 MonoCustomAttrInfo* attrs;
6391 static MonoClass *klass;
6393 gboolean val = FALSE;
/* Fast path: the result is cached per assembly. */
6396 if (ass->jit_optimizer_disabled_inited)
6397 return ass->jit_optimizer_disabled;
6400 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the cached value before the inited flag (memory barrier in
 * between) so racing readers never see inited without a valid value. */
6403 ass->jit_optimizer_disabled = FALSE;
6404 mono_memory_barrier ();
6405 ass->jit_optimizer_disabled_inited = TRUE;
6409 attrs = mono_custom_attrs_from_assembly (ass);
6411 for (i = 0; i < attrs->num_attrs; ++i) {
6412 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6415 MonoMethodSignature *sig;
6417 if (!attr->ctor || attr->ctor->klass != klass)
6419 /* Decode the attribute. See reflection.c */
6420 len = attr->data_size;
6421 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6422 g_assert (read16 (p) == 0x0001);
6425 // FIXME: Support named parameters
6426 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is understood. */
6427 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6429 /* Two boolean arguments */
6433 mono_custom_attrs_free (attrs);
6436 ass->jit_optimizer_disabled = val;
6437 mono_memory_barrier ();
6438 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call to CMETHOD from METHOD at CALL_OPCODE can be
 * compiled as a real tail call.  Starts from an arch/signature check, then
 * vetoes any case where the callee could observe the caller's dying stack
 * frame or where the calling convention cannot be preserved.
 */
6444 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6446 gboolean supported_tail_call;
/* Arch-specific check when available, otherwise require identical
 * signatures and a non-struct return. */
6449 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6450 supported_tail_call = mono_arch_tail_call_supported (mono_method_signature (method), mono_method_signature (cmethod));
6452 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6455 for (i = 0; i < fsig->param_count; ++i) {
6456 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6457 /* These can point to the current method's stack */
6458 supported_tail_call = FALSE;
6460 if (fsig->hasthis && cmethod->klass->valuetype)
6461 /* this might point to the current method's stack */
6462 supported_tail_call = FALSE;
6463 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6464 supported_tail_call = FALSE;
/* An LMF frame must stay live for the duration of the call. */
6465 if (cfg->method->save_lmf)
6466 supported_tail_call = FALSE;
6467 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6468 supported_tail_call = FALSE;
/* Only plain 'call' sites are eligible (not callvirt/calli). */
6469 if (call_opcode != CEE_CALL)
6470 supported_tail_call = FALSE;
6472 /* Debugging support */
/* mono_debug_count () lets tail calls be bisected via an env var. */
6474 if (supported_tail_call) {
6475 if (!mono_debug_count ())
6476 supported_tail_call = FALSE;
6480 return supported_tail_call;
6483 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6484 * it to the thread local value based on the tls_offset field. Every other kind of access to
6485 * the field causes an assert.
/* Return whether FIELD is corlib's ThreadLocal`1.tlsdata field. */
6488 is_magic_tls_access (MonoClassField *field)
6490 if (strcmp (field->name, "tlsdata"))
6492 if (strcmp (field->parent->name, "ThreadLocal`1"))
6494 return field->parent->image == mono_defaults.corlib;
6497 /* emits the code needed to access a managed tls var (like ThreadStatic)
6498 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6499 * pointer for the current thread.
6500 * Returns the MonoInst* representing the address of the tls var.
6503 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6506 int static_data_reg, array_reg, dreg;
6507 int offset2_reg, idx_reg;
6508 // inlined access to the tls data
6509 // idx = (offset >> 24) - 1;
6510 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data (array of per-slot chunks) */
6511 static_data_reg = alloc_ireg (cfg);
6512 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = ((offset >> 24) - 1), scaled to a pointer-sized index */
6513 idx_reg = alloc_ireg (cfg);
6514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] */
6518 array_reg = alloc_ireg (cfg);
6519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = offset & 0xffffff (byte offset within the chunk) */
6520 offset2_reg = alloc_ireg (cfg);
6521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6522 dreg = alloc_ireg (cfg);
6523 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6528 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6529 * this address is cached per-method in cached_tls_addr.
6532 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6534 MonoInst *load, *addr, *temp, *store, *thread_ins;
6535 MonoClassField *offset_field;
/* Reuse the per-method cached address when available. */
6537 if (*cached_tls_addr) {
6538 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Obtain the current MonoInternalThread, preferring the arch intrinsic and
 * falling back to a managed icall when no intrinsic is available. */
6541 thread_ins = mono_get_thread_intrinsic (cfg);
6542 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset field of the ThreadLocal<T> instance. */
6544 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6546 MONO_ADD_INS (cfg->cbb, thread_ins);
6548 MonoMethod *thread_method;
6549 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6550 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6552 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6553 addr->klass = mono_class_from_mono_type (tls_field->type);
6554 addr->type = STACK_MP;
/* Cache the computed address in a temp for subsequent accesses. */
6555 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6556 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6558 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6563 * mono_method_to_ir:
6565 * Translate the .net IL into linear IR.
6568 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6569 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6570 guint inline_offset, gboolean is_virtual_call)
6573 MonoInst *ins, **sp, **stack_start;
6574 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6575 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6576 MonoMethod *cmethod, *method_definition;
6577 MonoInst **arg_array;
6578 MonoMethodHeader *header;
6580 guint32 token, ins_flag;
6582 MonoClass *constrained_call = NULL;
6583 unsigned char *ip, *end, *target, *err_pos;
6584 MonoMethodSignature *sig;
6585 MonoGenericContext *generic_context = NULL;
6586 MonoGenericContainer *generic_container = NULL;
6587 MonoType **param_types;
6588 int i, n, start_new_bblock, dreg;
6589 int num_calls = 0, inline_costs = 0;
6590 int breakpoint_id = 0;
6592 MonoBoolean security, pinvoke;
6593 MonoSecurityManager* secman = NULL;
6594 MonoDeclSecurityActions actions;
6595 GSList *class_inits = NULL;
6596 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6598 gboolean init_locals, seq_points, skip_dead_blocks;
6599 gboolean disable_inline, sym_seq_points = FALSE;
6600 MonoInst *cached_tls_addr = NULL;
6601 MonoDebugMethodInfo *minfo;
6602 MonoBitSet *seq_point_locs = NULL;
6603 MonoBitSet *seq_point_set_locs = NULL;
6605 disable_inline = is_jit_optimizer_disabled (method);
6607 /* serialization and xdomain stuff may need access to private fields and methods */
6608 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6609 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6610 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6611 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6612 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6613 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6615 dont_verify |= mono_security_smcs_hack_enabled ();
6617 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6618 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6619 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6620 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6621 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6623 image = method->klass->image;
6624 header = mono_method_get_header (method);
6626 MonoLoaderError *error;
6628 if ((error = mono_loader_get_last_error ())) {
6629 mono_cfg_set_exception (cfg, error->exception_type);
6631 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6632 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6634 goto exception_exit;
6636 generic_container = mono_method_get_generic_container (method);
6637 sig = mono_method_signature (method);
6638 num_args = sig->hasthis + sig->param_count;
6639 ip = (unsigned char*)header->code;
6640 cfg->cil_start = ip;
6641 end = ip + header->code_size;
6642 cfg->stat_cil_code_size += header->code_size;
6643 init_locals = header->init_locals;
6645 seq_points = cfg->gen_seq_points && cfg->method == method;
6646 #ifdef PLATFORM_ANDROID
6647 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6650 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6651 /* We could hit a seq point before attaching to the JIT (#8338) */
6655 if (cfg->gen_seq_points && cfg->method == method) {
6656 minfo = mono_debug_lookup_method (method);
6658 int i, n_il_offsets;
6662 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6663 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6664 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6665 sym_seq_points = TRUE;
6666 for (i = 0; i < n_il_offsets; ++i) {
6667 if (il_offsets [i] < header->code_size)
6668 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6670 g_free (il_offsets);
6671 g_free (line_numbers);
6676 * Methods without init_locals set could cause asserts in various passes
6681 method_definition = method;
6682 while (method_definition->is_inflated) {
6683 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6684 method_definition = imethod->declaring;
6687 /* SkipVerification is not allowed if core-clr is enabled */
6688 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6690 dont_verify_stloc = TRUE;
6693 if (sig->is_inflated)
6694 generic_context = mono_method_get_context (method);
6695 else if (generic_container)
6696 generic_context = &generic_container->context;
6697 cfg->generic_context = generic_context;
6699 if (!cfg->generic_sharing_context)
6700 g_assert (!sig->has_type_parameters);
6702 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6703 g_assert (method->is_inflated);
6704 g_assert (mono_method_get_context (method)->method_inst);
6706 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6707 g_assert (sig->generic_param_count);
6709 if (cfg->method == method) {
6710 cfg->real_offset = 0;
6712 cfg->real_offset = inline_offset;
6715 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6716 cfg->cil_offset_to_bb_len = header->code_size;
6718 cfg->current_method = method;
6720 if (cfg->verbose_level > 2)
6721 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6723 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6725 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6726 for (n = 0; n < sig->param_count; ++n)
6727 param_types [n + sig->hasthis] = sig->params [n];
6728 cfg->arg_types = param_types;
6730 dont_inline = g_list_prepend (dont_inline, method);
6731 if (cfg->method == method) {
6733 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6734 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6737 NEW_BBLOCK (cfg, start_bblock);
6738 cfg->bb_entry = start_bblock;
6739 start_bblock->cil_code = NULL;
6740 start_bblock->cil_length = 0;
6741 #if defined(__native_client_codegen__)
6742 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6743 ins->dreg = alloc_dreg (cfg, STACK_I4);
6744 MONO_ADD_INS (start_bblock, ins);
6748 NEW_BBLOCK (cfg, end_bblock);
6749 cfg->bb_exit = end_bblock;
6750 end_bblock->cil_code = NULL;
6751 end_bblock->cil_length = 0;
6752 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6753 g_assert (cfg->num_bblocks == 2);
6755 arg_array = cfg->args;
6757 if (header->num_clauses) {
6758 cfg->spvars = g_hash_table_new (NULL, NULL);
6759 cfg->exvars = g_hash_table_new (NULL, NULL);
6761 /* handle exception clauses */
6762 for (i = 0; i < header->num_clauses; ++i) {
6763 MonoBasicBlock *try_bb;
6764 MonoExceptionClause *clause = &header->clauses [i];
6765 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6766 try_bb->real_offset = clause->try_offset;
6767 try_bb->try_start = TRUE;
6768 try_bb->region = ((i + 1) << 8) | clause->flags;
6769 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6770 tblock->real_offset = clause->handler_offset;
6771 tblock->flags |= BB_EXCEPTION_HANDLER;
6774 * Linking the try block with the EH block hinders inlining as we won't be able to
6775 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6777 if (COMPILE_LLVM (cfg))
6778 link_bblock (cfg, try_bb, tblock);
6780 if (*(ip + clause->handler_offset) == CEE_POP)
6781 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6783 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6784 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6785 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6786 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6787 MONO_ADD_INS (tblock, ins);
6789 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6790 /* finally clauses already have a seq point */
6791 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6792 MONO_ADD_INS (tblock, ins);
6795 /* todo: is a fault block unsafe to optimize? */
6796 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6797 tblock->flags |= BB_EXCEPTION_UNSAFE;
6801 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6803 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6805 /* catch and filter blocks get the exception object on the stack */
6806 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6807 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6808 MonoInst *dummy_use;
6810 /* mostly like handle_stack_args (), but just sets the input args */
6811 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6812 tblock->in_scount = 1;
6813 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6814 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6817 * Add a dummy use for the exvar so its liveness info will be
6821 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6823 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6824 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6825 tblock->flags |= BB_EXCEPTION_HANDLER;
6826 tblock->real_offset = clause->data.filter_offset;
6827 tblock->in_scount = 1;
6828 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6829 /* The filter block shares the exvar with the handler block */
6830 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6831 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6832 MONO_ADD_INS (tblock, ins);
6836 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6837 clause->data.catch_class &&
6838 cfg->generic_sharing_context &&
6839 mono_class_check_context_used (clause->data.catch_class)) {
6841 * In shared generic code with catch
6842 * clauses containing type variables
6843 * the exception handling code has to
6844 * be able to get to the rgctx.
6845 * Therefore we have to make sure that
6846 * the vtable/mrgctx argument (for
6847 * static or generic methods) or the
6848 * "this" argument (for non-static
6849 * methods) are live.
6851 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6852 mini_method_get_context (method)->method_inst ||
6853 method->klass->valuetype) {
6854 mono_get_vtable_var (cfg);
6856 MonoInst *dummy_use;
6858 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6863 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6864 cfg->cbb = start_bblock;
6865 cfg->args = arg_array;
6866 mono_save_args (cfg, sig, inline_args);
6869 /* FIRST CODE BLOCK */
6870 NEW_BBLOCK (cfg, bblock);
6871 bblock->cil_code = ip;
6875 ADD_BBLOCK (cfg, bblock);
6877 if (cfg->method == method) {
6878 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6879 if (breakpoint_id) {
6880 MONO_INST_NEW (cfg, ins, OP_BREAK);
6881 MONO_ADD_INS (bblock, ins);
6885 if (mono_security_cas_enabled ())
6886 secman = mono_security_manager_get_methods ();
6888 security = (secman && mono_security_method_has_declsec (method));
6889 /* at this point having security doesn't mean we have any code to generate */
6890 if (security && (cfg->method == method)) {
6891 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6892 * And we do not want to enter the next section (with allocation) if we
6893 * have nothing to generate */
6894 security = mono_declsec_get_demands (method, &actions);
6897 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6898 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6900 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6901 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6902 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6904 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6905 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6909 mono_custom_attrs_free (custom);
6912 custom = mono_custom_attrs_from_class (wrapped->klass);
6913 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6917 mono_custom_attrs_free (custom);
6920 /* not a P/Invoke after all */
6925 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6926 /* we use a separate basic block for the initialization code */
6927 NEW_BBLOCK (cfg, init_localsbb);
6928 cfg->bb_init = init_localsbb;
6929 init_localsbb->real_offset = cfg->real_offset;
6930 start_bblock->next_bb = init_localsbb;
6931 init_localsbb->next_bb = bblock;
6932 link_bblock (cfg, start_bblock, init_localsbb);
6933 link_bblock (cfg, init_localsbb, bblock);
6935 cfg->cbb = init_localsbb;
6937 start_bblock->next_bb = bblock;
6938 link_bblock (cfg, start_bblock, bblock);
6941 if (cfg->gsharedvt && cfg->method == method) {
6942 MonoGSharedVtMethodInfo *info;
6943 MonoInst *var, *locals_var;
6946 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6947 info->method = cfg->method;
6949 info->entries = g_ptr_array_new ();
6950 cfg->gsharedvt_info = info;
6952 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6953 /* prevent it from being register allocated */
6954 //var->flags |= MONO_INST_VOLATILE;
6955 cfg->gsharedvt_info_var = var;
6957 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6958 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6960 /* Allocate locals */
6961 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6962 /* prevent it from being register allocated */
6963 //locals_var->flags |= MONO_INST_VOLATILE;
6964 cfg->gsharedvt_locals_var = locals_var;
6966 dreg = alloc_ireg (cfg);
6967 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6969 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6970 ins->dreg = locals_var->dreg;
6972 MONO_ADD_INS (cfg->cbb, ins);
6973 cfg->gsharedvt_locals_var_ins = ins;
6975 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6978 ins->flags |= MONO_INST_INIT;
6982 /* at this point we know, if security is TRUE, that some code needs to be generated */
6983 if (security && (cfg->method == method)) {
6986 cfg->stat_cas_demand_generation++;
6988 if (actions.demand.blob) {
6989 /* Add code for SecurityAction.Demand */
6990 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6991 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6992 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6993 mono_emit_method_call (cfg, secman->demand, args, NULL);
6995 if (actions.noncasdemand.blob) {
6996 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6997 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6998 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6999 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7000 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7001 mono_emit_method_call (cfg, secman->demand, args, NULL);
7003 if (actions.demandchoice.blob) {
7004 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7005 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7006 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7007 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7008 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7012 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7014 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7017 if (mono_security_core_clr_enabled ()) {
7018 /* check if this is native code, e.g. an icall or a p/invoke */
7019 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7020 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7022 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7023 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7025 /* if this is a native call then it can only be JITted from platform code */
7026 if ((icall || pinvk) && method->klass && method->klass->image) {
7027 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7028 MonoException *ex = icall ? mono_get_exception_security () :
7029 mono_get_exception_method_access ();
7030 emit_throw_exception (cfg, ex);
7037 CHECK_CFG_EXCEPTION;
7039 if (header->code_size == 0)
7042 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7047 if (cfg->method == method)
7048 mono_debug_init_method (cfg, bblock, breakpoint_id);
7050 for (n = 0; n < header->num_locals; ++n) {
7051 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7056 /* We force the vtable variable here for all shared methods
7057 for the possibility that they might show up in a stack
7058 trace where their exact instantiation is needed. */
7059 if (cfg->generic_sharing_context && method == cfg->method) {
7060 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7061 mini_method_get_context (method)->method_inst ||
7062 method->klass->valuetype) {
7063 mono_get_vtable_var (cfg);
7065 /* FIXME: Is there a better way to do this?
7066 We need the variable live for the duration
7067 of the whole method. */
7068 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7072 /* add a check for this != NULL to inlined methods */
7073 if (is_virtual_call) {
7076 NEW_ARGLOAD (cfg, arg_ins, 0);
7077 MONO_ADD_INS (cfg->cbb, arg_ins);
7078 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7081 skip_dead_blocks = !dont_verify;
7082 if (skip_dead_blocks) {
7083 original_bb = bb = mono_basic_block_split (method, &error);
7084 if (!mono_error_ok (&error)) {
7085 mono_error_cleanup (&error);
7091 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7092 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7095 start_new_bblock = 0;
7098 if (cfg->method == method)
7099 cfg->real_offset = ip - header->code;
7101 cfg->real_offset = inline_offset;
7106 if (start_new_bblock) {
7107 bblock->cil_length = ip - bblock->cil_code;
7108 if (start_new_bblock == 2) {
7109 g_assert (ip == tblock->cil_code);
7111 GET_BBLOCK (cfg, tblock, ip);
7113 bblock->next_bb = tblock;
7116 start_new_bblock = 0;
7117 for (i = 0; i < bblock->in_scount; ++i) {
7118 if (cfg->verbose_level > 3)
7119 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7120 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7124 g_slist_free (class_inits);
7127 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7128 link_bblock (cfg, bblock, tblock);
7129 if (sp != stack_start) {
7130 handle_stack_args (cfg, stack_start, sp - stack_start);
7132 CHECK_UNVERIFIABLE (cfg);
7134 bblock->next_bb = tblock;
7137 for (i = 0; i < bblock->in_scount; ++i) {
7138 if (cfg->verbose_level > 3)
7139 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7140 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7143 g_slist_free (class_inits);
7148 if (skip_dead_blocks) {
7149 int ip_offset = ip - header->code;
7151 if (ip_offset == bb->end)
7155 int op_size = mono_opcode_size (ip, end);
7156 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7158 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7160 if (ip_offset + op_size == bb->end) {
7161 MONO_INST_NEW (cfg, ins, OP_NOP);
7162 MONO_ADD_INS (bblock, ins);
7163 start_new_bblock = 1;
7171 * Sequence points are points where the debugger can place a breakpoint.
7172 * Currently, we generate these automatically at points where the IL
7175 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7177 * Make methods interruptable at the beginning, and at the targets of
7178 * backward branches.
7179 * Also, do this at the start of every bblock in methods with clauses too,
7180 * to be able to handle instructions with imprecise control flow like
7182 * Backward branches are handled at the end of method-to-ir ().
7184 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7186 /* Avoid sequence points on empty IL like .volatile */
7187 // FIXME: Enable this
7188 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7189 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7190 if (sp != stack_start)
7191 ins->flags |= MONO_INST_NONEMPTY_STACK;
7192 MONO_ADD_INS (cfg->cbb, ins);
7195 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7198 bblock->real_offset = cfg->real_offset;
7200 if ((cfg->method == method) && cfg->coverage_info) {
7201 guint32 cil_offset = ip - header->code;
7202 cfg->coverage_info->data [cil_offset].cil_code = ip;
7204 /* TODO: Use an increment here */
7205 #if defined(TARGET_X86)
7206 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7207 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7209 MONO_ADD_INS (cfg->cbb, ins);
7211 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7212 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7216 if (cfg->verbose_level > 3)
7217 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7221 if (seq_points && !sym_seq_points && sp != stack_start) {
7223 * The C# compiler uses these nops to notify the JIT that it should
7224 * insert seq points.
7226 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7227 MONO_ADD_INS (cfg->cbb, ins);
7229 if (cfg->keep_cil_nops)
7230 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7232 MONO_INST_NEW (cfg, ins, OP_NOP);
7234 MONO_ADD_INS (bblock, ins);
7237 if (should_insert_brekpoint (cfg->method)) {
7238 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7240 MONO_INST_NEW (cfg, ins, OP_NOP);
7243 MONO_ADD_INS (bblock, ins);
7249 CHECK_STACK_OVF (1);
7250 n = (*ip)-CEE_LDARG_0;
7252 EMIT_NEW_ARGLOAD (cfg, ins, n);
7260 CHECK_STACK_OVF (1);
7261 n = (*ip)-CEE_LDLOC_0;
7263 EMIT_NEW_LOCLOAD (cfg, ins, n);
7272 n = (*ip)-CEE_STLOC_0;
7275 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7277 emit_stloc_ir (cfg, sp, header, n);
7284 CHECK_STACK_OVF (1);
7287 EMIT_NEW_ARGLOAD (cfg, ins, n);
7293 CHECK_STACK_OVF (1);
7296 NEW_ARGLOADA (cfg, ins, n);
7297 MONO_ADD_INS (cfg->cbb, ins);
7307 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7309 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7314 CHECK_STACK_OVF (1);
7317 EMIT_NEW_LOCLOAD (cfg, ins, n);
7321 case CEE_LDLOCA_S: {
7322 unsigned char *tmp_ip;
7324 CHECK_STACK_OVF (1);
7325 CHECK_LOCAL (ip [1]);
7327 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7333 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7342 CHECK_LOCAL (ip [1]);
7343 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7345 emit_stloc_ir (cfg, sp, header, ip [1]);
7350 CHECK_STACK_OVF (1);
7351 EMIT_NEW_PCONST (cfg, ins, NULL);
7352 ins->type = STACK_OBJ;
7357 CHECK_STACK_OVF (1);
7358 EMIT_NEW_ICONST (cfg, ins, -1);
7371 CHECK_STACK_OVF (1);
7372 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7378 CHECK_STACK_OVF (1);
7380 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7386 CHECK_STACK_OVF (1);
7387 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7393 CHECK_STACK_OVF (1);
7394 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7395 ins->type = STACK_I8;
7396 ins->dreg = alloc_dreg (cfg, STACK_I8);
7398 ins->inst_l = (gint64)read64 (ip);
7399 MONO_ADD_INS (bblock, ins);
7405 gboolean use_aotconst = FALSE;
7407 #ifdef TARGET_POWERPC
7408 /* FIXME: Clean this up */
7409 if (cfg->compile_aot)
7410 use_aotconst = TRUE;
7413 /* FIXME: we should really allocate this only late in the compilation process */
7414 f = mono_domain_alloc (cfg->domain, sizeof (float));
7416 CHECK_STACK_OVF (1);
7422 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7424 dreg = alloc_freg (cfg);
7425 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7426 ins->type = STACK_R8;
7428 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7429 ins->type = STACK_R8;
7430 ins->dreg = alloc_dreg (cfg, STACK_R8);
7432 MONO_ADD_INS (bblock, ins);
7442 gboolean use_aotconst = FALSE;
7444 #ifdef TARGET_POWERPC
7445 /* FIXME: Clean this up */
7446 if (cfg->compile_aot)
7447 use_aotconst = TRUE;
7450 /* FIXME: we should really allocate this only late in the compilation process */
7451 d = mono_domain_alloc (cfg->domain, sizeof (double));
7453 CHECK_STACK_OVF (1);
7459 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7461 dreg = alloc_freg (cfg);
7462 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7463 ins->type = STACK_R8;
7465 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7466 ins->type = STACK_R8;
7467 ins->dreg = alloc_dreg (cfg, STACK_R8);
7469 MONO_ADD_INS (bblock, ins);
7478 MonoInst *temp, *store;
7480 CHECK_STACK_OVF (1);
7484 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7485 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7487 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7490 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7503 if (sp [0]->type == STACK_R8)
7504 /* we need to pop the value from the x86 FP stack */
7505 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7511 INLINE_FAILURE ("jmp");
7512 GSHAREDVT_FAILURE (*ip);
7515 if (stack_start != sp)
7517 token = read32 (ip + 1);
7518 /* FIXME: check the signature matches */
7519 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7521 if (!cmethod || mono_loader_get_last_error ())
7524 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7525 GENERIC_SHARING_FAILURE (CEE_JMP);
7527 if (mono_security_cas_enabled ())
7528 CHECK_CFG_EXCEPTION;
7530 if (ARCH_HAVE_OP_TAIL_CALL) {
7531 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7534 /* Handle tail calls similarly to calls */
7535 n = fsig->param_count + fsig->hasthis;
7537 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7538 call->method = cmethod;
7539 call->tail_call = TRUE;
7540 call->signature = mono_method_signature (cmethod);
7541 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7542 call->inst.inst_p0 = cmethod;
7543 for (i = 0; i < n; ++i)
7544 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7546 mono_arch_emit_call (cfg, call);
7547 MONO_ADD_INS (bblock, (MonoInst*)call);
7549 for (i = 0; i < num_args; ++i)
7550 /* Prevent arguments from being optimized away */
7551 arg_array [i]->flags |= MONO_INST_VOLATILE;
7553 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7554 ins = (MonoInst*)call;
7555 ins->inst_p0 = cmethod;
7556 MONO_ADD_INS (bblock, ins);
7560 start_new_bblock = 1;
7565 case CEE_CALLVIRT: {
7566 MonoInst *addr = NULL;
7567 MonoMethodSignature *fsig = NULL;
7569 int virtual = *ip == CEE_CALLVIRT;
7570 int calli = *ip == CEE_CALLI;
7571 gboolean pass_imt_from_rgctx = FALSE;
7572 MonoInst *imt_arg = NULL;
7573 MonoInst *keep_this_alive = NULL;
7574 gboolean pass_vtable = FALSE;
7575 gboolean pass_mrgctx = FALSE;
7576 MonoInst *vtable_arg = NULL;
7577 gboolean check_this = FALSE;
7578 gboolean supported_tail_call = FALSE;
7579 gboolean tail_call = FALSE;
7580 gboolean need_seq_point = FALSE;
7581 guint32 call_opcode = *ip;
7582 gboolean emit_widen = TRUE;
7583 gboolean push_res = TRUE;
7584 gboolean skip_ret = FALSE;
7585 gboolean delegate_invoke = FALSE;
7588 token = read32 (ip + 1);
7593 //GSHAREDVT_FAILURE (*ip);
7598 fsig = mini_get_signature (method, token, generic_context);
7599 n = fsig->param_count + fsig->hasthis;
7601 if (method->dynamic && fsig->pinvoke) {
7605 * This is a call through a function pointer using a pinvoke
7606 * signature. Have to create a wrapper and call that instead.
7607 * FIXME: This is very slow, need to create a wrapper at JIT time
7608 * instead based on the signature.
7610 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7611 EMIT_NEW_PCONST (cfg, args [1], fsig);
7613 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7616 MonoMethod *cil_method;
7618 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7619 cil_method = cmethod;
7621 if (constrained_call) {
7622 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7623 if (cfg->verbose_level > 2)
7624 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7625 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7626 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7627 cfg->generic_sharing_context)) {
7628 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7631 if (cfg->verbose_level > 2)
7632 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7634 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7636 * This is needed since get_method_constrained can't find
7637 * the method in klass representing a type var.
7638 * The type var is guaranteed to be a reference type in this
7641 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7642 g_assert (!cmethod->klass->valuetype);
7644 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7649 if (!cmethod || mono_loader_get_last_error ())
7651 if (!dont_verify && !cfg->skip_visibility) {
7652 MonoMethod *target_method = cil_method;
7653 if (method->is_inflated) {
7654 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7656 if (!mono_method_can_access_method (method_definition, target_method) &&
7657 !mono_method_can_access_method (method, cil_method))
7658 METHOD_ACCESS_FAILURE;
7661 if (mono_security_core_clr_enabled ())
7662 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7664 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7665 /* MS.NET seems to silently convert this to a callvirt */
7670 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7671 * converts to a callvirt.
7673 * tests/bug-515884.il is an example of this behavior
7675 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7676 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7677 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7681 if (!cmethod->klass->inited)
7682 if (!mono_class_init (cmethod->klass))
7683 TYPE_LOAD_ERROR (cmethod->klass);
7685 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7686 mini_class_is_system_array (cmethod->klass)) {
7687 array_rank = cmethod->klass->rank;
7688 fsig = mono_method_signature (cmethod);
7690 fsig = mono_method_signature (cmethod);
7695 if (fsig->pinvoke) {
7696 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7697 check_for_pending_exc, cfg->compile_aot);
7698 fsig = mono_method_signature (wrapper);
7699 } else if (constrained_call) {
7700 fsig = mono_method_signature (cmethod);
7702 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7706 mono_save_token_info (cfg, image, token, cil_method);
7708 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7710 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7711 * foo (bar (), baz ())
7712 * works correctly. MS does this also:
7713 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7714 * The problem with this approach is that the debugger will stop after all calls returning a value,
7715 * even for simple cases, like:
7718 /* Special case a few common successor opcodes */
7719 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7720 need_seq_point = TRUE;
7723 n = fsig->param_count + fsig->hasthis;
7725 /* Don't support calls made using type arguments for now */
7727 if (cfg->gsharedvt) {
7728 if (mini_is_gsharedvt_signature (cfg, fsig))
7729 GSHAREDVT_FAILURE (*ip);
7733 if (mono_security_cas_enabled ()) {
7734 if (check_linkdemand (cfg, method, cmethod))
7735 INLINE_FAILURE ("linkdemand");
7736 CHECK_CFG_EXCEPTION;
7739 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7740 g_assert_not_reached ();
7743 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7746 if (!cfg->generic_sharing_context && cmethod)
7747 g_assert (!mono_method_check_context_used (cmethod));
7751 //g_assert (!virtual || fsig->hasthis);
7755 if (constrained_call) {
7756 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7758 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7760 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7761 /* The 'Own method' case below */
7762 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7763 /* 'The type parameter is instantiated as a reference type' case below. */
7764 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7765 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7766 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7767 MonoInst *args [16];
7770 * This case handles calls to
7771 * - object:ToString()/Equals()/GetHashCode(),
7772 * - System.IComparable<T>:CompareTo()
7773 * - System.IEquatable<T>:Equals ()
7774 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7778 if (mono_method_check_context_used (cmethod))
7779 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7781 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7782 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7784 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7785 if (fsig->hasthis && fsig->param_count) {
7786 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7787 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7788 ins->dreg = alloc_preg (cfg);
7789 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7790 MONO_ADD_INS (cfg->cbb, ins);
7793 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7796 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7798 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7799 addr_reg = ins->dreg;
7800 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7802 EMIT_NEW_ICONST (cfg, args [3], 0);
7803 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7806 EMIT_NEW_ICONST (cfg, args [3], 0);
7807 EMIT_NEW_ICONST (cfg, args [4], 0);
7809 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7812 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7813 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7814 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7818 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7819 MONO_ADD_INS (cfg->cbb, add);
7821 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7822 MONO_ADD_INS (cfg->cbb, ins);
7823 /* ins represents the call result */
7828 GSHAREDVT_FAILURE (*ip);
7832 * We have the `constrained.' prefix opcode.
7834 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7836 * The type parameter is instantiated as a valuetype,
7837 * but that type doesn't override the method we're
7838 * calling, so we need to box `this'.
7840 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7841 ins->klass = constrained_call;
7842 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7843 CHECK_CFG_EXCEPTION;
7844 } else if (!constrained_call->valuetype) {
7845 int dreg = alloc_ireg_ref (cfg);
7848 * The type parameter is instantiated as a reference
7849 * type. We have a managed pointer on the stack, so
7850 * we need to dereference it here.
7852 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7853 ins->type = STACK_OBJ;
7856 if (cmethod->klass->valuetype) {
7859 /* Interface method */
7862 mono_class_setup_vtable (constrained_call);
7863 CHECK_TYPELOAD (constrained_call);
7864 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7866 TYPE_LOAD_ERROR (constrained_call);
7867 slot = mono_method_get_vtable_slot (cmethod);
7869 TYPE_LOAD_ERROR (cmethod->klass);
7870 cmethod = constrained_call->vtable [ioffset + slot];
7872 if (cmethod->klass == mono_defaults.enum_class) {
7873 /* Enum implements some interfaces, so treat this as the first case */
7874 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7875 ins->klass = constrained_call;
7876 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7877 CHECK_CFG_EXCEPTION;
7882 constrained_call = NULL;
7885 if (!calli && check_call_signature (cfg, fsig, sp))
7888 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7889 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7890 delegate_invoke = TRUE;
7893 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7895 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7896 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7904 * If the callee is a shared method, then its static cctor
7905 * might not get called after the call was patched.
7907 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7908 emit_generic_class_init (cfg, cmethod->klass);
7909 CHECK_TYPELOAD (cmethod->klass);
7913 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7915 if (cfg->generic_sharing_context && cmethod) {
7916 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7918 context_used = mini_method_check_context_used (cfg, cmethod);
7920 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7921 /* Generic method interface
7922 calls are resolved via a
7923 helper function and don't
7925 if (!cmethod_context || !cmethod_context->method_inst)
7926 pass_imt_from_rgctx = TRUE;
7930 * If a shared method calls another
7931 * shared method then the caller must
7932 * have a generic sharing context
7933 * because the magic trampoline
7934 * requires it. FIXME: We shouldn't
7935 * have to force the vtable/mrgctx
7936 * variable here. Instead there
7937 * should be a flag in the cfg to
7938 * request a generic sharing context.
7941 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7942 mono_get_vtable_var (cfg);
7947 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7949 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7951 CHECK_TYPELOAD (cmethod->klass);
7952 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7957 g_assert (!vtable_arg);
7959 if (!cfg->compile_aot) {
7961 * emit_get_rgctx_method () calls mono_class_vtable () so check
7962 * for type load errors before.
7964 mono_class_setup_vtable (cmethod->klass);
7965 CHECK_TYPELOAD (cmethod->klass);
7968 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7970 /* !marshalbyref is needed to properly handle generic methods + remoting */
7971 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7972 MONO_METHOD_IS_FINAL (cmethod)) &&
7973 !mono_class_is_marshalbyref (cmethod->klass)) {
7980 if (pass_imt_from_rgctx) {
7981 g_assert (!pass_vtable);
7984 imt_arg = emit_get_rgctx_method (cfg, context_used,
7985 cmethod, MONO_RGCTX_INFO_METHOD);
7989 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7991 /* Calling virtual generic methods */
7992 if (cmethod && virtual &&
7993 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7994 !(MONO_METHOD_IS_FINAL (cmethod) &&
7995 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7996 fsig->generic_param_count &&
7997 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
7998 MonoInst *this_temp, *this_arg_temp, *store;
7999 MonoInst *iargs [4];
8000 gboolean use_imt = FALSE;
8002 g_assert (fsig->is_inflated);
8004 /* Prevent inlining of methods that contain indirect calls */
8005 INLINE_FAILURE ("virtual generic call");
8007 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8008 GSHAREDVT_FAILURE (*ip);
8010 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8011 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8016 g_assert (!imt_arg);
8018 g_assert (cmethod->is_inflated);
8019 imt_arg = emit_get_rgctx_method (cfg, context_used,
8020 cmethod, MONO_RGCTX_INFO_METHOD);
8021 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8023 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8024 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8025 MONO_ADD_INS (bblock, store);
8027 /* FIXME: This should be a managed pointer */
8028 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8030 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8031 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8032 cmethod, MONO_RGCTX_INFO_METHOD);
8033 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8034 addr = mono_emit_jit_icall (cfg,
8035 mono_helper_compile_generic_method, iargs);
8037 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8039 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8046 * Implement a workaround for the inherent races involved in locking:
8052 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8053 * try block, the Exit () won't be executed, see:
8054 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8055 * To work around this, we extend such try blocks to include the last x bytes
8056 * of the Monitor.Enter () call.
8058 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8059 MonoBasicBlock *tbb;
8061 GET_BBLOCK (cfg, tbb, ip + 5);
8063 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8064 * from Monitor.Enter like ArgumentNullException.
8066 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8067 /* Mark this bblock as needing to be extended */
8068 tbb->extend_try_block = TRUE;
8072 /* Conversion to a JIT intrinsic */
8073 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8075 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8076 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8083 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8084 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8085 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8086 !g_list_find (dont_inline, cmethod)) {
8088 gboolean always = FALSE;
8090 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8091 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8092 /* Prevent inlining of methods that call wrappers */
8093 INLINE_FAILURE ("wrapper call");
8094 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8098 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8100 cfg->real_offset += 5;
8103 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8104 /* *sp is already set by inline_method */
8109 inline_costs += costs;
8115 /* Tail recursion elimination */
8116 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8117 gboolean has_vtargs = FALSE;
8120 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8121 INLINE_FAILURE ("tail call");
8123 /* keep it simple */
8124 for (i = fsig->param_count - 1; i >= 0; i--) {
8125 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8130 for (i = 0; i < n; ++i)
8131 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8132 MONO_INST_NEW (cfg, ins, OP_BR);
8133 MONO_ADD_INS (bblock, ins);
8134 tblock = start_bblock->out_bb [0];
8135 link_bblock (cfg, bblock, tblock);
8136 ins->inst_target_bb = tblock;
8137 start_new_bblock = 1;
8139 /* skip the CEE_RET, too */
8140 if (ip_in_bb (cfg, bblock, ip + 5))
8147 inline_costs += 10 * num_calls++;
8150 * Making generic calls out of gsharedvt methods.
8152 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
8153 MonoRgctxInfoType info_type;
8156 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8157 //GSHAREDVT_FAILURE (*ip);
8158 // disable for possible remoting calls
8159 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8160 GSHAREDVT_FAILURE (*ip);
8161 if (fsig->generic_param_count) {
8162 /* virtual generic call */
8163 g_assert (mono_use_imt);
8164 g_assert (!imt_arg);
8165 /* Same as the virtual generic case above */
8166 imt_arg = emit_get_rgctx_method (cfg, context_used,
8167 cmethod, MONO_RGCTX_INFO_METHOD);
8168 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8173 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8174 /* test_0_multi_dim_arrays () in gshared.cs */
8175 GSHAREDVT_FAILURE (*ip);
8177 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8178 keep_this_alive = sp [0];
8180 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8181 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8183 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8184 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8186 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8188 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8190 * We pass the address to the gsharedvt trampoline in the rgctx reg
8192 MonoInst *callee = addr;
8194 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8196 GSHAREDVT_FAILURE (*ip);
8198 addr = emit_get_rgctx_sig (cfg, context_used,
8199 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8200 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8204 /* Generic sharing */
8205 /* FIXME: only do this for generic methods if
8206 they are not shared! */
8207 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8208 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8209 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8210 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8211 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8212 INLINE_FAILURE ("gshared");
8214 g_assert (cfg->generic_sharing_context && cmethod);
8218 * We are compiling a call to a
8219 * generic method from shared code,
8220 * which means that we have to look up
8221 * the method in the rgctx and do an
8225 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8227 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8228 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8232 /* Indirect calls */
8234 if (call_opcode == CEE_CALL)
8235 g_assert (context_used);
8236 else if (call_opcode == CEE_CALLI)
8237 g_assert (!vtable_arg);
8239 /* FIXME: what the hell is this??? */
8240 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8241 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8243 /* Prevent inlining of methods with indirect calls */
8244 INLINE_FAILURE ("indirect call");
8246 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8251 * Instead of emitting an indirect call, emit a direct call
8252 * with the contents of the aotconst as the patch info.
8254 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8255 info_type = addr->inst_c1;
8256 info_data = addr->inst_p0;
8258 info_type = addr->inst_right->inst_c1;
8259 info_data = addr->inst_right->inst_left;
8262 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8263 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8268 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8276 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8277 MonoInst *val = sp [fsig->param_count];
8279 if (val->type == STACK_OBJ) {
8280 MonoInst *iargs [2];
8285 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8288 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8289 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8290 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8291 emit_write_barrier (cfg, addr, val);
8292 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8293 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8295 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8296 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8297 if (!cmethod->klass->element_class->valuetype && !readonly)
8298 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8299 CHECK_TYPELOAD (cmethod->klass);
8302 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8305 g_assert_not_reached ();
8312 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8316 /* Tail prefix / tail call optimization */
8318 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8319 /* FIXME: runtime generic context pointer for jumps? */
8320 /* FIXME: handle this for generic sharing eventually */
8321 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8322 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8323 supported_tail_call = TRUE;
8325 if (supported_tail_call) {
8328 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8329 INLINE_FAILURE ("tail call");
8331 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8333 if (ARCH_HAVE_OP_TAIL_CALL) {
8334 /* Handle tail calls similarly to normal calls */
8337 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8338 call->tail_call = TRUE;
8339 call->method = cmethod;
8340 call->signature = mono_method_signature (cmethod);
8343 * We implement tail calls by storing the actual arguments into the
8344 * argument variables, then emitting a CEE_JMP.
8346 for (i = 0; i < n; ++i) {
8347 /* Prevent argument from being register allocated */
8348 arg_array [i]->flags |= MONO_INST_VOLATILE;
8349 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8351 ins = (MonoInst*)call;
8352 ins->inst_p0 = cmethod;
8353 ins->inst_p1 = arg_array [0];
8354 MONO_ADD_INS (bblock, ins);
8355 link_bblock (cfg, bblock, end_bblock);
8356 start_new_bblock = 1;
8358 // FIXME: Eliminate unreachable epilogs
8361 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8362 * only reachable from this call.
8364 GET_BBLOCK (cfg, tblock, ip + 5);
8365 if (tblock == bblock || tblock->in_count == 0)
8374 * Synchronized wrappers.
 8375	 * It's hard to determine where to replace a method with its synchronized
8376 * wrapper without causing an infinite recursion. The current solution is
8377 * to add the synchronized wrapper in the trampolines, and to
8378 * change the called method to a dummy wrapper, and resolve that wrapper
8379 * to the real method in mono_jit_compile_method ().
8381 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8382 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8383 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8384 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8388 INLINE_FAILURE ("call");
8389 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8390 imt_arg, vtable_arg);
8393 link_bblock (cfg, bblock, end_bblock);
8394 start_new_bblock = 1;
8396 // FIXME: Eliminate unreachable epilogs
8399 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8400 * only reachable from this call.
8402 GET_BBLOCK (cfg, tblock, ip + 5);
8403 if (tblock == bblock || tblock->in_count == 0)
8410 /* End of call, INS should contain the result of the call, if any */
8412 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8415 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8420 if (keep_this_alive) {
8421 MonoInst *dummy_use;
8423 /* See mono_emit_method_call_full () */
8424 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8427 CHECK_CFG_EXCEPTION;
8431 g_assert (*ip == CEE_RET);
8435 constrained_call = NULL;
8437 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8441 if (cfg->method != method) {
8442 /* return from inlined method */
8444 * If in_count == 0, that means the ret is unreachable due to
 8445	 * being preceded by a throw. In that case, inline_method () will
8446 * handle setting the return value
8447 * (test case: test_0_inline_throw ()).
8449 if (return_var && cfg->cbb->in_count) {
8450 MonoType *ret_type = mono_method_signature (method)->ret;
8456 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8459 //g_assert (returnvar != -1);
8460 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8461 cfg->ret_var_set = TRUE;
8464 if (cfg->lmf_var && cfg->cbb->in_count)
8468 MonoType *ret_type = mono_method_signature (method)->ret;
8470 if (seq_points && !sym_seq_points) {
 8472	 * Place a seq point here too even though the IL stack is not
8473 * empty, so a step over on
8476 * will work correctly.
8478 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8479 MONO_ADD_INS (cfg->cbb, ins);
8482 g_assert (!return_var);
8486 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8489 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8492 if (!cfg->vret_addr) {
8495 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8497 EMIT_NEW_RETLOADA (cfg, ret_addr);
8499 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8500 ins->klass = mono_class_from_mono_type (ret_type);
8503 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8504 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8505 MonoInst *iargs [1];
8509 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8510 mono_arch_emit_setret (cfg, method, conv);
8512 mono_arch_emit_setret (cfg, method, *sp);
8515 mono_arch_emit_setret (cfg, method, *sp);
8520 if (sp != stack_start)
8522 MONO_INST_NEW (cfg, ins, OP_BR);
8524 ins->inst_target_bb = end_bblock;
8525 MONO_ADD_INS (bblock, ins);
8526 link_bblock (cfg, bblock, end_bblock);
8527 start_new_bblock = 1;
8531 MONO_INST_NEW (cfg, ins, OP_BR);
8533 target = ip + 1 + (signed char)(*ip);
8535 GET_BBLOCK (cfg, tblock, target);
8536 link_bblock (cfg, bblock, tblock);
8537 ins->inst_target_bb = tblock;
8538 if (sp != stack_start) {
8539 handle_stack_args (cfg, stack_start, sp - stack_start);
8541 CHECK_UNVERIFIABLE (cfg);
8543 MONO_ADD_INS (bblock, ins);
8544 start_new_bblock = 1;
8545 inline_costs += BRANCH_COST;
8559 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8561 target = ip + 1 + *(signed char*)ip;
8567 inline_costs += BRANCH_COST;
8571 MONO_INST_NEW (cfg, ins, OP_BR);
8574 target = ip + 4 + (gint32)read32(ip);
8576 GET_BBLOCK (cfg, tblock, target);
8577 link_bblock (cfg, bblock, tblock);
8578 ins->inst_target_bb = tblock;
8579 if (sp != stack_start) {
8580 handle_stack_args (cfg, stack_start, sp - stack_start);
8582 CHECK_UNVERIFIABLE (cfg);
8585 MONO_ADD_INS (bblock, ins);
8587 start_new_bblock = 1;
8588 inline_costs += BRANCH_COST;
8595 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8596 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8597 guint32 opsize = is_short ? 1 : 4;
8599 CHECK_OPSIZE (opsize);
8601 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8604 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8609 GET_BBLOCK (cfg, tblock, target);
8610 link_bblock (cfg, bblock, tblock);
8611 GET_BBLOCK (cfg, tblock, ip);
8612 link_bblock (cfg, bblock, tblock);
8614 if (sp != stack_start) {
8615 handle_stack_args (cfg, stack_start, sp - stack_start);
8616 CHECK_UNVERIFIABLE (cfg);
8619 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8620 cmp->sreg1 = sp [0]->dreg;
8621 type_from_op (cmp, sp [0], NULL);
8624 #if SIZEOF_REGISTER == 4
8625 if (cmp->opcode == OP_LCOMPARE_IMM) {
8626 /* Convert it to OP_LCOMPARE */
8627 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8628 ins->type = STACK_I8;
8629 ins->dreg = alloc_dreg (cfg, STACK_I8);
8631 MONO_ADD_INS (bblock, ins);
8632 cmp->opcode = OP_LCOMPARE;
8633 cmp->sreg2 = ins->dreg;
8636 MONO_ADD_INS (bblock, cmp);
8638 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8639 type_from_op (ins, sp [0], NULL);
8640 MONO_ADD_INS (bblock, ins);
8641 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8642 GET_BBLOCK (cfg, tblock, target);
8643 ins->inst_true_bb = tblock;
8644 GET_BBLOCK (cfg, tblock, ip);
8645 ins->inst_false_bb = tblock;
8646 start_new_bblock = 2;
8649 inline_costs += BRANCH_COST;
8664 MONO_INST_NEW (cfg, ins, *ip);
8666 target = ip + 4 + (gint32)read32(ip);
8672 inline_costs += BRANCH_COST;
8676 MonoBasicBlock **targets;
8677 MonoBasicBlock *default_bblock;
8678 MonoJumpInfoBBTable *table;
8679 int offset_reg = alloc_preg (cfg);
8680 int target_reg = alloc_preg (cfg);
8681 int table_reg = alloc_preg (cfg);
8682 int sum_reg = alloc_preg (cfg);
8683 gboolean use_op_switch;
8687 n = read32 (ip + 1);
8690 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8694 CHECK_OPSIZE (n * sizeof (guint32));
8695 target = ip + n * sizeof (guint32);
8697 GET_BBLOCK (cfg, default_bblock, target);
8698 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8700 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8701 for (i = 0; i < n; ++i) {
8702 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8703 targets [i] = tblock;
8704 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8708 if (sp != stack_start) {
8710 * Link the current bb with the targets as well, so handle_stack_args
8711 * will set their in_stack correctly.
8713 link_bblock (cfg, bblock, default_bblock);
8714 for (i = 0; i < n; ++i)
8715 link_bblock (cfg, bblock, targets [i]);
8717 handle_stack_args (cfg, stack_start, sp - stack_start);
8719 CHECK_UNVERIFIABLE (cfg);
8722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8726 for (i = 0; i < n; ++i)
8727 link_bblock (cfg, bblock, targets [i]);
8729 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8730 table->table = targets;
8731 table->table_size = n;
8733 use_op_switch = FALSE;
8735 /* ARM implements SWITCH statements differently */
8736 /* FIXME: Make it use the generic implementation */
8737 if (!cfg->compile_aot)
8738 use_op_switch = TRUE;
8741 if (COMPILE_LLVM (cfg))
8742 use_op_switch = TRUE;
8744 cfg->cbb->has_jump_table = 1;
8746 if (use_op_switch) {
8747 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8748 ins->sreg1 = src1->dreg;
8749 ins->inst_p0 = table;
8750 ins->inst_many_bb = targets;
8751 ins->klass = GUINT_TO_POINTER (n);
8752 MONO_ADD_INS (cfg->cbb, ins);
8754 if (sizeof (gpointer) == 8)
8755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8757 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8759 #if SIZEOF_REGISTER == 8
8760 /* The upper word might not be zero, and we add it to a 64 bit address later */
8761 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8764 if (cfg->compile_aot) {
8765 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8767 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8768 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8769 ins->inst_p0 = table;
8770 ins->dreg = table_reg;
8771 MONO_ADD_INS (cfg->cbb, ins);
8774 /* FIXME: Use load_memindex */
8775 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8777 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8779 start_new_bblock = 1;
8780 inline_costs += (BRANCH_COST * 2);
8800 dreg = alloc_freg (cfg);
8803 dreg = alloc_lreg (cfg);
8806 dreg = alloc_ireg_ref (cfg);
8809 dreg = alloc_preg (cfg);
8812 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8813 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8814 ins->flags |= ins_flag;
8816 MONO_ADD_INS (bblock, ins);
8818 if (ins->flags & MONO_INST_VOLATILE) {
8819 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8820 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8821 emit_memory_barrier (cfg, FullBarrier);
8836 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8837 ins->flags |= ins_flag;
8840 if (ins->flags & MONO_INST_VOLATILE) {
8841 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
 8842	 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8843 emit_memory_barrier (cfg, FullBarrier);
8846 MONO_ADD_INS (bblock, ins);
8848 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8849 emit_write_barrier (cfg, sp [0], sp [1]);
8858 MONO_INST_NEW (cfg, ins, (*ip));
8860 ins->sreg1 = sp [0]->dreg;
8861 ins->sreg2 = sp [1]->dreg;
8862 type_from_op (ins, sp [0], sp [1]);
8864 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8866 /* Use the immediate opcodes if possible */
8867 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8868 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8869 if (imm_opcode != -1) {
8870 ins->opcode = imm_opcode;
8871 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8874 sp [1]->opcode = OP_NOP;
8878 MONO_ADD_INS ((cfg)->cbb, (ins));
8880 *sp++ = mono_decompose_opcode (cfg, ins);
8897 MONO_INST_NEW (cfg, ins, (*ip));
8899 ins->sreg1 = sp [0]->dreg;
8900 ins->sreg2 = sp [1]->dreg;
8901 type_from_op (ins, sp [0], sp [1]);
8903 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8904 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8906 /* FIXME: Pass opcode to is_inst_imm */
8908 /* Use the immediate opcodes if possible */
8909 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8912 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8913 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8914 /* Keep emulated opcodes which are optimized away later */
8915 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8916 imm_opcode = mono_op_to_op_imm (ins->opcode);
8919 if (imm_opcode != -1) {
8920 ins->opcode = imm_opcode;
8921 if (sp [1]->opcode == OP_I8CONST) {
8922 #if SIZEOF_REGISTER == 8
8923 ins->inst_imm = sp [1]->inst_l;
8925 ins->inst_ls_word = sp [1]->inst_ls_word;
8926 ins->inst_ms_word = sp [1]->inst_ms_word;
8930 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8933 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8934 if (sp [1]->next == NULL)
8935 sp [1]->opcode = OP_NOP;
8938 MONO_ADD_INS ((cfg)->cbb, (ins));
8940 *sp++ = mono_decompose_opcode (cfg, ins);
8953 case CEE_CONV_OVF_I8:
8954 case CEE_CONV_OVF_U8:
8958 /* Special case this earlier so we have long constants in the IR */
8959 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8960 int data = sp [-1]->inst_c0;
8961 sp [-1]->opcode = OP_I8CONST;
8962 sp [-1]->type = STACK_I8;
8963 #if SIZEOF_REGISTER == 8
8964 if ((*ip) == CEE_CONV_U8)
8965 sp [-1]->inst_c0 = (guint32)data;
8967 sp [-1]->inst_c0 = data;
8969 sp [-1]->inst_ls_word = data;
8970 if ((*ip) == CEE_CONV_U8)
8971 sp [-1]->inst_ms_word = 0;
8973 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8975 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
/*
 * Checked conversions. R8 sources are first converted through a checked
 * conv to (u)int64 so the range check happens on an integer value.
 * NOTE(review): elided-listing excerpt — the ADD_UNOP for the final
 * narrowing and the ip advance are not visible here.
 */
8982 case CEE_CONV_OVF_I4:
8983 case CEE_CONV_OVF_I1:
8984 case CEE_CONV_OVF_I2:
8985 case CEE_CONV_OVF_I:
8986 case CEE_CONV_OVF_U:
8989 if (sp [-1]->type == STACK_R8) {
8990 ADD_UNOP (CEE_CONV_OVF_I8);
8997 case CEE_CONV_OVF_U1:
8998 case CEE_CONV_OVF_U2:
8999 case CEE_CONV_OVF_U4:
9002 if (sp [-1]->type == STACK_R8) {
9003 ADD_UNOP (CEE_CONV_OVF_U8);
/* Unsigned-source checked conversions. */
9010 case CEE_CONV_OVF_I1_UN:
9011 case CEE_CONV_OVF_I2_UN:
9012 case CEE_CONV_OVF_I4_UN:
9013 case CEE_CONV_OVF_I8_UN:
9014 case CEE_CONV_OVF_U1_UN:
9015 case CEE_CONV_OVF_U2_UN:
9016 case CEE_CONV_OVF_U4_UN:
9017 case CEE_CONV_OVF_U8_UN:
9018 case CEE_CONV_OVF_I_UN:
9019 case CEE_CONV_OVF_U_UN:
9026 CHECK_CFG_EXCEPTION;
9026 CHECK_CFG_EXCEPTION;
9030 case CEE_ADD_OVF_UN:
9032 case CEE_MUL_OVF_UN:
9034 case CEE_SUB_OVF_UN:
9040 GSHAREDVT_FAILURE (*ip);
9043 token = read32 (ip + 1);
9044 klass = mini_get_class (method, token, generic_context);
9045 CHECK_TYPELOAD (klass);
9047 if (generic_class_is_reference_type (cfg, klass)) {
9048 MonoInst *store, *load;
9049 int dreg = alloc_ireg_ref (cfg);
9051 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9052 load->flags |= ins_flag;
9053 MONO_ADD_INS (cfg->cbb, load);
9055 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9056 store->flags |= ins_flag;
9057 MONO_ADD_INS (cfg->cbb, store);
9059 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9060 emit_write_barrier (cfg, sp [0], sp [1]);
9062 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
/*
 * LDOBJ: load the value-type object at the address on top of the stack.
 * Includes two peepholes that look ahead one instruction (ip + 5).
 * NOTE(review): elided-listing excerpt — the ip advances and break are
 * not visible here.
 */
9074 token = read32 (ip + 1);
9075 klass = mini_get_class (method, token, generic_context);
9076 CHECK_TYPELOAD (klass);
9078 /* Optimize the common ldobj+stloc combination */
9088 loc_index = ip [5] - CEE_STLOC_0;
/* Only valid when the next instruction is in the same basic block. */
9095 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9096 CHECK_LOCAL (loc_index);
/* Load directly into the local's vreg, skipping the stack temp. */
9098 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9099 ins->dreg = cfg->locals [loc_index]->dreg;
9105 /* Optimize the ldobj+stobj combination */
9106 /* The reference case ends up being a load+store anyway */
9107 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9112 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
/* Generic path: plain typed load from the address. */
9119 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/*
 * LDSTR: push a string literal. The strategy depends on how the method
 * was produced (dynamic method / wrapper / shared / AOT / plain JIT).
 * NOTE(review): elided-listing excerpt — several else branches and the
 * ip advance are not visible.
 */
9128 CHECK_STACK_OVF (1);
9130 n = read32 (ip + 1);
/* Dynamic methods store the string object itself in the wrapper data. */
9132 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9133 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9134 ins->type = STACK_OBJ;
/* Other wrappers hold a raw char* that must be converted at runtime. */
9137 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9138 MonoInst *iargs [1];
9140 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9141 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
/* Domain-shared code cannot bake in a domain-specific string object, so
 * call mono_ldstr at runtime with (domain, image, index). */
9143 if (cfg->opt & MONO_OPT_SHARED) {
9144 MonoInst *iargs [3];
9146 if (cfg->compile_aot) {
9147 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9149 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9150 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9151 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9152 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
/* Eagerly intern the literal now so the runtime call is cheap later. */
9153 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
/* Cold (out-of-line) blocks avoid materializing the string object. */
9155 if (bblock->out_of_line) {
9156 MonoInst *iargs [2];
9158 if (image == mono_defaults.corlib) {
9160 * Avoid relocations in AOT and save some space by using a
9161 * version of helper_ldstr specialized to mscorlib.
9163 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9164 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9166 /* Avoid creating the string object */
9167 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9168 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9169 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
/* AOT: emit a patchable string constant. */
9173 if (cfg->compile_aot) {
9174 NEW_LDSTRCONST (cfg, ins, image, n);
9176 MONO_ADD_INS (bblock, ins);
/* Plain JIT: intern now and embed the object pointer directly. */
9179 NEW_PCONST (cfg, ins, NULL);
9180 ins->type = STACK_OBJ;
9181 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9183 OUT_OF_MEMORY_FAILURE;
9186 MONO_ADD_INS (bblock, ins);
/*
 * CEE_NEWOBJ: allocate an object (or value-type temp) and invoke its ctor.
 * Handles many special shapes: corlib exception ctors, System.Array,
 * string ctors, value types, generic sharing (rgctx/vtable args),
 * gsharedvt, and ctor inlining.
 * NOTE(review): elided-listing excerpt — closing braces, else markers and
 * ip advances between the visible fragments are missing from view.
 */
9195 MonoInst *iargs [2];
9196 MonoMethodSignature *fsig;
9199 MonoInst *vtable_arg = NULL;
/* Resolve the ctor and its signature from the token. */
9202 token = read32 (ip + 1);
9203 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9204 if (!cmethod || mono_loader_get_last_error ())
9206 fsig = mono_method_get_signature (cmethod, image, token);
9210 mono_save_token_info (cfg, image, token, cmethod);
9212 if (!mono_class_init (cmethod->klass))
9213 TYPE_LOAD_ERROR (cmethod->klass);
9215 context_used = mini_method_check_context_used (cfg, cmethod);
/* Security checks: CAS link demands or CoreCLR visibility rules. */
9217 if (mono_security_cas_enabled ()) {
9218 if (check_linkdemand (cfg, method, cmethod))
9219 INLINE_FAILURE ("linkdemand");
9220 CHECK_CFG_EXCEPTION;
9221 } else if (mono_security_core_clr_enabled ()) {
9222 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* Shared generic code may need an explicit class-init for the target. */
9225 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9226 emit_generic_class_init (cfg, cmethod->klass);
9227 CHECK_TYPELOAD (cmethod->klass);
9231 if (cfg->gsharedvt) {
9232 if (mini_is_gsharedvt_variable_signature (sig))
9233 GSHAREDVT_FAILURE (*ip);
/* Sharable generic valuetype ctor: pass an rgctx/vtable argument so the
 * shared code can recover the concrete instantiation. */
9237 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9238 mono_method_is_generic_sharable (cmethod, TRUE)) {
9239 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9240 mono_class_vtable (cfg->domain, cmethod->klass);
9241 CHECK_TYPELOAD (cmethod->klass);
9243 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9244 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9247 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9248 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9250 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9252 CHECK_TYPELOAD (cmethod->klass);
9253 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9258 n = fsig->param_count;
9262 * Generate smaller code for the common newobj <exception> instruction in
9263 * argument checking code.
/* Corlib exception ctors with <=2 string args get dedicated icalls. */
9265 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9266 is_exception_class (cmethod->klass) && n <= 2 &&
9267 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9268 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9269 MonoInst *iargs [3];
9271 g_assert (!vtable_arg);
9275 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9278 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9282 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9287 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9290 g_assert_not_reached ();
9298 /* move the args to allow room for 'this' in the first position */
9304 /* check_call_signature () requires sp[0] to be set */
9305 this_ins.type = STACK_OBJ;
9307 if (check_call_signature (cfg, fsig, sp))
/* System.Array ctors become mono_array_new_N icalls (N = rank count). */
9312 if (mini_class_is_system_array (cmethod->klass)) {
9313 g_assert (!vtable_arg);
9315 *sp = emit_get_rgctx_method (cfg, context_used,
9316 cmethod, MONO_RGCTX_INFO_METHOD);
9318 /* Avoid varargs in the common case */
9319 if (fsig->param_count == 1)
9320 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9321 else if (fsig->param_count == 2)
9322 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9323 else if (fsig->param_count == 3)
9324 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9325 else if (fsig->param_count == 4)
9326 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9328 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
/* String ctors allocate their own storage; 'this' is a null stand-in. */
9329 } else if (cmethod->string_ctor) {
9330 g_assert (!context_used);
9331 g_assert (!vtable_arg);
9332 /* we simply pass a null pointer */
9333 EMIT_NEW_PCONST (cfg, *sp, NULL);
9334 /* now call the string ctor */
9335 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9337 MonoInst* callvirt_this_arg = NULL;
/* Value types: zero-init a local and pass its address as 'this'. */
9339 if (cmethod->klass->valuetype) {
9340 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9341 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9342 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9347 * The code generated by mini_emit_virtual_call () expects
9348 * iargs [0] to be a boxed instance, but luckily the vcall
9349 * will be transformed into a normal call there.
9351 } else if (context_used) {
9352 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9355 MonoVTable *vtable = NULL;
9357 if (!cfg->compile_aot)
9358 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9359 CHECK_TYPELOAD (cmethod->klass);
9362 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9363 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9364 * As a workaround, we call class cctors before allocating objects.
9366 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9367 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9368 if (cfg->verbose_level > 2)
9369 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9370 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9373 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9376 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9379 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9381 /* Now call the actual ctor */
9382 /* Avoid virtual calls to ctors if possible */
/* MarshalByRef objects may be proxies, so keep the virtual 'this'. */
9383 if (mono_class_is_marshalbyref (cmethod->klass))
9384 callvirt_this_arg = sp [0];
/* Ctor dispatch, in preference order: intrinsic, inline, gsharedvt
 * indirect, rgctx indirect, plain call. */
9387 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9388 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9389 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9394 CHECK_CFG_EXCEPTION;
9395 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9396 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9397 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9398 !g_list_find (dont_inline, cmethod)) {
9401 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9402 cfg->real_offset += 5;
9405 inline_costs += costs - 5;
9407 INLINE_FAILURE ("inline failure");
9408 // FIXME-VT: Clean this up
9409 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9410 GSHAREDVT_FAILURE(*ip);
9411 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9413 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9416 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9417 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9418 } else if (context_used &&
9419 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9420 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9421 MonoInst *cmethod_addr;
9423 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9424 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9426 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9428 INLINE_FAILURE ("ctor call");
9429 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9430 callvirt_this_arg, NULL, vtable_arg);
/* Value-type case: load the initialized temp back to push as result. */
9434 if (alloc == NULL) {
9436 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9437 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
/*
 * CEE_CASTCLASS: checked downcast of the object on top of the stack.
 * Three strategies: cached-castclass wrapper (variant generic args),
 * inlined castclass wrapper (MBR/interface), or direct handle_castclass.
 * NOTE(review): elided-listing excerpt.
 */
9451 token = read32 (ip + 1);
9452 klass = mini_get_class (method, token, generic_context);
9453 CHECK_TYPELOAD (klass);
9454 if (sp [0]->type != STACK_OBJ)
9457 context_used = mini_class_check_context_used (cfg, klass);
/* Reference-variant generics use a wrapper with a per-site cache slot. */
9459 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9460 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9467 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* args[2] is the cache slot: patched under AOT, a domain-allocated
 * pointer otherwise. */
9470 if (cfg->compile_aot)
9471 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9473 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9475 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9477 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9478 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9479 reset_cast_details (cfg);
/* MarshalByRef / interface targets: inline the castclass wrapper. */
9482 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9483 MonoMethod *mono_castclass;
9484 MonoInst *iargs [1];
9487 mono_castclass = mono_marshal_get_castclass (klass);
9490 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9491 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9492 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9493 reset_cast_details (cfg);
9494 CHECK_CFG_EXCEPTION;
9495 g_assert (costs > 0);
9498 cfg->real_offset += 5;
9503 inline_costs += costs;
/* Common path: emit the cast check inline. */
9506 ins = handle_castclass (cfg, klass, *sp, context_used);
9507 CHECK_CFG_EXCEPTION;
/*
 * CEE_ISINST: type test; pushes the object or null. Mirrors the
 * CASTCLASS strategies (cached wrapper / inlined wrapper / inline check).
 * NOTE(review): elided-listing excerpt.
 */
9517 token = read32 (ip + 1);
9518 klass = mini_get_class (method, token, generic_context);
9519 CHECK_TYPELOAD (klass);
9520 if (sp [0]->type != STACK_OBJ)
9523 context_used = mini_class_check_context_used (cfg, klass);
9525 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9526 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9533 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* Per-site cache slot, same scheme as castclass-with-cache. */
9536 if (cfg->compile_aot)
9537 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9539 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9541 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9544 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9545 MonoMethod *mono_isinst;
9546 MonoInst *iargs [1];
9549 mono_isinst = mono_marshal_get_isinst (klass);
9552 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9553 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9554 CHECK_CFG_EXCEPTION;
9555 g_assert (costs > 0);
9558 cfg->real_offset += 5;
9563 inline_costs += costs;
9566 ins = handle_isinst (cfg, klass, *sp, context_used);
9567 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX_ANY: castclass for reference types, unbox+load for value
 * types, with gsharedvt and Nullable<T> special cases.
 * NOTE(review): elided-listing excerpt; the duplicated castclass code
 * below is flagged by the original FIXME.
 */
9574 case CEE_UNBOX_ANY: {
9578 token = read32 (ip + 1);
9579 klass = mini_get_class (method, token, generic_context);
9580 CHECK_TYPELOAD (klass);
9582 mono_save_token_info (cfg, image, token, klass);
9584 context_used = mini_class_check_context_used (cfg, klass);
/* gsharedvt: the concrete type is only known at runtime. */
9586 if (mini_is_gsharedvt_klass (cfg, klass)) {
9587 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9595 if (generic_class_is_reference_type (cfg, klass)) {
9596 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9597 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9598 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9605 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9608 /*FIXME AOT support*/
9609 if (cfg->compile_aot)
9610 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9612 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9614 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9615 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9618 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9619 MonoMethod *mono_castclass;
9620 MonoInst *iargs [1];
9623 mono_castclass = mono_marshal_get_castclass (klass);
9626 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9627 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9628 CHECK_CFG_EXCEPTION;
9629 g_assert (costs > 0);
9632 cfg->real_offset += 5;
9636 inline_costs += costs;
9638 ins = handle_castclass (cfg, klass, *sp, context_used);
9639 CHECK_CFG_EXCEPTION;
/* Nullable<T> unboxing has its own helper (null -> empty Nullable). */
9647 if (mono_class_is_nullable (klass)) {
9648 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
/* Plain value type: unbox to an interior pointer, then load the value. */
9655 ins = handle_unbox (cfg, klass, sp, context_used);
9661 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
/*
 * CEE_BOX, with a peephole for the very common "box valuetype; brtrue"
 * pattern in generic code: since boxing a non-Nullable value type never
 * yields null, the branch outcome is known and the box can be skipped.
 * NOTE(review): elided-listing excerpt — the early-out for reference
 * types and the branch-target decoding context are partly missing.
 */
9674 token = read32 (ip + 1);
9675 klass = mini_get_class (method, token, generic_context);
9676 CHECK_TYPELOAD (klass);
9678 mono_save_token_info (cfg, image, token, klass);
9680 context_used = mini_class_check_context_used (cfg, klass);
/* box on a reference type is a no-op. */
9682 if (generic_class_is_reference_type (cfg, klass)) {
9688 if (klass == mono_defaults.void_class)
9690 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9692 /* frequent check in generic code: box (struct), brtrue */
9694 // FIXME: LLVM can't handle the inconsistent bb linking
9695 if (!mono_class_is_nullable (klass) &&
9696 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9697 (ip [5] == CEE_BRTRUE ||
9698 ip [5] == CEE_BRTRUE_S ||
9699 ip [5] == CEE_BRFALSE ||
9700 ip [5] == CEE_BRFALSE_S)) {
9701 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9703 MonoBasicBlock *true_bb, *false_bb;
9707 if (cfg->verbose_level > 3) {
9708 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9709 printf ("<box+brtrue opt>\n");
/* Decode the short/long branch target (ip already advanced here). */
9717 target = ip + 1 + (signed char)(*ip);
9724 target = ip + 4 + (gint)(read32 (ip));
9728 g_assert_not_reached ();
9732 * We need to link both bblocks, since it is needed for handling stack
9733 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9734 * Branching to only one of them would lead to inconsistencies, so
9735 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9737 GET_BBLOCK (cfg, true_bb, target);
9738 GET_BBLOCK (cfg, false_bb, ip);
9740 mono_link_bblock (cfg, cfg->cbb, true_bb);
9741 mono_link_bblock (cfg, cfg->cbb, false_bb);
9743 if (sp != stack_start) {
9744 handle_stack_args (cfg, stack_start, sp - stack_start);
9746 CHECK_UNVERIFIABLE (cfg);
/* LLVM needs the explicit const+compare; the JIT can take a plain BR. */
9749 if (COMPILE_LLVM (cfg)) {
9750 dreg = alloc_ireg (cfg);
9751 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9754 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9756 /* The JIT can't eliminate the iconst+compare */
9757 MONO_INST_NEW (cfg, ins, OP_BR);
9758 ins->inst_target_bb = is_true ? true_bb : false_bb;
9759 MONO_ADD_INS (cfg->cbb, ins);
9762 start_new_bblock = 1;
/* Normal path: actually box the value. */
9766 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9768 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX: push an interior pointer to the boxed value type.
 * NOTE(review): elided-listing excerpt.
 */
9777 token = read32 (ip + 1);
9778 klass = mini_get_class (method, token, generic_context);
9779 CHECK_TYPELOAD (klass);
9781 mono_save_token_info (cfg, image, token, klass);
9783 context_used = mini_class_check_context_used (cfg, klass);
/* Nullable<T>: unbox via helper into a temp, push the temp's address. */
9785 if (mono_class_is_nullable (klass)) {
9788 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9789 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9793 ins = handle_unbox (cfg, klass, sp, context_used);
/*
 * Instance field access (CEE_LDFLD / CEE_LDFLDA / CEE_STFLD); this
 * handler also normalizes LDFLD-on-static into the static path.
 * Covers: remoting (MBR/context-bound) wrappers, gsharedvt runtime field
 * offsets, write barriers on reference stores, and the magic-TLS fields.
 * NOTE(review): elided-listing excerpt — many closing braces and else
 * markers are not visible between fragments.
 */
9806 MonoClassField *field;
9807 #ifndef DISABLE_REMOTING
9811 gboolean is_instance;
9813 gpointer addr = NULL;
9814 gboolean is_special_static;
9816 MonoInst *store_val = NULL;
9817 MonoInst *thread_ins;
9820 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9822 if (op == CEE_STFLD) {
9830 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9832 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9835 if (op == CEE_STSFLD) {
/* Resolve the field; wrappers carry the resolved field in their data. */
9843 token = read32 (ip + 1);
9844 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9845 field = mono_method_get_wrapper_data (method, token);
9846 klass = field->parent;
9849 field = mono_field_from_token (image, token, &klass, generic_context);
9853 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9854 FIELD_ACCESS_FAILURE;
9855 mono_class_init (klass);
9857 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9860 /* if the class is Critical then transparent code cannot access it's fields */
9861 if (!is_instance && mono_security_core_clr_enabled ())
9862 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9864 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9865 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9866 if (mono_security_core_clr_enabled ())
9867 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9871 * LDFLD etc. is usable on static fields as well, so convert those cases to
/* Normalize ldfld/stfld of a static field to the static handling below. */
9874 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9886 g_assert_not_reached ();
9888 is_instance = FALSE;
9891 context_used = mini_class_check_context_used (cfg, klass);
/* Valuetype field offsets exclude the object header. */
9895 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9896 if (op == CEE_STFLD) {
9897 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9899 #ifndef DISABLE_REMOTING
/* Possibly-remote target: go through the stfld remoting wrapper. */
9900 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9901 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9902 MonoInst *iargs [5];
9904 GSHAREDVT_FAILURE (op);
9907 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9908 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9909 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9913 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9914 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9915 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9916 CHECK_CFG_EXCEPTION;
9917 g_assert (costs > 0);
9919 cfg->real_offset += 5;
9922 inline_costs += costs;
9924 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9931 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* gsharedvt: field offset is fetched from the rgctx at runtime. */
9933 if (mini_is_gsharedvt_klass (cfg, klass)) {
9934 MonoInst *offset_ins;
9936 context_used = mini_class_check_context_used (cfg, klass);
9938 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9939 dreg = alloc_ireg_mp (cfg);
9940 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9941 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9942 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9944 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9946 if (sp [0]->opcode != OP_LDADDR)
9947 store->flags |= MONO_INST_FAULT;
/* Reference store into an object needs a write barrier, except when the
 * stored value is statically the null constant. */
9949 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9950 /* insert call to write barrier */
9954 dreg = alloc_ireg_mp (cfg);
9955 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9956 emit_write_barrier (cfg, ptr, sp [1]);
9959 store->flags |= ins_flag;
9966 #ifndef DISABLE_REMOTING
/* Possibly-remote load: go through the ldfld/ldflda remoting wrapper. */
9967 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9968 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9969 MonoInst *iargs [4];
9971 GSHAREDVT_FAILURE (op);
9974 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9975 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9976 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9977 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9978 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9979 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9980 CHECK_CFG_EXCEPTION;
9982 g_assert (costs > 0);
9984 cfg->real_offset += 5;
9988 inline_costs += costs;
9990 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* Loading a field of a vtype on the stack: take the variable's address. */
9996 if (sp [0]->type == STACK_VTYPE) {
9999 /* Have to compute the address of the variable */
10001 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10003 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10005 g_assert (var->klass == klass);
10007 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10011 if (op == CEE_LDFLDA) {
10012 if (is_magic_tls_access (field)) {
10013 GSHAREDVT_FAILURE (*ip);
10015 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
/* ldflda on an object must null-check explicitly (no implicit fault). */
10017 if (sp [0]->type == STACK_OBJ) {
10018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10019 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10022 dreg = alloc_ireg_mp (cfg);
10024 if (mini_is_gsharedvt_klass (cfg, klass)) {
10025 MonoInst *offset_ins;
10027 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10028 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10030 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10032 ins->klass = mono_class_from_mono_type (field->type);
10033 ins->type = STACK_MP;
/* Plain LDFLD: null-check then typed load (runtime offset if gsharedvt). */
10039 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10041 if (mini_is_gsharedvt_klass (cfg, klass)) {
10042 MonoInst *offset_ins;
10044 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10045 dreg = alloc_ireg_mp (cfg);
10046 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10047 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10049 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10051 load->flags |= ins_flag;
10052 if (sp [0]->opcode != OP_LDADDR)
10053 load->flags |= MONO_INST_FAULT;
/*
 * Static field access (LDSFLD / LDSFLDA / STSFLD): first compute the
 * field address (TLS special-statics fast path, shared/AOT icall,
 * rgctx-based shared generics, or a direct vtable pointer), then perform
 * the load/store; readonly initialized fields can be folded to constants.
 * NOTE(review): elided-listing excerpt — #endif/#else markers, closing
 * braces and the ip advance are missing between fragments.
 */
10067 * We can only support shared generic static
10068 * field access on architectures where the
10069 * trampoline code has been extended to handle
10070 * the generic class init.
10072 #ifndef MONO_ARCH_VTABLE_REG
10073 GENERIC_SHARING_FAILURE (op);
10076 context_used = mini_class_check_context_used (cfg, klass);
10078 ftype = mono_field_get_type (field);
/* Literal (const) fields are never accessed as storage. */
10080 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10083 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10084 * to be called here.
10086 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10087 mono_class_vtable (cfg->domain, klass);
10088 CHECK_TYPELOAD (klass);
/* Look up any special-static (thread/context) slot for this field. */
10090 mono_domain_lock (cfg->domain);
10091 if (cfg->domain->special_static_fields)
10092 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10093 mono_domain_unlock (cfg->domain);
10095 is_special_static = mono_class_field_is_special_static (field);
/* High bit set in addr appears to mark slots the fast TLS path cannot
 * handle — TODO confirm against get_thread_static_data (). */
10097 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10098 thread_ins = mono_get_thread_intrinsic (cfg);
10102 /* Generate IR to compute the field address */
10103 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10105 * Fast access to TLS data
10106 * Inline version of get_thread_static_data () in
10110 int idx, static_data_reg, array_reg, dreg;
10112 GSHAREDVT_FAILURE (op);
10114 // offset &= 0x7fffffff;
10115 // idx = (offset >> 24) - 1;
10116 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10117 MONO_ADD_INS (cfg->cbb, thread_ins);
10118 static_data_reg = alloc_ireg (cfg);
10119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* AOT cannot bake the offset, so decode idx/offset at runtime. */
10121 if (cfg->compile_aot) {
10122 int offset_reg, offset2_reg, idx_reg;
10124 /* For TLS variables, this will return the TLS offset */
10125 EMIT_NEW_SFLDACONST (cfg, ins, field);
10126 offset_reg = ins->dreg;
10127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10128 idx_reg = alloc_ireg (cfg);
10129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10132 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10133 array_reg = alloc_ireg (cfg);
10134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10135 offset2_reg = alloc_ireg (cfg);
10136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10137 dreg = alloc_ireg (cfg);
10138 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT: idx and byte offset are compile-time constants. */
10140 offset = (gsize)addr & 0x7fffffff;
10141 idx = (offset >> 24) - 1;
10143 array_reg = alloc_ireg (cfg);
10144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10145 dreg = alloc_ireg (cfg);
10146 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
/* Shared / AOT special-static: resolve the address via icall. */
10148 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10149 (cfg->compile_aot && is_special_static) ||
10150 (context_used && is_special_static)) {
10151 MonoInst *iargs [2];
10153 g_assert (field->parent);
10154 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10155 if (context_used) {
10156 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10157 field, MONO_RGCTX_INFO_CLASS_FIELD);
10159 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10161 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Shared generic code: static_data base comes from the rgctx. */
10162 } else if (context_used) {
10163 MonoInst *static_data;
10166 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10167 method->klass->name_space, method->klass->name, method->name,
10168 depth, field->offset);
10171 if (mono_class_needs_cctor_run (klass, method))
10172 emit_generic_class_init (cfg, klass);
10175 * The pointer we're computing here is
10177 * super_info.static_data + field->offset
10179 static_data = emit_get_rgctx_klass (cfg, context_used,
10180 klass, MONO_RGCTX_INFO_STATIC_DATA);
10182 if (mini_is_gsharedvt_klass (cfg, klass)) {
10183 MonoInst *offset_ins;
10185 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10186 dreg = alloc_ireg_mp (cfg);
10187 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10188 } else if (field->offset == 0) {
10191 int addr_reg = mono_alloc_preg (cfg);
10192 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10194 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10195 MonoInst *iargs [2];
10197 g_assert (field->parent);
10198 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10199 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10200 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Default path: direct address from the class vtable. */
10202 MonoVTable *vtable = NULL;
10204 if (!cfg->compile_aot)
10205 vtable = mono_class_vtable (cfg->domain, klass);
10206 CHECK_TYPELOAD (klass);
10209 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10210 if (!(g_slist_find (class_inits, klass))) {
10211 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10212 if (cfg->verbose_level > 2)
10213 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10214 class_inits = g_slist_prepend (class_inits, klass);
10217 if (cfg->run_cctors) {
10219 /* This makes so that inline cannot trigger */
10220 /* .cctors: too many apps depend on them */
10221 /* running with a specific order... */
10223 if (! vtable->initialized)
10224 INLINE_FAILURE ("class init");
/* Run the cctor eagerly at JIT time; propagate any TIE it throws. */
10225 ex = mono_runtime_class_init_full (vtable, FALSE);
10227 set_exception_object (cfg, ex);
10228 goto exception_exit;
10232 if (cfg->compile_aot)
10233 EMIT_NEW_SFLDACONST (cfg, ins, field);
10236 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10237 EMIT_NEW_PCONST (cfg, ins, addr);
/* Special-static slot that the fast TLS path rejected: runtime helper. */
10240 MonoInst *iargs [1];
10241 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10242 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10246 /* Generate IR to do the actual load/store operation */
10248 if (op == CEE_LDSFLDA) {
10249 ins->klass = mono_class_from_mono_type (ftype);
10250 ins->type = STACK_PTR;
10252 } else if (op == CEE_STSFLD) {
10255 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10256 store->flags |= ins_flag;
/* LDSFLD: initonly fields of initialized classes fold to constants. */
10258 gboolean is_const = FALSE;
10259 MonoVTable *vtable = NULL;
10260 gpointer addr = NULL;
10262 if (!context_used) {
10263 vtable = mono_class_vtable (cfg->domain, klass);
10264 CHECK_TYPELOAD (klass);
10266 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10267 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10268 int ro_type = ftype->type;
10270 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
/* Enums fold as their underlying base type. */
10271 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10272 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10275 GSHAREDVT_FAILURE (op);
10277 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
/* Per-type constant embedding of the current field value. */
10280 case MONO_TYPE_BOOLEAN:
10282 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10286 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10289 case MONO_TYPE_CHAR:
10291 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10295 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10300 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10304 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10309 case MONO_TYPE_PTR:
10310 case MONO_TYPE_FNPTR:
10311 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10312 type_to_eval_stack_type ((cfg), field->type, *sp);
10315 case MONO_TYPE_STRING:
10316 case MONO_TYPE_OBJECT:
10317 case MONO_TYPE_CLASS:
10318 case MONO_TYPE_SZARRAY:
10319 case MONO_TYPE_ARRAY:
10320 if (!mono_gc_is_moving ()) {
10321 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10322 type_to_eval_stack_type ((cfg), field->type, *sp);
10330 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10335 case MONO_TYPE_VALUETYPE:
10345 CHECK_STACK_OVF (1);
10347 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10348 load->flags |= ins_flag;
10361 token = read32 (ip + 1);
10362 klass = mini_get_class (method, token, generic_context);
10363 CHECK_TYPELOAD (klass);
10364 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10365 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10366 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10367 generic_class_is_reference_type (cfg, klass)) {
10368 /* insert call to write barrier */
10369 emit_write_barrier (cfg, sp [0], sp [1]);
10381 const char *data_ptr;
10383 guint32 field_token;
10389 token = read32 (ip + 1);
10391 klass = mini_get_class (method, token, generic_context);
10392 CHECK_TYPELOAD (klass);
10394 context_used = mini_class_check_context_used (cfg, klass);
10396 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10397 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10398 ins->sreg1 = sp [0]->dreg;
10399 ins->type = STACK_I4;
10400 ins->dreg = alloc_ireg (cfg);
10401 MONO_ADD_INS (cfg->cbb, ins);
10402 *sp = mono_decompose_opcode (cfg, ins);
10405 if (context_used) {
10406 MonoInst *args [3];
10407 MonoClass *array_class = mono_array_class_get (klass, 1);
10408 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10410 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10413 args [0] = emit_get_rgctx_klass (cfg, context_used,
10414 array_class, MONO_RGCTX_INFO_VTABLE);
10419 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10421 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10423 if (cfg->opt & MONO_OPT_SHARED) {
10424 /* Decompose now to avoid problems with references to the domainvar */
10425 MonoInst *iargs [3];
10427 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10428 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10429 iargs [2] = sp [0];
10431 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10433 /* Decompose later since it is needed by abcrem */
10434 MonoClass *array_type = mono_array_class_get (klass, 1);
10435 mono_class_vtable (cfg->domain, array_type);
10436 CHECK_TYPELOAD (array_type);
10438 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10439 ins->dreg = alloc_ireg_ref (cfg);
10440 ins->sreg1 = sp [0]->dreg;
10441 ins->inst_newa_class = klass;
10442 ins->type = STACK_OBJ;
10443 ins->klass = array_type;
10444 MONO_ADD_INS (cfg->cbb, ins);
10445 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10446 cfg->cbb->has_array_access = TRUE;
10448 /* Needed so mono_emit_load_get_addr () gets called */
10449 mono_get_got_var (cfg);
10459 * we inline/optimize the initialization sequence if possible.
10460 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10461 * for small sizes open code the memcpy
10462 * ensure the rva field is big enough
10464 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10465 MonoMethod *memcpy_method = get_memcpy_method ();
10466 MonoInst *iargs [3];
10467 int add_reg = alloc_ireg_mp (cfg);
10469 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10470 if (cfg->compile_aot) {
10471 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10473 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10475 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10476 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10485 if (sp [0]->type != STACK_OBJ)
10488 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10489 ins->dreg = alloc_preg (cfg);
10490 ins->sreg1 = sp [0]->dreg;
10491 ins->type = STACK_I4;
10492 /* This flag will be inherited by the decomposition */
10493 ins->flags |= MONO_INST_FAULT;
10494 MONO_ADD_INS (cfg->cbb, ins);
10495 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10496 cfg->cbb->has_array_access = TRUE;
10504 if (sp [0]->type != STACK_OBJ)
10507 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10509 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10510 CHECK_TYPELOAD (klass);
10511 /* we need to make sure that this array is exactly the type it needs
10512 * to be for correctness. the wrappers are lax with their usage
10513 * so we need to ignore them here
10515 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10516 MonoClass *array_class = mono_array_class_get (klass, 1);
10517 mini_emit_check_array_type (cfg, sp [0], array_class);
10518 CHECK_TYPELOAD (array_class);
10522 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10527 case CEE_LDELEM_I1:
10528 case CEE_LDELEM_U1:
10529 case CEE_LDELEM_I2:
10530 case CEE_LDELEM_U2:
10531 case CEE_LDELEM_I4:
10532 case CEE_LDELEM_U4:
10533 case CEE_LDELEM_I8:
10535 case CEE_LDELEM_R4:
10536 case CEE_LDELEM_R8:
10537 case CEE_LDELEM_REF: {
10543 if (*ip == CEE_LDELEM) {
10545 token = read32 (ip + 1);
10546 klass = mini_get_class (method, token, generic_context);
10547 CHECK_TYPELOAD (klass);
10548 mono_class_init (klass);
10551 klass = array_access_to_klass (*ip);
10553 if (sp [0]->type != STACK_OBJ)
10556 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10558 if (mini_is_gsharedvt_klass (cfg, klass)) {
10559 // FIXME-VT: OP_ICONST optimization
10560 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10561 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10562 ins->opcode = OP_LOADV_MEMBASE;
10563 } else if (sp [1]->opcode == OP_ICONST) {
10564 int array_reg = sp [0]->dreg;
10565 int index_reg = sp [1]->dreg;
10566 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10568 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10571 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10575 if (*ip == CEE_LDELEM)
10582 case CEE_STELEM_I1:
10583 case CEE_STELEM_I2:
10584 case CEE_STELEM_I4:
10585 case CEE_STELEM_I8:
10586 case CEE_STELEM_R4:
10587 case CEE_STELEM_R8:
10588 case CEE_STELEM_REF:
10593 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10595 if (*ip == CEE_STELEM) {
10597 token = read32 (ip + 1);
10598 klass = mini_get_class (method, token, generic_context);
10599 CHECK_TYPELOAD (klass);
10600 mono_class_init (klass);
10603 klass = array_access_to_klass (*ip);
10605 if (sp [0]->type != STACK_OBJ)
10608 emit_array_store (cfg, klass, sp, TRUE);
10610 if (*ip == CEE_STELEM)
10617 case CEE_CKFINITE: {
10621 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10622 ins->sreg1 = sp [0]->dreg;
10623 ins->dreg = alloc_freg (cfg);
10624 ins->type = STACK_R8;
10625 MONO_ADD_INS (bblock, ins);
10627 *sp++ = mono_decompose_opcode (cfg, ins);
10632 case CEE_REFANYVAL: {
10633 MonoInst *src_var, *src;
10635 int klass_reg = alloc_preg (cfg);
10636 int dreg = alloc_preg (cfg);
10638 GSHAREDVT_FAILURE (*ip);
10641 MONO_INST_NEW (cfg, ins, *ip);
10644 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10645 CHECK_TYPELOAD (klass);
10646 mono_class_init (klass);
10648 context_used = mini_class_check_context_used (cfg, klass);
10651 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10653 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10654 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10657 if (context_used) {
10658 MonoInst *klass_ins;
10660 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10661 klass, MONO_RGCTX_INFO_KLASS);
10664 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10665 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10667 mini_emit_class_check (cfg, klass_reg, klass);
10669 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10670 ins->type = STACK_MP;
10675 case CEE_MKREFANY: {
10676 MonoInst *loc, *addr;
10678 GSHAREDVT_FAILURE (*ip);
10681 MONO_INST_NEW (cfg, ins, *ip);
10684 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10685 CHECK_TYPELOAD (klass);
10686 mono_class_init (klass);
10688 context_used = mini_class_check_context_used (cfg, klass);
10690 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10691 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10693 if (context_used) {
10694 MonoInst *const_ins;
10695 int type_reg = alloc_preg (cfg);
10697 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10698 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10701 } else if (cfg->compile_aot) {
10702 int const_reg = alloc_preg (cfg);
10703 int type_reg = alloc_preg (cfg);
10705 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10708 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10710 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10715 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10716 ins->type = STACK_VTYPE;
10717 ins->klass = mono_defaults.typed_reference_class;
10722 case CEE_LDTOKEN: {
10724 MonoClass *handle_class;
10726 CHECK_STACK_OVF (1);
10729 n = read32 (ip + 1);
10731 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10732 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10733 handle = mono_method_get_wrapper_data (method, n);
10734 handle_class = mono_method_get_wrapper_data (method, n + 1);
10735 if (handle_class == mono_defaults.typehandle_class)
10736 handle = &((MonoClass*)handle)->byval_arg;
10739 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10743 mono_class_init (handle_class);
10744 if (cfg->generic_sharing_context) {
10745 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10746 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10747 /* This case handles ldtoken
10748 of an open type, like for
10751 } else if (handle_class == mono_defaults.typehandle_class) {
10752 /* If we get a MONO_TYPE_CLASS
10753 then we need to provide the
10755 instantiation of it. */
10756 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10759 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10760 } else if (handle_class == mono_defaults.fieldhandle_class)
10761 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10762 else if (handle_class == mono_defaults.methodhandle_class)
10763 context_used = mini_method_check_context_used (cfg, handle);
10765 g_assert_not_reached ();
10768 if ((cfg->opt & MONO_OPT_SHARED) &&
10769 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10770 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10771 MonoInst *addr, *vtvar, *iargs [3];
10772 int method_context_used;
10774 method_context_used = mini_method_check_context_used (cfg, method);
10776 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10778 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10779 EMIT_NEW_ICONST (cfg, iargs [1], n);
10780 if (method_context_used) {
10781 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10782 method, MONO_RGCTX_INFO_METHOD);
10783 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10785 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10786 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10788 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10792 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10794 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10795 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10796 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10797 (cmethod->klass == mono_defaults.systemtype_class) &&
10798 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10799 MonoClass *tclass = mono_class_from_mono_type (handle);
10801 mono_class_init (tclass);
10802 if (context_used) {
10803 ins = emit_get_rgctx_klass (cfg, context_used,
10804 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10805 } else if (cfg->compile_aot) {
10806 if (method->wrapper_type) {
10807 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10808 /* Special case for static synchronized wrappers */
10809 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10811 /* FIXME: n is not a normal token */
10812 cfg->disable_aot = TRUE;
10813 EMIT_NEW_PCONST (cfg, ins, NULL);
10816 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10819 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10821 ins->type = STACK_OBJ;
10822 ins->klass = cmethod->klass;
10825 MonoInst *addr, *vtvar;
10827 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10829 if (context_used) {
10830 if (handle_class == mono_defaults.typehandle_class) {
10831 ins = emit_get_rgctx_klass (cfg, context_used,
10832 mono_class_from_mono_type (handle),
10833 MONO_RGCTX_INFO_TYPE);
10834 } else if (handle_class == mono_defaults.methodhandle_class) {
10835 ins = emit_get_rgctx_method (cfg, context_used,
10836 handle, MONO_RGCTX_INFO_METHOD);
10837 } else if (handle_class == mono_defaults.fieldhandle_class) {
10838 ins = emit_get_rgctx_field (cfg, context_used,
10839 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10841 g_assert_not_reached ();
10843 } else if (cfg->compile_aot) {
10844 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10846 EMIT_NEW_PCONST (cfg, ins, handle);
10848 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10849 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10850 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10860 MONO_INST_NEW (cfg, ins, OP_THROW);
10862 ins->sreg1 = sp [0]->dreg;
10864 bblock->out_of_line = TRUE;
10865 MONO_ADD_INS (bblock, ins);
10866 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10867 MONO_ADD_INS (bblock, ins);
10870 link_bblock (cfg, bblock, end_bblock);
10871 start_new_bblock = 1;
10873 case CEE_ENDFINALLY:
10874 /* mono_save_seq_point_info () depends on this */
10875 if (sp != stack_start)
10876 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10877 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10878 MONO_ADD_INS (bblock, ins);
10880 start_new_bblock = 1;
10883 * Control will leave the method so empty the stack, otherwise
10884 * the next basic block will start with a nonempty stack.
10886 while (sp != stack_start) {
10891 case CEE_LEAVE_S: {
10894 if (*ip == CEE_LEAVE) {
10896 target = ip + 5 + (gint32)read32(ip + 1);
10899 target = ip + 2 + (signed char)(ip [1]);
10902 /* empty the stack */
10903 while (sp != stack_start) {
10908 * If this leave statement is in a catch block, check for a
10909 * pending exception, and rethrow it if necessary.
10910 * We avoid doing this in runtime invoke wrappers, since those are called
10911 * by native code which excepts the wrapper to catch all exceptions.
10913 for (i = 0; i < header->num_clauses; ++i) {
10914 MonoExceptionClause *clause = &header->clauses [i];
10917 * Use <= in the final comparison to handle clauses with multiple
10918 * leave statements, like in bug #78024.
10919 * The ordering of the exception clauses guarantees that we find the
10920 * innermost clause.
10922 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10924 MonoBasicBlock *dont_throw;
10929 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10932 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10934 NEW_BBLOCK (cfg, dont_throw);
10937 * Currently, we always rethrow the abort exception, despite the
10938 * fact that this is not correct. See thread6.cs for an example.
10939 * But propagating the abort exception is more important than
10940 * getting the semantics right.
10942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10943 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10944 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10946 MONO_START_BB (cfg, dont_throw);
10951 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10953 MonoExceptionClause *clause;
10955 for (tmp = handlers; tmp; tmp = tmp->next) {
10956 clause = tmp->data;
10957 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10959 link_bblock (cfg, bblock, tblock);
10960 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10961 ins->inst_target_bb = tblock;
10962 ins->inst_eh_block = clause;
10963 MONO_ADD_INS (bblock, ins);
10964 bblock->has_call_handler = 1;
10965 if (COMPILE_LLVM (cfg)) {
10966 MonoBasicBlock *target_bb;
10969 * Link the finally bblock with the target, since it will
10970 * conceptually branch there.
10971 * FIXME: Have to link the bblock containing the endfinally.
10973 GET_BBLOCK (cfg, target_bb, target);
10974 link_bblock (cfg, tblock, target_bb);
10977 g_list_free (handlers);
10980 MONO_INST_NEW (cfg, ins, OP_BR);
10981 MONO_ADD_INS (bblock, ins);
10982 GET_BBLOCK (cfg, tblock, target);
10983 link_bblock (cfg, bblock, tblock);
10984 ins->inst_target_bb = tblock;
10985 start_new_bblock = 1;
10987 if (*ip == CEE_LEAVE)
10996 * Mono specific opcodes
10998 case MONO_CUSTOM_PREFIX: {
11000 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11004 case CEE_MONO_ICALL: {
11006 MonoJitICallInfo *info;
11008 token = read32 (ip + 2);
11009 func = mono_method_get_wrapper_data (method, token);
11010 info = mono_find_jit_icall_by_addr (func);
11012 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11015 CHECK_STACK (info->sig->param_count);
11016 sp -= info->sig->param_count;
11018 ins = mono_emit_jit_icall (cfg, info->func, sp);
11019 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11023 inline_costs += 10 * num_calls++;
11027 case CEE_MONO_LDPTR: {
11030 CHECK_STACK_OVF (1);
11032 token = read32 (ip + 2);
11034 ptr = mono_method_get_wrapper_data (method, token);
11035 /* FIXME: Generalize this */
11036 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11037 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11042 EMIT_NEW_PCONST (cfg, ins, ptr);
11045 inline_costs += 10 * num_calls++;
11046 /* Can't embed random pointers into AOT code */
11047 cfg->disable_aot = 1;
11050 case CEE_MONO_JIT_ICALL_ADDR: {
11051 MonoJitICallInfo *callinfo;
11054 CHECK_STACK_OVF (1);
11056 token = read32 (ip + 2);
11058 ptr = mono_method_get_wrapper_data (method, token);
11059 callinfo = mono_find_jit_icall_by_addr (ptr);
11060 g_assert (callinfo);
11061 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11064 inline_costs += 10 * num_calls++;
11067 case CEE_MONO_ICALL_ADDR: {
11068 MonoMethod *cmethod;
11071 CHECK_STACK_OVF (1);
11073 token = read32 (ip + 2);
11075 cmethod = mono_method_get_wrapper_data (method, token);
11077 if (cfg->compile_aot) {
11078 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11080 ptr = mono_lookup_internal_call (cmethod);
11082 EMIT_NEW_PCONST (cfg, ins, ptr);
11088 case CEE_MONO_VTADDR: {
11089 MonoInst *src_var, *src;
11095 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11096 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11101 case CEE_MONO_NEWOBJ: {
11102 MonoInst *iargs [2];
11104 CHECK_STACK_OVF (1);
11106 token = read32 (ip + 2);
11107 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11108 mono_class_init (klass);
11109 NEW_DOMAINCONST (cfg, iargs [0]);
11110 MONO_ADD_INS (cfg->cbb, iargs [0]);
11111 NEW_CLASSCONST (cfg, iargs [1], klass);
11112 MONO_ADD_INS (cfg->cbb, iargs [1]);
11113 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11115 inline_costs += 10 * num_calls++;
11118 case CEE_MONO_OBJADDR:
11121 MONO_INST_NEW (cfg, ins, OP_MOVE);
11122 ins->dreg = alloc_ireg_mp (cfg);
11123 ins->sreg1 = sp [0]->dreg;
11124 ins->type = STACK_MP;
11125 MONO_ADD_INS (cfg->cbb, ins);
11129 case CEE_MONO_LDNATIVEOBJ:
11131 * Similar to LDOBJ, but instead load the unmanaged
11132 * representation of the vtype to the stack.
11137 token = read32 (ip + 2);
11138 klass = mono_method_get_wrapper_data (method, token);
11139 g_assert (klass->valuetype);
11140 mono_class_init (klass);
11143 MonoInst *src, *dest, *temp;
11146 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11147 temp->backend.is_pinvoke = 1;
11148 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11149 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11151 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11152 dest->type = STACK_VTYPE;
11153 dest->klass = klass;
11159 case CEE_MONO_RETOBJ: {
11161 * Same as RET, but return the native representation of a vtype
11164 g_assert (cfg->ret);
11165 g_assert (mono_method_signature (method)->pinvoke);
11170 token = read32 (ip + 2);
11171 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11173 if (!cfg->vret_addr) {
11174 g_assert (cfg->ret_var_is_local);
11176 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11178 EMIT_NEW_RETLOADA (cfg, ins);
11180 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11182 if (sp != stack_start)
11185 MONO_INST_NEW (cfg, ins, OP_BR);
11186 ins->inst_target_bb = end_bblock;
11187 MONO_ADD_INS (bblock, ins);
11188 link_bblock (cfg, bblock, end_bblock);
11189 start_new_bblock = 1;
11193 case CEE_MONO_CISINST:
11194 case CEE_MONO_CCASTCLASS: {
11199 token = read32 (ip + 2);
11200 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11201 if (ip [1] == CEE_MONO_CISINST)
11202 ins = handle_cisinst (cfg, klass, sp [0]);
11204 ins = handle_ccastclass (cfg, klass, sp [0]);
11210 case CEE_MONO_SAVE_LMF:
11211 case CEE_MONO_RESTORE_LMF:
11212 #ifdef MONO_ARCH_HAVE_LMF_OPS
11213 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11214 MONO_ADD_INS (bblock, ins);
11215 cfg->need_lmf_area = TRUE;
11219 case CEE_MONO_CLASSCONST:
11220 CHECK_STACK_OVF (1);
11222 token = read32 (ip + 2);
11223 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11226 inline_costs += 10 * num_calls++;
11228 case CEE_MONO_NOT_TAKEN:
11229 bblock->out_of_line = TRUE;
11232 case CEE_MONO_TLS: {
11235 CHECK_STACK_OVF (1);
11237 key = (gint32)read32 (ip + 2);
11238 g_assert (key < TLS_KEY_NUM);
11240 ins = mono_create_tls_get (cfg, key);
11242 if (cfg->compile_aot) {
11243 cfg->disable_aot = TRUE;
11244 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11245 ins->dreg = alloc_preg (cfg);
11246 ins->type = STACK_PTR;
11248 g_assert_not_reached ();
11251 ins->type = STACK_PTR;
11252 MONO_ADD_INS (bblock, ins);
11257 case CEE_MONO_DYN_CALL: {
11258 MonoCallInst *call;
11260 /* It would be easier to call a trampoline, but that would put an
11261 * extra frame on the stack, confusing exception handling. So
11262 * implement it inline using an opcode for now.
11265 if (!cfg->dyn_call_var) {
11266 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11267 /* prevent it from being register allocated */
11268 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11271 /* Has to use a call inst since the local regalloc expects it */
11272 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11273 ins = (MonoInst*)call;
11275 ins->sreg1 = sp [0]->dreg;
11276 ins->sreg2 = sp [1]->dreg;
11277 MONO_ADD_INS (bblock, ins);
11279 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11282 inline_costs += 10 * num_calls++;
11286 case CEE_MONO_MEMORY_BARRIER: {
11288 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11292 case CEE_MONO_JIT_ATTACH: {
11293 MonoInst *args [16];
11294 MonoInst *ad_ins, *lmf_ins;
11295 MonoBasicBlock *next_bb = NULL;
11297 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11299 EMIT_NEW_PCONST (cfg, ins, NULL);
11300 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11306 ad_ins = mono_get_domain_intrinsic (cfg);
11307 lmf_ins = mono_get_lmf_intrinsic (cfg);
11310 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11311 NEW_BBLOCK (cfg, next_bb);
11313 MONO_ADD_INS (cfg->cbb, ad_ins);
11314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11315 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11317 MONO_ADD_INS (cfg->cbb, lmf_ins);
11318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11319 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11322 if (cfg->compile_aot) {
11323 /* AOT code is only used in the root domain */
11324 EMIT_NEW_PCONST (cfg, args [0], NULL);
11326 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11328 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11329 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11332 MONO_START_BB (cfg, next_bb);
11338 case CEE_MONO_JIT_DETACH: {
11339 MonoInst *args [16];
11341 /* Restore the original domain */
11342 dreg = alloc_ireg (cfg);
11343 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11344 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11349 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11355 case CEE_PREFIX1: {
11358 case CEE_ARGLIST: {
11359 /* somewhat similar to LDTOKEN */
11360 MonoInst *addr, *vtvar;
11361 CHECK_STACK_OVF (1);
11362 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11364 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11365 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11367 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11368 ins->type = STACK_VTYPE;
11369 ins->klass = mono_defaults.argumenthandle_class;
11382 * The following transforms:
11383 * CEE_CEQ into OP_CEQ
11384 * CEE_CGT into OP_CGT
11385 * CEE_CGT_UN into OP_CGT_UN
11386 * CEE_CLT into OP_CLT
11387 * CEE_CLT_UN into OP_CLT_UN
11389 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11391 MONO_INST_NEW (cfg, ins, cmp->opcode);
11393 cmp->sreg1 = sp [0]->dreg;
11394 cmp->sreg2 = sp [1]->dreg;
11395 type_from_op (cmp, sp [0], sp [1]);
11397 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11398 cmp->opcode = OP_LCOMPARE;
11399 else if (sp [0]->type == STACK_R8)
11400 cmp->opcode = OP_FCOMPARE;
11402 cmp->opcode = OP_ICOMPARE;
11403 MONO_ADD_INS (bblock, cmp);
11404 ins->type = STACK_I4;
11405 ins->dreg = alloc_dreg (cfg, ins->type);
11406 type_from_op (ins, sp [0], sp [1]);
11408 if (cmp->opcode == OP_FCOMPARE) {
11410 * The backends expect the fceq opcodes to do the
11413 cmp->opcode = OP_NOP;
11414 ins->sreg1 = cmp->sreg1;
11415 ins->sreg2 = cmp->sreg2;
11417 MONO_ADD_INS (bblock, ins);
11423 MonoInst *argconst;
11424 MonoMethod *cil_method;
11426 CHECK_STACK_OVF (1);
11428 n = read32 (ip + 2);
11429 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11430 if (!cmethod || mono_loader_get_last_error ())
11432 mono_class_init (cmethod->klass);
11434 mono_save_token_info (cfg, image, n, cmethod);
11436 context_used = mini_method_check_context_used (cfg, cmethod);
11438 cil_method = cmethod;
11439 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11440 METHOD_ACCESS_FAILURE;
11442 if (mono_security_cas_enabled ()) {
11443 if (check_linkdemand (cfg, method, cmethod))
11444 INLINE_FAILURE ("linkdemand");
11445 CHECK_CFG_EXCEPTION;
11446 } else if (mono_security_core_clr_enabled ()) {
11447 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11451 * Optimize the common case of ldftn+delegate creation
11453 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11454 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11455 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11456 MonoInst *target_ins;
11457 MonoMethod *invoke;
11458 int invoke_context_used;
11460 invoke = mono_get_delegate_invoke (ctor_method->klass);
11461 if (!invoke || !mono_method_signature (invoke))
11464 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11466 target_ins = sp [-1];
11468 if (mono_security_core_clr_enabled ())
11469 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11471 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11472 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11473 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11475 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11479 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11480 /* FIXME: SGEN support */
11481 if (invoke_context_used == 0) {
11483 if (cfg->verbose_level > 3)
11484 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11486 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11487 CHECK_CFG_EXCEPTION;
11496 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11497 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11501 inline_costs += 10 * num_calls++;
11504 case CEE_LDVIRTFTN: {
11505 MonoInst *args [2];
11509 n = read32 (ip + 2);
11510 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11511 if (!cmethod || mono_loader_get_last_error ())
11513 mono_class_init (cmethod->klass);
11515 context_used = mini_method_check_context_used (cfg, cmethod);
11517 if (mono_security_cas_enabled ()) {
11518 if (check_linkdemand (cfg, method, cmethod))
11519 INLINE_FAILURE ("linkdemand");
11520 CHECK_CFG_EXCEPTION;
11521 } else if (mono_security_core_clr_enabled ()) {
11522 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11528 args [1] = emit_get_rgctx_method (cfg, context_used,
11529 cmethod, MONO_RGCTX_INFO_METHOD);
11532 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11534 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11537 inline_costs += 10 * num_calls++;
11541 CHECK_STACK_OVF (1);
11543 n = read16 (ip + 2);
11545 EMIT_NEW_ARGLOAD (cfg, ins, n);
11550 CHECK_STACK_OVF (1);
11552 n = read16 (ip + 2);
11554 NEW_ARGLOADA (cfg, ins, n);
11555 MONO_ADD_INS (cfg->cbb, ins);
11563 n = read16 (ip + 2);
11565 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11567 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11571 CHECK_STACK_OVF (1);
11573 n = read16 (ip + 2);
11575 EMIT_NEW_LOCLOAD (cfg, ins, n);
11580 unsigned char *tmp_ip;
11581 CHECK_STACK_OVF (1);
11583 n = read16 (ip + 2);
11586 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11592 EMIT_NEW_LOCLOADA (cfg, ins, n);
11601 n = read16 (ip + 2);
11603 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11605 emit_stloc_ir (cfg, sp, header, n);
11612 if (sp != stack_start)
11614 if (cfg->method != method)
11616 * Inlining this into a loop in a parent could lead to
11617 * stack overflows which is different behavior than the
11618 * non-inlined case, thus disable inlining in this case.
11620 goto inline_failure;
11622 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11623 ins->dreg = alloc_preg (cfg);
11624 ins->sreg1 = sp [0]->dreg;
11625 ins->type = STACK_PTR;
11626 MONO_ADD_INS (cfg->cbb, ins);
11628 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11630 ins->flags |= MONO_INST_INIT;
11635 case CEE_ENDFILTER: {
11636 MonoExceptionClause *clause, *nearest;
11637 int cc, nearest_num;
11641 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11643 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11644 ins->sreg1 = (*sp)->dreg;
11645 MONO_ADD_INS (bblock, ins);
11646 start_new_bblock = 1;
11651 for (cc = 0; cc < header->num_clauses; ++cc) {
11652 clause = &header->clauses [cc];
11653 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11654 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11655 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11660 g_assert (nearest);
11661 if ((ip - header->code) != nearest->handler_offset)
11666 case CEE_UNALIGNED_:
11667 ins_flag |= MONO_INST_UNALIGNED;
11668 /* FIXME: record alignment? we can assume 1 for now */
11672 case CEE_VOLATILE_:
11673 ins_flag |= MONO_INST_VOLATILE;
11677 ins_flag |= MONO_INST_TAILCALL;
11678 cfg->flags |= MONO_CFG_HAS_TAIL;
11679 /* Can't inline tail calls at this time */
11680 inline_costs += 100000;
11687 token = read32 (ip + 2);
11688 klass = mini_get_class (method, token, generic_context);
11689 CHECK_TYPELOAD (klass);
11690 if (generic_class_is_reference_type (cfg, klass))
11691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11693 mini_emit_initobj (cfg, *sp, NULL, klass);
11697 case CEE_CONSTRAINED_:
11699 token = read32 (ip + 2);
11700 constrained_call = mini_get_class (method, token, generic_context);
11701 CHECK_TYPELOAD (constrained_call);
11705 case CEE_INITBLK: {
11706 MonoInst *iargs [3];
11710 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11711 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11712 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11713 /* emit_memset only works when val == 0 */
11714 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11716 iargs [0] = sp [0];
11717 iargs [1] = sp [1];
11718 iargs [2] = sp [2];
11719 if (ip [1] == CEE_CPBLK) {
11720 MonoMethod *memcpy_method = get_memcpy_method ();
11721 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11723 MonoMethod *memset_method = get_memset_method ();
11724 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11734 ins_flag |= MONO_INST_NOTYPECHECK;
11736 ins_flag |= MONO_INST_NORANGECHECK;
11737 /* we ignore the no-nullcheck for now since we
11738 * really do it explicitly only when doing callvirt->call
11742 case CEE_RETHROW: {
11744 int handler_offset = -1;
11746 for (i = 0; i < header->num_clauses; ++i) {
11747 MonoExceptionClause *clause = &header->clauses [i];
11748 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11749 handler_offset = clause->handler_offset;
11754 bblock->flags |= BB_EXCEPTION_UNSAFE;
11756 g_assert (handler_offset != -1);
11758 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11759 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11760 ins->sreg1 = load->dreg;
11761 MONO_ADD_INS (bblock, ins);
11763 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11764 MONO_ADD_INS (bblock, ins);
11767 link_bblock (cfg, bblock, end_bblock);
11768 start_new_bblock = 1;
11776 GSHAREDVT_FAILURE (*ip);
11778 CHECK_STACK_OVF (1);
11780 token = read32 (ip + 2);
11781 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11782 MonoType *type = mono_type_create_from_typespec (image, token);
11783 val = mono_type_size (type, &ialign);
11785 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11786 CHECK_TYPELOAD (klass);
11787 mono_class_init (klass);
11788 val = mono_type_size (&klass->byval_arg, &ialign);
11790 EMIT_NEW_ICONST (cfg, ins, val);
11795 case CEE_REFANYTYPE: {
11796 MonoInst *src_var, *src;
11798 GSHAREDVT_FAILURE (*ip);
11804 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11806 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11807 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11808 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11813 case CEE_READONLY_:
11826 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11836 g_warning ("opcode 0x%02x not handled", *ip);
11840 if (start_new_bblock != 1)
11843 bblock->cil_length = ip - bblock->cil_code;
11844 if (bblock->next_bb) {
11845 /* This could already be set because of inlining, #693905 */
11846 MonoBasicBlock *bb = bblock;
11848 while (bb->next_bb)
11850 bb->next_bb = end_bblock;
11852 bblock->next_bb = end_bblock;
11855 if (cfg->method == method && cfg->domainvar) {
11857 MonoInst *get_domain;
11859 cfg->cbb = init_localsbb;
11861 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11862 MONO_ADD_INS (cfg->cbb, get_domain);
11864 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11866 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11867 MONO_ADD_INS (cfg->cbb, store);
11870 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11871 if (cfg->compile_aot)
11872 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11873 mono_get_got_var (cfg);
11876 if (cfg->method == method && cfg->got_var)
11877 mono_emit_load_got_addr (cfg);
11880 cfg->cbb = init_localsbb;
11882 for (i = 0; i < header->num_locals; ++i) {
11883 emit_init_local (cfg, i, header->locals [i]);
11887 if (cfg->init_ref_vars && cfg->method == method) {
11888 /* Emit initialization for ref vars */
11889 // FIXME: Avoid duplication initialization for IL locals.
11890 for (i = 0; i < cfg->num_varinfo; ++i) {
11891 MonoInst *ins = cfg->varinfo [i];
11893 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11894 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11898 if (cfg->lmf_var && cfg->method == method) {
11899 cfg->cbb = init_localsbb;
11900 emit_push_lmf (cfg);
11904 MonoBasicBlock *bb;
11907 * Make seq points at backward branch targets interruptable.
11909 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11910 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11911 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11914 /* Add a sequence point for method entry/exit events */
11916 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11917 MONO_ADD_INS (init_localsbb, ins);
11918 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11919 MONO_ADD_INS (cfg->bb_exit, ins);
11923 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11924 * the code they refer to was dead (#11880).
11926 if (sym_seq_points) {
11927 for (i = 0; i < header->code_size; ++i) {
11928 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11931 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11932 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11939 if (cfg->method == method) {
11940 MonoBasicBlock *bb;
11941 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11942 bb->region = mono_find_block_region (cfg, bb->real_offset);
11944 mono_create_spvar_for_region (cfg, bb->region);
11945 if (cfg->verbose_level > 2)
11946 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11950 g_slist_free (class_inits);
11951 dont_inline = g_list_remove (dont_inline, method);
11953 if (inline_costs < 0) {
11956 /* Method is too large */
11957 mname = mono_method_full_name (method, TRUE);
11958 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11959 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11961 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11962 mono_basic_block_free (original_bb);
11966 if ((cfg->verbose_level > 2) && (cfg->method == method))
11967 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11969 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11970 mono_basic_block_free (original_bb);
11971 return inline_costs;
11974 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11981 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11985 set_exception_type_from_invalid_il (cfg, method, ip);
11989 g_slist_free (class_inits);
11990 mono_basic_block_free (original_bb);
11991 dont_inline = g_list_remove (dont_inline, method);
11992 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11997 store_membase_reg_to_store_membase_imm (int opcode)
12000 case OP_STORE_MEMBASE_REG:
12001 return OP_STORE_MEMBASE_IMM;
12002 case OP_STOREI1_MEMBASE_REG:
12003 return OP_STOREI1_MEMBASE_IMM;
12004 case OP_STOREI2_MEMBASE_REG:
12005 return OP_STOREI2_MEMBASE_IMM;
12006 case OP_STOREI4_MEMBASE_REG:
12007 return OP_STOREI4_MEMBASE_IMM;
12008 case OP_STOREI8_MEMBASE_REG:
12009 return OP_STOREI8_MEMBASE_IMM;
12011 g_assert_not_reached ();
12018 mono_op_to_op_imm (int opcode)
12022 return OP_IADD_IMM;
12024 return OP_ISUB_IMM;
12026 return OP_IDIV_IMM;
12028 return OP_IDIV_UN_IMM;
12030 return OP_IREM_IMM;
12032 return OP_IREM_UN_IMM;
12034 return OP_IMUL_IMM;
12036 return OP_IAND_IMM;
12040 return OP_IXOR_IMM;
12042 return OP_ISHL_IMM;
12044 return OP_ISHR_IMM;
12046 return OP_ISHR_UN_IMM;
12049 return OP_LADD_IMM;
12051 return OP_LSUB_IMM;
12053 return OP_LAND_IMM;
12057 return OP_LXOR_IMM;
12059 return OP_LSHL_IMM;
12061 return OP_LSHR_IMM;
12063 return OP_LSHR_UN_IMM;
12066 return OP_COMPARE_IMM;
12068 return OP_ICOMPARE_IMM;
12070 return OP_LCOMPARE_IMM;
12072 case OP_STORE_MEMBASE_REG:
12073 return OP_STORE_MEMBASE_IMM;
12074 case OP_STOREI1_MEMBASE_REG:
12075 return OP_STOREI1_MEMBASE_IMM;
12076 case OP_STOREI2_MEMBASE_REG:
12077 return OP_STOREI2_MEMBASE_IMM;
12078 case OP_STOREI4_MEMBASE_REG:
12079 return OP_STOREI4_MEMBASE_IMM;
12081 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12083 return OP_X86_PUSH_IMM;
12084 case OP_X86_COMPARE_MEMBASE_REG:
12085 return OP_X86_COMPARE_MEMBASE_IMM;
12087 #if defined(TARGET_AMD64)
12088 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12089 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12091 case OP_VOIDCALL_REG:
12092 return OP_VOIDCALL;
12100 return OP_LOCALLOC_IMM;
12107 ldind_to_load_membase (int opcode)
12111 return OP_LOADI1_MEMBASE;
12113 return OP_LOADU1_MEMBASE;
12115 return OP_LOADI2_MEMBASE;
12117 return OP_LOADU2_MEMBASE;
12119 return OP_LOADI4_MEMBASE;
12121 return OP_LOADU4_MEMBASE;
12123 return OP_LOAD_MEMBASE;
12124 case CEE_LDIND_REF:
12125 return OP_LOAD_MEMBASE;
12127 return OP_LOADI8_MEMBASE;
12129 return OP_LOADR4_MEMBASE;
12131 return OP_LOADR8_MEMBASE;
12133 g_assert_not_reached ();
12140 stind_to_store_membase (int opcode)
12144 return OP_STOREI1_MEMBASE_REG;
12146 return OP_STOREI2_MEMBASE_REG;
12148 return OP_STOREI4_MEMBASE_REG;
12150 case CEE_STIND_REF:
12151 return OP_STORE_MEMBASE_REG;
12153 return OP_STOREI8_MEMBASE_REG;
12155 return OP_STORER4_MEMBASE_REG;
12157 return OP_STORER8_MEMBASE_REG;
12159 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the equivalent OP_LOAD*_MEM opcode
 * (load from an absolute address instead of base + displacement), or -1 if
 * the current target has no such opcode.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	/* FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the opcode which applies OPCODE directly to a memory destination
 * (read-modify-write), given that the result is consumed by a store with
 * opcode STORE_OPCODE. Returns -1 if no such fused opcode exists on the
 * current target.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer sized / 32 bit stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	}
#endif

#if defined(TARGET_AMD64)
	/* Pointer sized, 32 bit and 64 bit stores can all be fused on amd64 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return the opcode which computes OPCODE (a setcc style compare result)
 * directly into a memory destination, given that the result would be stored
 * by STORE_OPCODE. Returns -1 if no such fused opcode exists.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Explicit break: the original fell through into the next case when
		 * the guard failed, which only worked because both guards were
		 * identical. */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the opcode which performs OPCODE with its first source operand
 * read directly from memory (folding away a load with opcode LOAD_OPCODE),
 * or -1 if no such fused opcode exists on the current target.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return the opcode which performs OPCODE with its second source operand
 * read directly from memory (folding away a load with opcode LOAD_OPCODE),
 * or -1 if no such fused opcode exists on the current target.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* 32 bit loads fuse with 32 bit ops, 64 bit loads with 64 bit ops */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are emulated
 * in software on the current target, since the emulation functions have no
 * immediate variants.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
12473 * mono_handle_global_vregs:
12475 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/* (continues: ...for them, so later passes treat them as memory-backed variables.) */
12479 mono_handle_global_vregs (MonoCompile *cfg)
12481 gint32 *vreg_to_bb;
12482 MonoBasicBlock *bb;
/* Map vreg -> (block_num + 1) of the single bblock using it so far;
 * -1 once the vreg has been seen in two or more bblocks; 0 = unseen.
 * NOTE(review): the element size is sizeof (gint32*) — pointer size, not
 * sizeof (gint32); over-allocates on 64-bit (harmless) — confirm intended. */
12485 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12487 #ifdef MONO_ARCH_SIMD_INTRINSICS
12488 if (cfg->uses_simd_intrinsics)
12489 mono_simd_simplify_indirection (cfg);
12492 /* Find local vregs used in more than one bb */
12493 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12494 MonoInst *ins = bb->code;
12495 int block_num = bb->block_num;
12497 if (cfg->verbose_level > 2)
12498 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12501 for (; ins; ins = ins->next) {
12502 const char *spec = INS_INFO (ins->opcode);
12503 int regtype = 0, regindex;
12506 if (G_UNLIKELY (cfg->verbose_level > 2))
12507 mono_print_ins (ins);
/* At this point all CIL opcodes must have been lowered to machine-level ops. */
12509 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit each register slot of the instruction: 0 = dest, 1-3 = sources. */
12511 for (regindex = 0; regindex < 4; regindex ++) {
12514 if (regindex == 0) {
12515 regtype = spec [MONO_INST_DEST];
12516 if (regtype == ' ')
12519 } else if (regindex == 1) {
12520 regtype = spec [MONO_INST_SRC1];
12521 if (regtype == ' ')
12524 } else if (regindex == 2) {
12525 regtype = spec [MONO_INST_SRC2];
12526 if (regtype == ' ')
12529 } else if (regindex == 3) {
12530 regtype = spec [MONO_INST_SRC3];
12531 if (regtype == ' ')
12536 #if SIZEOF_REGISTER == 4
12537 /* In the LLVM case, the long opcodes are not decomposed */
12538 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12540 * Since some instructions reference the original long vreg,
12541 * and some reference the two component vregs, it is quite hard
12542 * to determine when it needs to be global. So be conservative.
12544 if (!get_vreg_to_inst (cfg, vreg)) {
12545 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12547 if (cfg->verbose_level > 2)
12548 printf ("LONG VREG R%d made global.\n", vreg);
12552 * Make the component vregs volatile since the optimizations can
12553 * get confused otherwise.
12555 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12556 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12560 g_assert (vreg != -1);
12562 prev_bb = vreg_to_bb [vreg];
12563 if (prev_bb == 0) {
12564 /* 0 is a valid block num */
12565 vreg_to_bb [vreg] = block_num + 1;
12566 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by construction; skip them. */
12567 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Seen in a second bblock: promote to a global variable of the matching type. */
12570 if (!get_vreg_to_inst (cfg, vreg)) {
12571 if (G_UNLIKELY (cfg->verbose_level > 2))
12572 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
12576 if (vreg_is_ref (cfg, vreg))
12577 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12579 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12582 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12585 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12588 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12591 g_assert_not_reached ();
12595 /* Flag as having been used in more than one bb */
12596 vreg_to_bb [vreg] = -1;
12602 /* If a variable is used in only one bblock, convert it into a local vreg */
12603 for (i = 0; i < cfg->num_varinfo; i++) {
12604 MonoInst *var = cfg->varinfo [i];
12605 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12607 switch (var->type) {
12613 #if SIZEOF_REGISTER == 8
12616 #if !defined(TARGET_X86)
12617 /* Enabling this screws up the fp stack on x86 */
12620 if (mono_arch_is_soft_float ())
12623 /* Arguments are implicitly global */
12624 /* Putting R4 vars into registers doesn't work currently */
12625 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12626 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12628 * Make that the variable's liveness interval doesn't contain a call, since
12629 * that would cause the lvreg to be spilled, making the whole optimization
12632 /* This is too slow for JIT compilation */
12634 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12636 int def_index, call_index, ins_index;
12637 gboolean spilled = FALSE;
12642 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12643 const char *spec = INS_INFO (ins->opcode);
12645 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12646 def_index = ins_index;
/* NOTE(review): both clauses of this condition test SRC1/sreg1 — the
 * second clause almost certainly meant SRC2/sreg2, so uses via the
 * second source register are never checked here. Verify upstream. */
12648 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12649 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12650 if (call_index > def_index) {
12656 if (MONO_IS_CALL (ins))
12657 call_index = ins_index;
12667 if (G_UNLIKELY (cfg->verbose_level > 2))
12668 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead and detach it from the vreg, so the vreg behaves
 * as a plain local (non-memory-backed) register from here on. */
12669 var->flags |= MONO_INST_IS_DEAD;
12670 cfg->vreg_to_inst [var->dreg] = NULL;
12677 * Compress the varinfo and vars tables so the liveness computation is faster and
12678 * takes up less space.
12681 for (i = 0; i < cfg->num_varinfo; ++i) {
12682 MonoInst *var = cfg->varinfo [i];
12683 if (pos < i && cfg->locals_start == i)
12684 cfg->locals_start = pos;
12685 if (!(var->flags & MONO_INST_IS_DEAD)) {
12687 cfg->varinfo [pos] = cfg->varinfo [i];
12688 cfg->varinfo [pos]->inst_c0 = pos;
12689 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12690 cfg->vars [pos].idx = pos;
12691 #if SIZEOF_REGISTER == 4
12692 if (cfg->varinfo [pos]->type == STACK_I8) {
12693 /* Modify the two component vars too */
12696 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12697 var1->inst_c0 = pos;
12698 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12699 var1->inst_c0 = pos;
12706 cfg->num_varinfo = pos;
12707 if (cfg->locals_start > cfg->num_varinfo)
12708 cfg->locals_start = cfg->num_varinfo;
12712 * mono_spill_global_vars:
12714 * Generate spill code for variables which are not allocated to registers,
12715 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12716 * code is generated which could be optimized by the local optimization passes.
12719 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12721 MonoBasicBlock *bb;
12723 int orig_next_vreg;
12724 guint32 *vreg_to_lvreg;
12726 guint32 i, lvregs_len;
12727 gboolean dest_has_lvreg = FALSE;
12728 guint32 stacktypes [128];
12729 MonoInst **live_range_start, **live_range_end;
12730 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12731 int *gsharedvt_vreg_to_idx = NULL;
12733 *need_local_opts = FALSE;
12735 memset (spec2, 0, sizeof (spec2));
12737 /* FIXME: Move this function to mini.c */
12738 stacktypes ['i'] = STACK_PTR;
12739 stacktypes ['l'] = STACK_I8;
12740 stacktypes ['f'] = STACK_R8;
12741 #ifdef MONO_ARCH_SIMD_INTRINSICS
12742 stacktypes ['x'] = STACK_VTYPE;
12745 #if SIZEOF_REGISTER == 4
12746 /* Create MonoInsts for longs */
12747 for (i = 0; i < cfg->num_varinfo; i++) {
12748 MonoInst *ins = cfg->varinfo [i];
12750 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12751 switch (ins->type) {
12756 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12759 g_assert (ins->opcode == OP_REGOFFSET);
12761 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12763 tree->opcode = OP_REGOFFSET;
12764 tree->inst_basereg = ins->inst_basereg;
12765 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12767 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12769 tree->opcode = OP_REGOFFSET;
12770 tree->inst_basereg = ins->inst_basereg;
12771 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12781 if (cfg->compute_gc_maps) {
12782 /* registers need liveness info even for !non refs */
12783 for (i = 0; i < cfg->num_varinfo; i++) {
12784 MonoInst *ins = cfg->varinfo [i];
12786 if (ins->opcode == OP_REGVAR)
12787 ins->flags |= MONO_INST_GC_TRACK;
12791 if (cfg->gsharedvt) {
12792 gsharedvt_vreg_to_idx = g_new0 (int, cfg->next_vreg);
12794 for (i = 0; i < cfg->num_varinfo; ++i) {
12795 MonoInst *ins = cfg->varinfo [i];
12798 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12799 if (i >= cfg->locals_start) {
12801 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12802 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12803 ins->opcode = OP_GSHAREDVT_LOCAL;
12804 ins->inst_imm = idx;
12807 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12808 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12814 /* FIXME: widening and truncation */
12817 * As an optimization, when a variable allocated to the stack is first loaded into
12818 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12819 * the variable again.
12821 orig_next_vreg = cfg->next_vreg;
12822 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12823 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12827 * These arrays contain the first and last instructions accessing a given
12829 * Since we emit bblocks in the same order we process them here, and we
12830 * don't split live ranges, these will precisely describe the live range of
12831 * the variable, i.e. the instruction range where a valid value can be found
12832 * in the variables location.
12833 * The live range is computed using the liveness info computed by the liveness pass.
12834 * We can't use vmv->range, since that is an abstract live range, and we need
12835 * one which is instruction precise.
12836 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12838 /* FIXME: Only do this if debugging info is requested */
12839 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12840 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12841 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12842 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12844 /* Add spill loads/stores */
12845 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12848 if (cfg->verbose_level > 2)
12849 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12851 /* Clear vreg_to_lvreg array */
12852 for (i = 0; i < lvregs_len; i++)
12853 vreg_to_lvreg [lvregs [i]] = 0;
12857 MONO_BB_FOR_EACH_INS (bb, ins) {
12858 const char *spec = INS_INFO (ins->opcode);
12859 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12860 gboolean store, no_lvreg;
12861 int sregs [MONO_MAX_SRC_REGS];
12863 if (G_UNLIKELY (cfg->verbose_level > 2))
12864 mono_print_ins (ins);
12866 if (ins->opcode == OP_NOP)
12870 * We handle LDADDR here as well, since it can only be decomposed
12871 * when variable addresses are known.
12873 if (ins->opcode == OP_LDADDR) {
12874 MonoInst *var = ins->inst_p0;
12876 if (var->opcode == OP_VTARG_ADDR) {
12877 /* Happens on SPARC/S390 where vtypes are passed by reference */
12878 MonoInst *vtaddr = var->inst_left;
12879 if (vtaddr->opcode == OP_REGVAR) {
12880 ins->opcode = OP_MOVE;
12881 ins->sreg1 = vtaddr->dreg;
12883 else if (var->inst_left->opcode == OP_REGOFFSET) {
12884 ins->opcode = OP_LOAD_MEMBASE;
12885 ins->inst_basereg = vtaddr->inst_basereg;
12886 ins->inst_offset = vtaddr->inst_offset;
12889 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12890 /* gsharedvt arg passed by ref */
12891 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12893 ins->opcode = OP_LOAD_MEMBASE;
12894 ins->inst_basereg = var->inst_basereg;
12895 ins->inst_offset = var->inst_offset;
12896 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12897 MonoInst *load, *load2, *load3;
12898 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12899 int reg1, reg2, reg3;
12900 MonoInst *info_var = cfg->gsharedvt_info_var;
12901 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12905 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12908 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12910 g_assert (info_var);
12911 g_assert (locals_var);
12913 /* Mark the instruction used to compute the locals var as used */
12914 cfg->gsharedvt_locals_var_ins = NULL;
12916 /* Load the offset */
12917 if (info_var->opcode == OP_REGOFFSET) {
12918 reg1 = alloc_ireg (cfg);
12919 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12920 } else if (info_var->opcode == OP_REGVAR) {
12922 reg1 = info_var->dreg;
12924 g_assert_not_reached ();
12926 reg2 = alloc_ireg (cfg);
12927 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12928 /* Load the locals area address */
12929 reg3 = alloc_ireg (cfg);
12930 if (locals_var->opcode == OP_REGOFFSET) {
12931 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12932 } else if (locals_var->opcode == OP_REGVAR) {
12933 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12935 g_assert_not_reached ();
12937 /* Compute the address */
12938 ins->opcode = OP_PADD;
12942 mono_bblock_insert_before_ins (bb, ins, load3);
12943 mono_bblock_insert_before_ins (bb, load3, load2);
12945 mono_bblock_insert_before_ins (bb, load2, load);
12947 g_assert (var->opcode == OP_REGOFFSET);
12949 ins->opcode = OP_ADD_IMM;
12950 ins->sreg1 = var->inst_basereg;
12951 ins->inst_imm = var->inst_offset;
12954 *need_local_opts = TRUE;
12955 spec = INS_INFO (ins->opcode);
12958 if (ins->opcode < MONO_CEE_LAST) {
12959 mono_print_ins (ins);
12960 g_assert_not_reached ();
12964 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12968 if (MONO_IS_STORE_MEMBASE (ins)) {
12969 tmp_reg = ins->dreg;
12970 ins->dreg = ins->sreg2;
12971 ins->sreg2 = tmp_reg;
12974 spec2 [MONO_INST_DEST] = ' ';
12975 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12976 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12977 spec2 [MONO_INST_SRC3] = ' ';
12979 } else if (MONO_IS_STORE_MEMINDEX (ins))
12980 g_assert_not_reached ();
12985 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12986 printf ("\t %.3s %d", spec, ins->dreg);
12987 num_sregs = mono_inst_get_src_registers (ins, sregs);
12988 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12989 printf (" %d", sregs [srcindex]);
12996 regtype = spec [MONO_INST_DEST];
12997 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13000 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13001 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13002 MonoInst *store_ins;
13004 MonoInst *def_ins = ins;
13005 int dreg = ins->dreg; /* The original vreg */
13007 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13009 if (var->opcode == OP_REGVAR) {
13010 ins->dreg = var->dreg;
13011 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13013 * Instead of emitting a load+store, use a _membase opcode.
13015 g_assert (var->opcode == OP_REGOFFSET);
13016 if (ins->opcode == OP_MOVE) {
13020 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13021 ins->inst_basereg = var->inst_basereg;
13022 ins->inst_offset = var->inst_offset;
13025 spec = INS_INFO (ins->opcode);
13029 g_assert (var->opcode == OP_REGOFFSET);
13031 prev_dreg = ins->dreg;
13033 /* Invalidate any previous lvreg for this vreg */
13034 vreg_to_lvreg [ins->dreg] = 0;
13038 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13040 store_opcode = OP_STOREI8_MEMBASE_REG;
13043 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13045 #if SIZEOF_REGISTER != 8
13046 if (regtype == 'l') {
13047 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13048 mono_bblock_insert_after_ins (bb, ins, store_ins);
13049 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13050 mono_bblock_insert_after_ins (bb, ins, store_ins);
13051 def_ins = store_ins;
13056 g_assert (store_opcode != OP_STOREV_MEMBASE);
13058 /* Try to fuse the store into the instruction itself */
13059 /* FIXME: Add more instructions */
13060 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13061 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13062 ins->inst_imm = ins->inst_c0;
13063 ins->inst_destbasereg = var->inst_basereg;
13064 ins->inst_offset = var->inst_offset;
13065 spec = INS_INFO (ins->opcode);
13066 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13067 ins->opcode = store_opcode;
13068 ins->inst_destbasereg = var->inst_basereg;
13069 ins->inst_offset = var->inst_offset;
13073 tmp_reg = ins->dreg;
13074 ins->dreg = ins->sreg2;
13075 ins->sreg2 = tmp_reg;
13078 spec2 [MONO_INST_DEST] = ' ';
13079 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13080 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13081 spec2 [MONO_INST_SRC3] = ' ';
13083 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13084 // FIXME: The backends expect the base reg to be in inst_basereg
13085 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13087 ins->inst_basereg = var->inst_basereg;
13088 ins->inst_offset = var->inst_offset;
13089 spec = INS_INFO (ins->opcode);
13091 /* printf ("INS: "); mono_print_ins (ins); */
13092 /* Create a store instruction */
13093 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13095 /* Insert it after the instruction */
13096 mono_bblock_insert_after_ins (bb, ins, store_ins);
13098 def_ins = store_ins;
13101 * We can't assign ins->dreg to var->dreg here, since the
13102 * sregs could use it. So set a flag, and do it after
13105 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13106 dest_has_lvreg = TRUE;
13111 if (def_ins && !live_range_start [dreg]) {
13112 live_range_start [dreg] = def_ins;
13113 live_range_start_bb [dreg] = bb;
13116 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13119 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13120 tmp->inst_c1 = dreg;
13121 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13128 num_sregs = mono_inst_get_src_registers (ins, sregs);
13129 for (srcindex = 0; srcindex < 3; ++srcindex) {
13130 regtype = spec [MONO_INST_SRC1 + srcindex];
13131 sreg = sregs [srcindex];
13133 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13134 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13135 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13136 MonoInst *use_ins = ins;
13137 MonoInst *load_ins;
13138 guint32 load_opcode;
13140 if (var->opcode == OP_REGVAR) {
13141 sregs [srcindex] = var->dreg;
13142 //mono_inst_set_src_registers (ins, sregs);
13143 live_range_end [sreg] = use_ins;
13144 live_range_end_bb [sreg] = bb;
13146 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13149 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13150 /* var->dreg is a hreg */
13151 tmp->inst_c1 = sreg;
13152 mono_bblock_insert_after_ins (bb, ins, tmp);
13158 g_assert (var->opcode == OP_REGOFFSET);
13160 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13162 g_assert (load_opcode != OP_LOADV_MEMBASE);
13164 if (vreg_to_lvreg [sreg]) {
13165 g_assert (vreg_to_lvreg [sreg] != -1);
13167 /* The variable is already loaded to an lvreg */
13168 if (G_UNLIKELY (cfg->verbose_level > 2))
13169 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13170 sregs [srcindex] = vreg_to_lvreg [sreg];
13171 //mono_inst_set_src_registers (ins, sregs);
13175 /* Try to fuse the load into the instruction */
13176 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13177 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13178 sregs [0] = var->inst_basereg;
13179 //mono_inst_set_src_registers (ins, sregs);
13180 ins->inst_offset = var->inst_offset;
13181 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13182 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13183 sregs [1] = var->inst_basereg;
13184 //mono_inst_set_src_registers (ins, sregs);
13185 ins->inst_offset = var->inst_offset;
13187 if (MONO_IS_REAL_MOVE (ins)) {
13188 ins->opcode = OP_NOP;
13191 //printf ("%d ", srcindex); mono_print_ins (ins);
13193 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13195 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13196 if (var->dreg == prev_dreg) {
13198 * sreg refers to the value loaded by the load
13199 * emitted below, but we need to use ins->dreg
13200 * since it refers to the store emitted earlier.
13204 g_assert (sreg != -1);
13205 vreg_to_lvreg [var->dreg] = sreg;
13206 g_assert (lvregs_len < 1024);
13207 lvregs [lvregs_len ++] = var->dreg;
13211 sregs [srcindex] = sreg;
13212 //mono_inst_set_src_registers (ins, sregs);
13214 #if SIZEOF_REGISTER != 8
13215 if (regtype == 'l') {
13216 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13217 mono_bblock_insert_before_ins (bb, ins, load_ins);
13218 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13219 mono_bblock_insert_before_ins (bb, ins, load_ins);
13220 use_ins = load_ins;
13225 #if SIZEOF_REGISTER == 4
13226 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13228 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13229 mono_bblock_insert_before_ins (bb, ins, load_ins);
13230 use_ins = load_ins;
13234 if (var->dreg < orig_next_vreg) {
13235 live_range_end [var->dreg] = use_ins;
13236 live_range_end_bb [var->dreg] = bb;
13239 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13242 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13243 tmp->inst_c1 = var->dreg;
13244 mono_bblock_insert_after_ins (bb, ins, tmp);
13248 mono_inst_set_src_registers (ins, sregs);
13250 if (dest_has_lvreg) {
13251 g_assert (ins->dreg != -1);
13252 vreg_to_lvreg [prev_dreg] = ins->dreg;
13253 g_assert (lvregs_len < 1024);
13254 lvregs [lvregs_len ++] = prev_dreg;
13255 dest_has_lvreg = FALSE;
13259 tmp_reg = ins->dreg;
13260 ins->dreg = ins->sreg2;
13261 ins->sreg2 = tmp_reg;
13264 if (MONO_IS_CALL (ins)) {
13265 /* Clear vreg_to_lvreg array */
13266 for (i = 0; i < lvregs_len; i++)
13267 vreg_to_lvreg [lvregs [i]] = 0;
13269 } else if (ins->opcode == OP_NOP) {
13271 MONO_INST_NULLIFY_SREGS (ins);
13274 if (cfg->verbose_level > 2)
13275 mono_print_ins_index (1, ins);
13278 /* Extend the live range based on the liveness info */
13279 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13280 for (i = 0; i < cfg->num_varinfo; i ++) {
13281 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13283 if (vreg_is_volatile (cfg, vi->vreg))
13284 /* The liveness info is incomplete */
13287 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13288 /* Live from at least the first ins of this bb */
13289 live_range_start [vi->vreg] = bb->code;
13290 live_range_start_bb [vi->vreg] = bb;
13293 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13294 /* Live at least until the last ins of this bb */
13295 live_range_end [vi->vreg] = bb->last_ins;
13296 live_range_end_bb [vi->vreg] = bb;
13302 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13304 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13305 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13307 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13308 for (i = 0; i < cfg->num_varinfo; ++i) {
13309 int vreg = MONO_VARINFO (cfg, i)->vreg;
13312 if (live_range_start [vreg]) {
13313 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13315 ins->inst_c1 = vreg;
13316 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13318 if (live_range_end [vreg]) {
13319 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13321 ins->inst_c1 = vreg;
13322 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13323 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13325 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13331 if (cfg->gsharedvt_locals_var_ins) {
13332 /* Nullify if unused */
13333 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13334 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13337 g_free (live_range_start);
13338 g_free (live_range_end);
13339 g_free (live_range_start_bb);
13340 g_free (live_range_end_bb);
13345 * - use 'iadd' instead of 'int_add'
13346 * - handling ovf opcodes: decompose in method_to_ir.
13347 * - unify iregs/fregs
13348 * -> partly done, the missing parts are:
13349 * - a more complete unification would involve unifying the hregs as well, so
13350 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13351 * would no longer map to the machine hregs, so the code generators would need to
13352 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13353 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13354 * fp/non-fp branches speeds it up by about 15%.
13355 * - use sext/zext opcodes instead of shifts
13357 * - get rid of TEMPLOADs if possible and use vregs instead
13358 * - clean up usage of OP_P/OP_ opcodes
13359 * - cleanup usage of DUMMY_USE
13360 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13362 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13363 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13364 * - make sure handle_stack_args () is called before the branch is emitted
13365 * - when the new IR is done, get rid of all unused stuff
13366 * - COMPARE/BEQ as separate instructions or unify them ?
13367 * - keeping them separate allows specialized compare instructions like
13368 * compare_imm, compare_membase
13369 * - most back ends unify fp compare+branch, fp compare+ceq
13370 * - integrate mono_save_args into inline_method
13371 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13372 * - handle long shift opts on 32 bit platforms somehow: they require
13373 * 3 sregs (2 for arg1 and 1 for arg2)
13374 * - make byref a 'normal' type.
13375 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13376 * variable if needed.
13377 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13378 * like inline_method.
13379 * - remove inlining restrictions
13380 * - fix LNEG and enable cfold of INEG
13381 * - generalize x86 optimizations like ldelema as a peephole optimization
13382 * - add store_mem_imm for amd64
13383 * - optimize the loading of the interruption flag in the managed->native wrappers
13384 * - avoid special handling of OP_NOP in passes
13385 * - move code inserting instructions into one function/macro.
13386 * - try a coalescing phase after liveness analysis
13387 * - add float -> vreg conversion + local optimizations on !x86
13388 * - figure out how to handle decomposed branches during optimizations, ie.
13389 * compare+branch, op_jump_table+op_br etc.
13390 * - promote RuntimeXHandles to vregs
13391 * - vtype cleanups:
13392 * - add a NEW_VARLOADA_VREG macro
13393 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13394 * accessing vtype fields.
13395 * - get rid of I8CONST on 64 bit platforms
13396 * - dealing with the increase in code size due to branches created during opcode
13398 * - use extended basic blocks
13399 * - all parts of the JIT
13400 * - handle_global_vregs () && local regalloc
13401 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13402 * - sources of increase in code size:
13405 * - isinst and castclass
13406 * - lvregs not allocated to global registers even if used multiple times
13407 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13409 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13410 * - add all micro optimizations from the old JIT
13411 * - put tree optimizations into the deadce pass
13412 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13413 * specific function.
13414 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13415 * fcompare + branchCC.
13416 * - create a helper function for allocating a stack slot, taking into account
13417 * MONO_CFG_HAS_SPILLUP.
13419 * - merge the ia64 switch changes.
13420 * - optimize mono_regstate2_alloc_int/float.
13421 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13422 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13423 * parts of the tree could be separated by other instructions, killing the tree
13424 * arguments, or stores killing loads etc. Also, should we fold loads into other
13425 * instructions if the result of the load is used multiple times ?
13426 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13427 * - LAST MERGE: 108395.
13428 * - when returning vtypes in registers, generate IR and append it to the end of the
13429 * last bb instead of doing it in the epilog.
13430 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13438 - When to decompose opcodes:
13439 - earlier: this makes some optimizations hard to implement, since the low level IR
13440 no longer contains the necessary information. But it is easier to do.
13441 - later: harder to implement, enables more optimizations.
13442 - Branches inside bblocks:
13443 - created when decomposing complex opcodes.
13444 - branches to another bblock: harmless, but not tracked by the branch
13445 optimizations, so need to branch to a label at the start of the bblock.
13446 - branches to inside the same bblock: very problematic, trips up the local
13447 reg allocator. Can be fixed by splitting the current bblock, but that is a
13448 complex operation, since some local vregs can become global vregs etc.
13449 - Local/global vregs:
13450 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13451 local register allocator.
13452 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13453 structure, created by mono_create_var (). Assigned to hregs or the stack by
13454 the global register allocator.
13455 - When to do optimizations like alu->alu_imm:
13456 - earlier -> saves work later on since the IR will be smaller/simpler
13457 - later -> can work on more instructions
13458 - Handling of valuetypes:
13459 - When a vtype is pushed on the stack, a new temporary is created, an
13460 instruction computing its address (LDADDR) is emitted and pushed on
13461 the stack. Need to optimize cases when the vtype is used immediately as in
13462 argument passing, stloc etc.
13463 - Instead of the to_end stuff in the old JIT, simply call the function handling
13464 the values on the stack before emitting the last instruction of the bb.
13467 #endif /* DISABLE_JIT */