2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
102 /* Determine whether 'ins' represents a load of the 'this' argument */
103 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
105 static int ldind_to_load_membase (int opcode);
106 static int stind_to_store_membase (int opcode);
108 int mono_op_to_op_imm (int opcode);
109 int mono_op_to_op_imm_noemul (int opcode);
111 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
112 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
113 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
115 /* helper methods signatures */
116 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
117 static MonoMethodSignature *helper_sig_domain_get = NULL;
118 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
119 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
120 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
121 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
125 * Instruction metadata
/* Expand each opcode entry into its dest/src1/src2 register-kind characters.
 * MINI_OP pads with ' ' so two-source opcodes occupy four slots, matching
 * the four-slot layout produced by MINI_OP3 for three-source opcodes. */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
140 #if SIZEOF_REGISTER == 8
145 /* keep in sync with the enum in mini.h */
148 #include "mini-ops.h"
/* Expand each opcode entry into its source-register count (0-3), computed
 * from the highest src slot that is not NONE. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
156 * This should contain the index of the last sreg + 1. This is not the same
157 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
159 const gint8 ins_sreg_counts[] = {
160 #include "mini-ops.h"
165 #define MONO_INIT_VARINFO(vi,id) do { \
166 (vi)->range.first_use.pos.bid = 0xffff; \
172 mono_inst_set_src_registers (MonoInst *ins, int *regs)
174 ins->sreg1 = regs [0];
175 ins->sreg2 = regs [1];
176 ins->sreg3 = regs [2];
180 mono_alloc_ireg (MonoCompile *cfg)
182 return alloc_ireg (cfg);
186 mono_alloc_freg (MonoCompile *cfg)
188 return alloc_freg (cfg);
192 mono_alloc_preg (MonoCompile *cfg)
194 return alloc_preg (cfg);
198 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
200 return alloc_dreg (cfg, stack_type);
204 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
210 switch (type->type) {
213 case MONO_TYPE_BOOLEAN:
225 case MONO_TYPE_FNPTR:
227 case MONO_TYPE_CLASS:
228 case MONO_TYPE_STRING:
229 case MONO_TYPE_OBJECT:
230 case MONO_TYPE_SZARRAY:
231 case MONO_TYPE_ARRAY:
235 #if SIZEOF_REGISTER == 8
244 case MONO_TYPE_VALUETYPE:
245 if (type->data.klass->enumtype) {
246 type = mono_class_enum_basetype (type->data.klass);
249 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
252 case MONO_TYPE_TYPEDBYREF:
254 case MONO_TYPE_GENERICINST:
255 type = &type->data.generic_class->container_class->byval_arg;
259 g_assert (cfg->generic_sharing_context);
262 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
268 mono_print_bb (MonoBasicBlock *bb, const char *msg)
273 printf ("\n%s %d: [IN: ", msg, bb->block_num);
274 for (i = 0; i < bb->in_count; ++i)
275 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
277 for (i = 0; i < bb->out_count; ++i)
278 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
280 for (tree = bb->code; tree; tree = tree->next)
281 mono_print_ins_index (-1, tree);
285 * Can't put this at the beginning, since other files reference stuff from this
/* On unverifiable IL: trap into the debugger if break_on_unverified is set,
 * otherwise bail out through the 'unverified' label. */
#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* On a metadata/type load failure: trap into the debugger if
 * break_on_unverified is set, otherwise bail out through 'load_error'. */
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
294 #define GET_BBLOCK(cfg,tblock,ip) do { \
295 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
297 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
298 NEW_BBLOCK (cfg, (tblock)); \
299 (tblock)->cil_code = (ip); \
300 ADD_BBLOCK (cfg, (tblock)); \
304 #if defined(TARGET_X86) || defined(TARGET_AMD64)
305 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
306 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
307 (dest)->dreg = alloc_preg ((cfg)); \
308 (dest)->sreg1 = (sr1); \
309 (dest)->sreg2 = (sr2); \
310 (dest)->inst_imm = (imm); \
311 (dest)->backend.shift_amount = (shift); \
312 MONO_ADD_INS ((cfg)->cbb, (dest)); \
316 #if SIZEOF_REGISTER == 8
317 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
318 /* FIXME: Need to add many more cases */ \
319 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
321 int dr = alloc_preg (cfg); \
322 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
323 (ins)->sreg2 = widen->dreg; \
327 #define ADD_WIDEN_OP(ins, arg1, arg2)
330 #define ADD_BINOP(op) do { \
331 MONO_INST_NEW (cfg, ins, (op)); \
333 ins->sreg1 = sp [0]->dreg; \
334 ins->sreg2 = sp [1]->dreg; \
335 type_from_op (ins, sp [0], sp [1]); \
337 /* Have to insert a widening op */ \
338 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
339 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
340 MONO_ADD_INS ((cfg)->cbb, (ins)); \
341 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
344 #define ADD_UNOP(op) do { \
345 MONO_INST_NEW (cfg, ins, (op)); \
347 ins->sreg1 = sp [0]->dreg; \
348 type_from_op (ins, sp [0], NULL); \
350 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
351 MONO_ADD_INS ((cfg)->cbb, (ins)); \
352 *sp++ = mono_decompose_opcode (cfg, ins); \
355 #define ADD_BINCOND(next_block) do { \
358 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
359 cmp->sreg1 = sp [0]->dreg; \
360 cmp->sreg2 = sp [1]->dreg; \
361 type_from_op (cmp, sp [0], sp [1]); \
363 type_from_op (ins, sp [0], sp [1]); \
364 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
365 GET_BBLOCK (cfg, tblock, target); \
366 link_bblock (cfg, bblock, tblock); \
367 ins->inst_true_bb = tblock; \
368 if ((next_block)) { \
369 link_bblock (cfg, bblock, (next_block)); \
370 ins->inst_false_bb = (next_block); \
371 start_new_bblock = 1; \
373 GET_BBLOCK (cfg, tblock, ip); \
374 link_bblock (cfg, bblock, tblock); \
375 ins->inst_false_bb = tblock; \
376 start_new_bblock = 2; \
378 if (sp != stack_start) { \
379 handle_stack_args (cfg, stack_start, sp - stack_start); \
380 CHECK_UNVERIFIABLE (cfg); \
382 MONO_ADD_INS (bblock, cmp); \
383 MONO_ADD_INS (bblock, ins); \
387 * link_bblock: Links two basic blocks
389 * links two basic blocks in the control flow graph, the 'from'
390 * argument is the starting block and the 'to' argument is the block
391 * control flow proceeds to after leaving 'from'.
394 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
396 MonoBasicBlock **newa;
400 if (from->cil_code) {
402 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
404 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
407 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
409 printf ("edge from entry to exit\n");
414 for (i = 0; i < from->out_count; ++i) {
415 if (to == from->out_bb [i]) {
421 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
422 for (i = 0; i < from->out_count; ++i) {
423 newa [i] = from->out_bb [i];
431 for (i = 0; i < to->in_count; ++i) {
432 if (from == to->in_bb [i]) {
438 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
439 for (i = 0; i < to->in_count; ++i) {
440 newa [i] = to->in_bb [i];
449 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
451 link_bblock (cfg, from, to);
455 * mono_find_block_region:
457 * We mark each basic block with a region ID. We use that to avoid BB
458 * optimizations when blocks are in different regions.
461 * A region token that encodes where this region is, and information
462 * about the clause owner for this block.
464 * The region encodes the try/catch/filter clause that owns this block
465 * as well as the type. -1 is a special value that represents a block
466 * that is in none of try/catch/filter.
469 mono_find_block_region (MonoCompile *cfg, int offset)
471 MonoMethodHeader *header = cfg->header;
472 MonoExceptionClause *clause;
475 for (i = 0; i < header->num_clauses; ++i) {
476 clause = &header->clauses [i];
477 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
478 (offset < (clause->handler_offset)))
479 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
481 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
482 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
483 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
484 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
485 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
487 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
490 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
491 return ((i + 1) << 8) | clause->flags;
498 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
500 MonoMethodHeader *header = cfg->header;
501 MonoExceptionClause *clause;
505 for (i = 0; i < header->num_clauses; ++i) {
506 clause = &header->clauses [i];
507 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
508 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
509 if (clause->flags == type)
510 res = g_list_append (res, clause);
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 t = mono_type_get_underlying_type (t);
1103 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_CHAR:
1113 case MONO_TYPE_FNPTR:
1115 case MONO_TYPE_CLASS:
1116 case MONO_TYPE_STRING:
1117 case MONO_TYPE_OBJECT:
1118 case MONO_TYPE_SZARRAY:
1119 case MONO_TYPE_ARRAY:
1127 case MONO_TYPE_VALUETYPE:
1128 case MONO_TYPE_TYPEDBYREF:
1130 case MONO_TYPE_GENERICINST:
1131 if (mono_type_generic_inst_is_valuetype (t))
1137 g_assert_not_reached ();
1144 array_access_to_klass (int opcode)
1148 return mono_defaults.byte_class;
1150 return mono_defaults.uint16_class;
1153 return mono_defaults.int_class;
1156 return mono_defaults.sbyte_class;
1159 return mono_defaults.int16_class;
1162 return mono_defaults.int32_class;
1164 return mono_defaults.uint32_class;
1167 return mono_defaults.int64_class;
1170 return mono_defaults.single_class;
1173 return mono_defaults.double_class;
1174 case CEE_LDELEM_REF:
1175 case CEE_STELEM_REF:
1176 return mono_defaults.object_class;
1178 g_assert_not_reached ();
1184 * We try to share variables when possible
1187 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1192 /* inlining can result in deeper stacks */
1193 if (slot >= cfg->header->max_stack)
1194 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1196 pos = ins->type - 1 + slot * STACK_MAX;
1198 switch (ins->type) {
1205 if ((vnum = cfg->intvars [pos]))
1206 return cfg->varinfo [vnum];
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 cfg->intvars [pos] = res->inst_c0;
1211 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1217 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1220 * Don't use this if a generic_context is set, since that means AOT can't
1221 * look up the method using just the image+token.
1222 * table == 0 means this is a reference made from a wrapper.
1224 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1225 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1226 jump_info_token->image = image;
1227 jump_info_token->token = token;
1228 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single joint point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the joint point).
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
1245 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1248 MonoBasicBlock *bb = cfg->cbb;
1249 MonoBasicBlock *outb;
1250 MonoInst *inst, **locals;
1255 if (cfg->verbose_level > 3)
1256 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1257 if (!bb->out_scount) {
1258 bb->out_scount = count;
1259 //printf ("bblock %d has out:", bb->block_num);
1261 for (i = 0; i < bb->out_count; ++i) {
1262 outb = bb->out_bb [i];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb->flags & BB_EXCEPTION_HANDLER)
1266 //printf (" %d", outb->block_num);
1267 if (outb->in_stack) {
1269 bb->out_stack = outb->in_stack;
1275 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1276 for (i = 0; i < count; ++i) {
1279 * try to reuse temps already allocated for this purpose, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1289 if (cfg->inlined_method)
1290 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1292 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1297 for (i = 0; i < bb->out_count; ++i) {
1298 outb = bb->out_bb [i];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb->flags & BB_EXCEPTION_HANDLER)
1302 if (outb->in_scount) {
1303 if (outb->in_scount != bb->out_scount) {
1304 cfg->unverifiable = TRUE;
1307 continue; /* check they are the same locals */
1309 outb->in_scount = count;
1310 outb->in_stack = bb->out_stack;
1313 locals = bb->out_stack;
1315 for (i = 0; i < count; ++i) {
1316 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1317 inst->cil_code = sp [i]->cil_code;
1318 sp [i] = locals [i];
1319 if (cfg->verbose_level > 3)
1320 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1332 /* Find a bblock which has a different in_stack */
1334 while (bindex < bb->out_count) {
1335 outb = bb->out_bb [bindex];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb->flags & BB_EXCEPTION_HANDLER) {
1341 if (outb->in_stack != locals) {
1342 for (i = 0; i < count; ++i) {
1343 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1344 inst->cil_code = sp [i]->cil_code;
1345 sp [i] = locals [i];
1346 if (cfg->verbose_level > 3)
1347 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1349 locals = outb->in_stack;
1358 /* Emit code which loads interface_offsets [klass->interface_id]
1359 * The array is stored in memory before vtable.
1362 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1364 if (cfg->compile_aot) {
/* AOT: the interface id is not a compile-time constant here, so load it as
 * a patched constant and add it to the vtable pointer to reach the slot.
 * NOTE(review): ADJUSTED_IID presumably encodes the negative byte offset of
 * the slot — confirm against the AOT patch-resolution code. */
1365 int ioffset_reg = alloc_preg (cfg);
1366 int iid_reg = alloc_preg (cfg);
1368 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1369 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is known now; the slot lives at a fixed negative
 * offset before the vtable (one pointer per interface id, plus one). */
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which sets "intf_bit_reg" nonzero iff the interface bitmap found
 * at base_reg+offset (a MonoClass or MonoVTable interface_bitmap field) has
 * the bit for klass's interface id set.
 */
1378 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1380 int ibitmap_reg = alloc_preg (cfg);
1381 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: the test cannot be done with simple loads, so call
 * the mono_class_interface_match icall with the bitmap pointer and iid. */
1383 MonoInst *res, *ins;
1384 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1385 MONO_ADD_INS (cfg->cbb, ins);
1387 if (cfg->compile_aot)
1388 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1390 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1391 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1392 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: load the byte containing the bit and mask it. */
1394 int ibitmap_byte_reg = alloc_preg (cfg);
1396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1398 if (cfg->compile_aot) {
/* AOT: iid only known at patch time — compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) in emitted code instead of folding them. */
1399 int iid_reg = alloc_preg (cfg);
1400 int shifted_iid_reg = alloc_preg (cfg);
1401 int ibitmap_byte_address_reg = alloc_preg (cfg);
1402 int masked_iid_reg = alloc_preg (cfg);
1403 int iid_one_bit_reg = alloc_preg (cfg);
1404 int iid_bit_reg = alloc_preg (cfg);
1405 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1408 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1410 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1411 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1414 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1422 * stored in "klass_reg" implements the interface "klass".
1425 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Thin wrapper: test the interface bitmap embedded in MonoClass. */
1427 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1431 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1432 * stored in "vtable_reg" implements the interface "klass".
1435 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Thin wrapper: test the interface bitmap embedded in MonoVTable. */
1437 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1441 * Emit code which checks whenever the interface id of @klass is smaller than
1442 * than the value given by max_iid_reg.
1445 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1446 MonoBasicBlock *false_target)
1448 if (cfg->compile_aot) {
/* AOT: compare against the patched interface id constant. */
1449 int iid_reg = alloc_preg (cfg);
1450 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
/* JIT: interface_id is an immediate. */
1454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target branch there (isinst-style); otherwise throw
 * InvalidCastException (castclass-style). */
1456 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1458 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1461 /* Same as above, but obtains max_iid from a vtable */
1463 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1464 MonoBasicBlock *false_target)
1466 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1469 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1472 /* Same as above, but obtains max_iid from a klass */
1474 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1475 MonoBasicBlock *false_target)
1477 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass::max_interface_id. */
1479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1480 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subclass test: branch to true_target if the class in
 * klass_reg is a subclass of klass (checked via the supertypes table), to
 * false_target if the idepth check fails. klass_ins, when non-NULL, supplies
 * the target class as a runtime value (generic sharing).
 */
1484 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1486 int idepth_reg = alloc_preg (cfg);
1487 int stypes_reg = alloc_preg (cfg);
1488 int stype = alloc_preg (cfg);
1490 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: the supertypes table may be shorter than klass->idepth,
 * so verify the candidate's inheritance depth first. */
1491 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1492 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1493 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it with the target class. */
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1496 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1499 } else if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT, non-shared: the MonoClass* can be used as an immediate. */
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class operand. */
1510 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emit an interface test on the vtable in vtable_reg: first bound-check the
 * interface id against max_interface_id, then test the interface bitmap.
 * Branch targets select isinst semantics; no targets means castclass
 * semantics (throw InvalidCastException on failure).
 */
1516 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1518 int intf_reg = alloc_preg (cfg);
1520 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1521 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1526 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1530 * Variant of the above that takes a register to the class, not the vtable.
1533 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1535 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as mini_emit_iface_cast, but reading MonoClass fields. */
1537 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1538 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact-class check: compare the MonoClass* in klass_reg against
 * klass (or against the runtime value klass_inst when non-NULL) and throw
 * InvalidCastException on mismatch.
 */
1547 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1551 } else if (cfg->compile_aot) {
/* AOT: the MonoClass* must come from a patched constant. */
1552 int const_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1554 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1558 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with no runtime class operand. */
1562 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1564 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Like mini_emit_class_check, but branch to target with branch_op instead of
 * throwing on the comparison result.
 */
1568 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1570 if (cfg->compile_aot) {
1571 int const_reg = alloc_preg (cfg);
1572 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1573 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1581 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check for the class in klass_reg against klass, throwing
 * InvalidCastException on failure. Handles arrays (rank + element class
 * checks, including enum/primitive-compatibility special cases) and falls
 * back to a supertypes-table check for plain classes. obj_reg, when not -1,
 * is used to verify SZARRAY targets have no bounds (i.e. are vectors).
 */
1584 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1587 int rank_reg = alloc_preg (cfg);
1588 int eclass_reg = alloc_preg (cfg);
1590 g_assert (!klass_inst);
/* Array path: the rank must match exactly. */
1591 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1593 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1594 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Compare element ("cast") classes. */
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1596 if (klass->cast_class == mono_defaults.object_class) {
/* object[] accepts enum arrays too: take the null-object exit unless the
 * element's parent is Enum's parent, then require the enum class itself. */
1597 int parent_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1599 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1602 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class) {
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface element type: NULL targets mean throw on failure. */
1607 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1609 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1610 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1613 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1614 /* Check that the object is a vector too */
1615 int bounds_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1618 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check (throwing variant). */
1621 int idepth_reg = alloc_preg (cfg);
1622 int stypes_reg = alloc_preg (cfg);
1623 int stype = alloc_preg (cfg);
1625 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1628 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1632 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class operand. */
1637 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1639 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline code which stores "size" bytes of "val" at destreg+offset.
 * Only val == 0 is supported (asserted). Small aligned sizes become a single
 * immediate store; larger sizes are unrolled into register stores, widest
 * first, falling back to byte stores when unaligned access is disallowed.
 */
1643 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1647 g_assert (val == 0);
1652 if ((size <= 4) && (size <= align)) {
/* Fits a single naturally-aligned immediate store. */
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1663 #if SIZEOF_REGISTER == 8
1665 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val into a register once, then unroll. */
1671 val_reg = alloc_preg (cfg);
1673 if (SIZEOF_REGISTER == 8)
1674 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1676 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1679 /* This could be optimized further if neccesary */
/* Unaligned destination: byte stores only. */
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1688 #if !NO_UNALIGNED_ACCESS
1689 if (SIZEOF_REGISTER == 8) {
/* On 64-bit, peel a leading 4-byte store if needed, then 8-byte stores. */
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline code which copies "size" bytes from srcreg+soffset to
 * destreg+doffset, unrolled into load/store pairs, widest width first.
 * Unaligned or unaligned-access-restricted targets degrade to byte copies.
 */
1721 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1728 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1729 g_assert (size < 10000);
1732 /* This could be optimized further if neccesary */
/* Unaligned case: one byte at a time. */
1734 cur_reg = alloc_preg (cfg);
1735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1743 #if !NO_UNALIGNED_ACCESS
1744 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy 8 bytes per iteration while possible. */
1746 cur_reg = alloc_preg (cfg);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1757 cur_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1765 cur_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1773 cur_reg = alloc_preg (cfg);
1774 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1775 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a return type to the matching call opcode, further selected by call
 * style: calli (indirect, _REG), virt (_VIRT), or plain. Enum and generic
 * instance types are unwrapped to their underlying type before dispatch.
 */
1783 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized, so the plain CALL family is used. */
1786 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 type = mini_get_basic_type_from_generic (gsctx, type);
1790 switch (type->type) {
1791 case MONO_TYPE_VOID:
1792 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1795 case MONO_TYPE_BOOLEAN:
1798 case MONO_TYPE_CHAR:
/* Small integer types widen to a native int return. */
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1805 case MONO_TYPE_FNPTR:
1806 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 case MONO_TYPE_CLASS:
1808 case MONO_TYPE_STRING:
1809 case MONO_TYPE_OBJECT:
1810 case MONO_TYPE_SZARRAY:
1811 case MONO_TYPE_ARRAY:
/* Reference returns also use the plain CALL family. */
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1818 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1819 case MONO_TYPE_VALUETYPE:
1820 if (type->data.klass->enumtype) {
/* Enums: reclassify using the underlying integral type. */
1821 type = mono_class_enum_basetype (type->data.klass);
1824 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1825 case MONO_TYPE_TYPEDBYREF:
1826 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1827 case MONO_TYPE_GENERICINST:
/* Generic instances: retry with the open container class's type. */
1828 type = &type->data.generic_class->container_class->byval_arg;
1831 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1837 * target_type_is_incompatible:
1838 * @cfg: MonoCompile context
1840 * Check that the item @arg on the evaluation stack can be stored
1841 * in the target type (can be a local, or field, etc).
1842 * The cfg arg can be used to check if we need verification or just
1845 * Returns: non-0 value if arg can't be stored on a target.
1848 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1850 MonoType *simple_type;
1853 if (target->byref) {
1854 /* FIXME: check that the pointed to types match */
1855 if (arg->type == STACK_MP)
1856 return arg->klass != mono_class_from_mono_type (target);
1857 if (arg->type == STACK_PTR)
/* Unwrap enums/custom-modifiers before classifying the target. */
1862 simple_type = mono_type_get_underlying_type (target);
1863 switch (simple_type->type) {
1864 case MONO_TYPE_VOID:
1868 case MONO_TYPE_BOOLEAN:
1871 case MONO_TYPE_CHAR:
/* Integral targets accept I4 and native-int stack entries. */
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1878 /* STACK_MP is needed when setting pinned locals */
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 case MONO_TYPE_FNPTR:
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1888 case MONO_TYPE_CLASS:
1889 case MONO_TYPE_STRING:
1890 case MONO_TYPE_OBJECT:
1891 case MONO_TYPE_SZARRAY:
1892 case MONO_TYPE_ARRAY:
1893 if (arg->type != STACK_OBJ)
1895 /* FIXME: check type compatibility */
1899 if (arg->type != STACK_I8)
1904 if (arg->type != STACK_R8)
1907 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact class. */
1908 if (arg->type != STACK_VTYPE)
1910 klass = mono_class_from_mono_type (simple_type);
1911 if (klass != arg->klass)
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_GENERICINST:
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1930 if (arg->type != STACK_OBJ)
1932 /* FIXME: check type compatibility */
1936 case MONO_TYPE_MVAR:
1937 /* FIXME: all the arguments must be references for now,
1938 * later look inside cfg and see if the arg num is
1939 * really a reference
1941 g_assert (cfg->generic_sharing_context);
1942 if (arg->type != STACK_OBJ)
1946 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve shared generic parameters to their basic type first. */
1977 simple_type = sig->params [i];
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1986 case MONO_TYPE_BOOLEAN:
1989 case MONO_TYPE_CHAR:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2012 if (args [i]->type != STACK_I8)
2017 if (args [i]->type != STACK_R8)
2020 case MONO_TYPE_VALUETYPE:
2021 if (simple_type->data.klass->enumtype) {
/* Enums: re-check against the underlying integral type. */
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2037 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a CALLVIRT-family opcode to its direct-call counterpart. */
2045 callvirt_to_call (int opcode)
2050 case OP_VOIDCALLVIRT:
2059 g_assert_not_reached ();
/* Map a CALLVIRT-family opcode to its _MEMBASE counterpart (indirect call
 * through a memory slot, used for vtable/IMT dispatch). */
2066 callvirt_to_call_membase (int opcode)
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2074 return OP_FCALL_MEMBASE;
2076 return OP_LCALL_MEMBASE;
2078 return OP_VCALL_MEMBASE;
2080 g_assert_not_reached ();
2086 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Emit code which passes the IMT "method" argument to an interface call:
 * either the supplied imt_arg value, an AOT method constant, or the
 * MonoMethod* itself, routed into MONO_ARCH_IMT_REG (or to the
 * arch-specific emitter when no dedicated register exists).
 */
2088 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2092 if (COMPILE_LLVM (cfg)) {
/* LLVM backend: materialize the argument into a vreg and record it on the
 * call so the LLVM lowering can place it. */
2093 method_reg = alloc_preg (cfg);
2096 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2097 } else if (cfg->compile_aot) {
2098 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2101 MONO_INST_NEW (cfg, ins, OP_PCONST);
2102 ins->inst_p0 = call->method;
2103 ins->dreg = method_reg;
2104 MONO_ADD_INS (cfg->cbb, ins);
2108 call->imt_arg_reg = method_reg;
2110 #ifdef MONO_ARCH_IMT_REG
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2113 /* Need this to keep the IMT arg alive */
2114 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2119 #ifdef MONO_ARCH_IMT_REG
/* Non-LLVM path with a dedicated IMT register. */
2120 method_reg = alloc_preg (cfg);
2123 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2124 } else if (cfg->compile_aot) {
2125 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2128 MONO_INST_NEW (cfg, ins, OP_PCONST);
2129 ins->inst_p0 = call->method;
2130 ins->dreg = method_reg;
2131 MONO_ADD_INS (cfg->cbb, ins);
2134 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: defer to the architecture backend. */
2136 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo (patch record) from mp, filling in type/ip/target. */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/*
 * Create a MonoCallInst for a call with the given signature and argument
 * list, selecting the opcode from the return type and call style
 * (calli/virtual/tail), wiring up valuetype return handling, soft-float
 * argument conversion, and the arch/LLVM outgoing-argument lowering.
 * The instruction is NOT added to a bblock; callers do that.
 */
2153 inline static MonoCallInst *
2154 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2155 MonoInst **args, int calli, int virtual, int tail)
2158 #ifdef MONO_ARCH_SOFT_FLOAT
2163 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2165 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2168 call->signature = sig;
2170 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2173 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* First struct-return branch: reuse the caller-provided vret address.
 * NOTE(review): the guarding condition for this branch vs. the one below
 * is not visible in this excerpt. */
2174 call->vret_var = cfg->vret_addr;
2175 //g_assert_not_reached ();
2177 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Struct return into a fresh local; its address is produced lazily. */
2178 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2181 temp->backend.is_pinvoke = sig->pinvoke;
2184 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2185 * address of return value to increase optimization opportunities.
2186 * Before vtype decomposition, the dreg of the call ins itself represents the
2187 * fact the call modifies the return value. After decomposition, the call will
2188 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2189 * will be transformed into an LDADDR.
2191 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2192 loada->dreg = alloc_preg (cfg);
2193 loada->inst_p0 = temp;
2194 /* We reference the call too since call->dreg could change during optimization */
2195 loada->inst_p1 = call;
2196 MONO_ADD_INS (cfg->cbb, loada);
2198 call->inst.dreg = temp->dreg;
2200 call->vret_var = loada;
2201 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2202 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2205 if (COMPILE_SOFT_FLOAT (cfg)) {
2207 * If the call has a float argument, we would need to do an r8->r4 conversion using
2208 * an icall, but that cannot be done during the call sequence since it would clobber
2209 * the call registers + the stack. So we do it before emitting the call.
2211 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2213 MonoInst *in = call->args [i];
2215 if (i >= sig->hasthis)
2216 t = sig->params [i - sig->hasthis];
2218 t = &mono_defaults.int_class->byval_arg;
2219 t = mono_type_get_underlying_type (t);
2221 if (!t->byref && t->type == MONO_TYPE_R4) {
2222 MonoInst *iargs [1];
2226 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2228 /* The result will be in an int vreg */
2229 call->args [i] = conv;
/* Lower outgoing arguments for the selected backend. */
2236 if (COMPILE_LLVM (cfg))
2237 mono_llvm_emit_call (cfg, call);
2239 mono_arch_emit_call (cfg, call);
2241 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area and flag the method as
 * containing calls. */
2244 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2245 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in "addr". */
2250 inline static MonoInst*
2251 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2253 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
/* sreg1 of a _REG call opcode holds the target address. */
2255 call->inst.sreg1 = addr->dreg;
2257 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2259 return (MonoInst*)call;
/* Attach the rgctx (runtime generic context) argument to a call, either via
 * the dedicated MONO_ARCH_RGCTX_REG or by recording the vreg on the call. */
2263 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2265 #ifdef MONO_ARCH_RGCTX_REG
2266 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2267 cfg->uses_rgctx_reg = TRUE;
2268 call->rgctx_reg = TRUE;
2270 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also passes an rgctx argument: copy the rgctx value to
 * a fresh vreg before emitting the call (so the call lowering can't clobber
 * it), then attach it. */
2277 inline static MonoInst*
2278 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2284 rgctx_reg = mono_alloc_preg (cfg);
2285 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2287 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2289 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2290 return (MonoInst*)call;
2294 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2296 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emit a managed method call. Handles: string ctor signature rewriting,
 * transparent-proxy (remoting) wrappers, delegate Invoke fast path, direct
 * dispatch of non-virtual/sealed methods, and full virtual dispatch through
 * the vtable or the IMT for interface methods. "this" being non-NULL selects
 * virtual call opcodes; imt_arg optionally supplies an explicit IMT value.
 */
2299 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2300 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2302 gboolean might_be_remote;
2303 gboolean virtual = this != NULL;
2304 gboolean enable_for_aot = TRUE;
2308 if (method->string_ctor) {
2309 /* Create the real signature */
2310 /* FIXME: Cache these */
2311 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2312 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A call might hit a transparent proxy when the target is marshal-by-ref
 * (or object) and is dispatched non-virtually on an unchecked 'this'. */
2317 might_be_remote = this && sig->hasthis &&
2318 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2319 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2321 context_used = mono_method_check_context_used (method);
2322 if (might_be_remote && context_used) {
/* Gshared + remoting: resolve the checked-invoke wrapper through the
 * rgctx and call it indirectly. */
2325 g_assert (cfg->generic_sharing_context);
2327 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2329 return mono_emit_calli (cfg, sig, args, addr);
2332 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2334 if (might_be_remote)
2335 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2337 call->method = method;
2338 call->inst.flags |= MONO_INST_HAS_METHOD;
2339 call->inst.inst_left = this;
2342 int vtable_reg, slot_reg, this_reg;
2344 this_reg = this->dreg;
2346 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2347 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2348 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2350 /* Make a call to delegate->invoke_impl */
2351 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2352 call->inst.inst_basereg = this_reg;
2353 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2360 if ((!cfg->compile_aot || enable_for_aot) &&
2361 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2362 (MONO_METHOD_IS_FINAL (method) &&
2363 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2364 !(method->klass->marshalbyref && context_used) {
2366 * the method is not virtual, we just need to ensure this is not null
2367 * and then we can call the method directly.
2369 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2371 * The check above ensures method is not gshared, this is needed since
2372 * gshared methods can't have wrappers.
2374 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2377 if (!method->string_ctor)
2378 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2380 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2382 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2384 return (MonoInst*)call;
2387 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2389 * the method is virtual, but we can statically dispatch since either
2390 * it's class or the method itself are sealed.
2391 * But first we need to ensure it's not a null reference.
2393 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2395 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2396 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2398 return (MonoInst*)call;
/* Full virtual dispatch: call through a memory slot. */
2401 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2403 vtable_reg = alloc_preg (cfg);
2404 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2405 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2407 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots live at negative offsets
 * before the vtable. */
2409 guint32 imt_slot = mono_method_get_imt_slot (method);
2410 emit_imt_argument (cfg, call, imt_arg);
2411 slot_reg = vtable_reg;
2412 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2415 if (slot_reg == -1) {
/* No IMT: index into the interface-offsets table instead. */
2416 slot_reg = alloc_preg (cfg);
2417 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2418 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index into the vtable proper. */
2421 slot_reg = vtable_reg;
2422 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2423 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2424 #ifdef MONO_ARCH_HAVE_IMT
2426 g_assert (mono_method_signature (method)->generic_param_count);
2427 emit_imt_argument (cfg, call, imt_arg);
2432 call->inst.sreg1 = slot_reg;
2433 call->virtual = TRUE;
2436 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2438 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but also passes a vtable/rgctx argument:
 * the value is copied to a fresh vreg first so call lowering can't clobber
 * it, then attached to the resulting call. */
2442 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2443 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2450 rgctx_reg = mono_alloc_preg (cfg);
2451 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2453 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2455 call = (MonoCallInst*)ins;
2457 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call with the method's own signature and no IMT arg. */
2463 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2465 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function "func" with the given
 * signature and arguments. */
2469 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2476 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2479 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2481 return (MonoInst*)call;
/* Emit a call to a registered JIT icall, resolved by its function address;
 * the icall's wrapper and signature come from the icall registry. */
2485 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2487 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2491 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2495 * mono_emit_abs_call:
2497 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2499 inline static MonoInst*
2500 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2501 MonoMethodSignature *sig, MonoInst **args)
2503 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2507 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch record so the ABS resolver can map the fake address
 * back to the real target at patch time. */
2510 if (cfg->abs_patches == NULL)
2511 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2512 g_hash_table_insert (cfg->abs_patches, ji, ji);
2513 ins = mono_emit_native_call (cfg, ji, sig, args);
2514 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   After a call whose return type is a sub-register-sized integer and which
 * goes through pinvoke (or LLVM), emit an explicit sign/zero extension of the
 * result, because native code may leave the upper bits of the return register
 * uninitialized.  The widened instruction keeps the original stack type.
 * NOTE(review): this excerpt is line-sampled; the `break;`s, default case and
 * the final `ins = widen; return ins;` tail are elided here.
 */
2519 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2521 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2522 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2526 * Native code might return non register sized integers
2527 * without initializing the upper bits.
2529 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2530 case OP_LOADI1_MEMBASE:
2531 widen_op = OP_ICONV_TO_I1;
2533 case OP_LOADU1_MEMBASE:
2534 widen_op = OP_ICONV_TO_U1;
2536 case OP_LOADI2_MEMBASE:
2537 widen_op = OP_ICONV_TO_I2;
2539 case OP_LOADU2_MEMBASE:
2540 widen_op = OP_ICONV_TO_U2;
2546 if (widen_op != -1) {
2547 int dreg = alloc_preg (cfg);
2550 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2551 widen->type = ins->type;
2561 get_memcpy_method (void)
2563 static MonoMethod *memcpy_method = NULL;
2564 if (!memcpy_method) {
2565 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2567 g_error ("Old corlib found. Install a new one");
2569 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap with one bit per pointer-sized slot of KLASS
 * (starting at bit OFFSET/SIZEOF_VOID_P) marking which slots contain object
 * references; used to drive write barriers when copying valuetypes.  Static
 * fields are skipped; reference fields must be pointer-aligned; value-type
 * fields with references recurse with the field's offset added.
 * NOTE(review): excerpt is line-sampled; the `continue;`, the else branch
 * brace and the foffset declaration are elided here.
 */
2573 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2575 MonoClassField *field;
2576 gpointer iter = NULL;
2578 while ((field = mono_class_get_fields (klass, &iter))) {
2581 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2583 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2584 if (mono_type_is_reference (field->type)) {
2585 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2586 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2588 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2589 MonoClass *field_class = mono_class_from_mono_type (field->type);
2590 if (field_class->has_references)
2591 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, if VALUE is NULL, the
 * vreg VALUE_REG) through the pointer PTR.  Strategies, in order:
 *   - OP_CARD_TABLE_WBARRIER when the arch supports it, we are JITting and
 *     the GC exposes a card table and nursery;
 *   - inline card marking: offset = ptr >> shift, optional mask, add the card
 *     table base (AOT const or embedded pointer), store a 1 byte;
 *   - otherwise a call to the GC-provided managed write barrier method.
 * A dummy use is emitted at the end to keep the stored value alive.
 * NOTE(review): excerpt is line-sampled; the #else/#endif structure and some
 * if/else braces between the numbered lines are elided here.
 */
2597 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2600 int card_table_shift_bits;
2601 gpointer card_table_mask;
2602 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2603 MonoInst *dummy_use;
2605 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2606 int nursery_shift_bits;
2607 size_t nursery_size;
2609 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2611 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2614 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2615 wbarrier->sreg1 = ptr->dreg;
2617 wbarrier->sreg2 = value->dreg;
2619 wbarrier->sreg2 = value_reg;
2620 MONO_ADD_INS (cfg->cbb, wbarrier);
2624 int offset_reg = alloc_preg (cfg);
2625 int card_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2629 if (card_table_mask)
2630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2632 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2633 * IMM's larger than 32bits.
2635 if (cfg->compile_aot) {
2636 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2638 MONO_INST_NEW (cfg, ins, OP_PCONST);
2639 ins->inst_p0 = card_table;
2640 ins->dreg = card_reg;
2641 MONO_ADD_INS (cfg->cbb, ins);
2644 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2647 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2648 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2652 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2654 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2655 dummy_use->sreg1 = value_reg;
2656 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled copy of a valuetype of KLASS that performs write
 * barriers for the slots that hold references (iargs[0] = dest address,
 * iargs[1] = src address).  Bails out when alignment is below pointer size or
 * the type is larger than 32 words (the bitmap limit).  For more than 5 words
 * it calls the mono_gc_wbarrier_value_copy_bitmap icall with the computed
 * bitmap instead of unrolling; otherwise it emits word-sized load/store pairs
 * (with a barrier when the slot's bitmap bit is set) and then a 4/2/1-byte
 * tail, which cannot contain references.
 * NOTE(review): excerpt is line-sampled; the return statements (presumably
 * gboolean TRUE/FALSE), the bitmap-bit test and some if-conditions for the
 * tail sizes are elided here.
 */
2662 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2664 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2665 unsigned need_wb = 0;
2670 /*types with references can't have alignment smaller than sizeof(void*) */
2671 if (align < SIZEOF_VOID_P)
2674 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2675 if (size > 32 * SIZEOF_VOID_P)
2678 create_write_barrier_bitmap (klass, &need_wb, 0);
2680 /* We don't unroll more than 5 stores to avoid code bloat. */
2681 if (size > 5 * SIZEOF_VOID_P) {
2682 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2683 size += (SIZEOF_VOID_P - 1);
2684 size &= ~(SIZEOF_VOID_P - 1);
2686 EMIT_NEW_ICONST (cfg, iargs [2], size);
2687 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2688 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2692 destreg = iargs [0]->dreg;
2693 srcreg = iargs [1]->dreg;
2696 dest_ptr_reg = alloc_preg (cfg);
2697 tmp_reg = alloc_preg (cfg);
2700 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2702 while (size >= SIZEOF_VOID_P) {
2703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2707 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2709 offset += SIZEOF_VOID_P;
2710 size -= SIZEOF_VOID_P;
2713 /*tmp += sizeof (void*)*/
2714 if (size >= SIZEOF_VOID_P) {
2715 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2716 MONO_ADD_INS (cfg->cbb, iargs [0]);
2720 /* Those cannot be references since size < sizeof (void*) */
2722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 *
 *   Uses native size/layout when NATIVE is true.  If write barriers are
 * needed (type has references, store not provably to the stack), prefers the
 * intrinsified wb-aware copy, else calls the mono_value_copy icall with the
 * class obtained from the RGCTX / class const / raw pointer depending on
 * sharing and AOT.  Otherwise a small copy is inlined via mini_emit_memcpy,
 * and larger ones call the corlib memcpy helper.
 * NOTE(review): excerpt is line-sampled; returns, braces and the iargs[0/1]
 * setup between the numbered lines are elided here.
 */
2750 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2752 MonoInst *iargs [4];
2755 MonoMethod *memcpy_method;
2759 * This check breaks with spilled vars... need to handle it during verification anyway.
2760 * g_assert (klass && klass == src->klass && klass == dest->klass);
2764 n = mono_class_native_size (klass, &align);
2766 n = mono_class_value_size (klass, &align);
2768 /* if native is true there should be no references in the struct */
2769 if (cfg->gen_write_barriers && klass->has_references && !native) {
2770 /* Avoid barriers when storing to the stack */
2771 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2772 (dest->opcode == OP_LDADDR))) {
2773 int context_used = 0;
2778 if (cfg->generic_sharing_context)
2779 context_used = mono_class_check_context_used (klass);
2781 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2782 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2784 } else if (context_used) {
2785 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2787 if (cfg->compile_aot) {
2788 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2790 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2791 mono_class_compute_gc_descriptor (klass);
2795 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2800 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2801 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2802 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2806 EMIT_NEW_ICONST (cfg, iargs [2], n);
2808 memcpy_method = get_memcpy_method ();
2809 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2814 get_memset_method (void)
2816 static MonoMethod *memset_method = NULL;
2817 if (!memset_method) {
2818 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2820 g_error ("Old corlib found. Install a new one");
2822 return memset_method;
2826 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2828 MonoInst *iargs [3];
2831 MonoMethod *memset_method;
2833 /* FIXME: Optimize this for the case when dest is an LDADDR */
2835 mono_class_init (klass);
2836 n = mono_class_value_size (klass, &align);
2838 if (n <= sizeof (gpointer) * 5) {
2839 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2842 memset_method = get_memset_method ();
2844 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2845 EMIT_NEW_ICONST (cfg, iargs [2], n);
2846 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that yields the runtime generic context for METHOD under generic
 * sharing: the method RGCTX variable when the method context is used, the
 * vtable variable for static/valuetype methods (loading the class vtable out
 * of the mrgctx when the method is inflated with a method_inst), otherwise a
 * load of the vtable from the `this` argument.
 * NOTE(review): excerpt is line-sampled; the return statements and several
 * braces/declarations between the numbered lines are elided here.
 */
2851 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2853 MonoInst *this = NULL;
2855 g_assert (cfg->generic_sharing_context);
2857 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2858 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2859 !method->klass->valuetype)
2860 EMIT_NEW_ARGLOAD (cfg, this, 0);
2862 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2863 MonoInst *mrgctx_loc, *mrgctx_var;
2866 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2868 mrgctx_loc = mono_get_vtable_var (cfg);
2869 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2872 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2873 MonoInst *vtable_loc, *vtable_var;
2877 vtable_loc = mono_get_vtable_var (cfg);
2878 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2880 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2881 MonoInst *mrgctx_var = vtable_var;
2884 vtable_reg = alloc_preg (cfg);
2885 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2886 vtable_var->type = STACK_PTR;
2892 int vtable_reg, res_reg;
2894 vtable_reg = alloc_preg (cfg);
2895 res_reg = alloc_preg (cfg);
2896 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2901 static MonoJumpInfoRgctxEntry *
2902 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2904 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2905 res->method = method;
2906 res->in_mrgctx = in_mrgctx;
2907 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2908 res->data->type = patch_type;
2909 res->data->data.target = patch_data;
2910 res->info_type = info_type;
2915 static inline MonoInst*
2916 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2918 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2922 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2923 MonoClass *klass, int rgctx_type)
2925 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2926 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2928 return emit_rgctx_fetch (cfg, rgctx, entry);
2932 * emit_get_rgctx_method:
2934 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2935 * normal constants, else emit a load from the rgctx.
2938 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2939 MonoMethod *cmethod, int rgctx_type)
2941 if (!context_used) {
2944 switch (rgctx_type) {
2945 case MONO_RGCTX_INFO_METHOD:
2946 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2948 case MONO_RGCTX_INFO_METHOD_RGCTX:
2949 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2952 g_assert_not_reached ();
2955 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2956 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2958 return emit_rgctx_fetch (cfg, rgctx, entry);
2963 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2964 MonoClassField *field, int rgctx_type)
2966 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2967 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2969 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 * On return the caller must check @klass for load errors.
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable: obtained from the RGCTX when the class context is used, otherwise
 * as a vtable constant.  With MONO_ARCH_VTABLE_REG the vtable is passed in a
 * fixed register.
 * NOTE(review): excerpt is line-sampled; the if/else braces and the closing
 * #endif are elided here.
 */
2976 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2978 MonoInst *vtable_arg;
2980 int context_used = 0;
2982 if (cfg->generic_sharing_context)
2983 context_used = mono_class_check_context_used (klass);
2986 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2987 klass, MONO_RGCTX_INFO_VTABLE);
2989 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2993 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2996 if (COMPILE_LLVM (cfg))
2997 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2999 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3000 #ifdef MONO_ARCH_VTABLE_REG
3001 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3002 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When running with --debug=casts, record in the JIT TLS the class of the
 * object in OBJ_REG (loaded via its vtable) and the target KLASS, so a
 * subsequently failing cast can report both sides.  No-op otherwise; prints
 * an error if the TLS intrinsic is unavailable on this platform.
 * NOTE(review): excerpt is line-sampled; the tls_get NULL-check branch and
 * closing braces are elided here.
 */
3011 if (mini_get_debug_options ()->better_cast_details) {
3012 int to_klass_reg = alloc_preg (cfg);
3013 int vtable_reg = alloc_preg (cfg);
3014 int klass_reg = alloc_preg (cfg);
3015 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3018 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3022 MONO_ADD_INS (cfg->cbb, tls_get);
3023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3026 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3027 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Companion to save_cast_details: clears the saved cast information in the
 * JIT TLS (only the `class_cast_from` field needs resetting) when
 * --debug=casts is enabled.
 */
3033 reset_cast_details (MonoCompile *cfg)
3035 /* Reset the variables holding the cast details */
3036 if (mini_get_debug_options ()->better_cast_details) {
3037 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3039 MONO_ADD_INS (cfg->cbb, tls_get);
3040 /* It is enough to reset the from field */
3041 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 * On return the caller must check @array_class for load errors
 *
 *   Emit a runtime check (for stelem-style stores) that OBJ's actual type
 * matches ARRAY_CLASS, throwing ArrayTypeMismatchException on mismatch.
 * Compares the class under MONO_OPT_SHARED, the RGCTX-provided vtable under
 * generic sharing, and otherwise the vtable (AOT constant or raw pointer).
 * Cast details are saved/reset around the check for --debug=casts.
 * NOTE(review): excerpt is line-sampled; vtable declarations, the TYPE_LOAD
 * bail-out paths and some braces are elided here.
 */
3049 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3051 int vtable_reg = alloc_preg (cfg);
3052 int context_used = 0;
3054 if (cfg->generic_sharing_context)
3055 context_used = mono_class_check_context_used (array_class);
3057 save_cast_details (cfg, array_class, obj->dreg);
3059 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3061 if (cfg->opt & MONO_OPT_SHARED) {
3062 int class_reg = alloc_preg (cfg);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3064 if (cfg->compile_aot) {
3065 int klass_reg = alloc_preg (cfg);
3066 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3067 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3071 } else if (context_used) {
3072 MonoInst *vtable_ins;
3074 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3075 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3077 if (cfg->compile_aot) {
3081 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3083 vt_reg = alloc_preg (cfg);
3084 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3085 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3088 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3094 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3096 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 *
 *   Calls the Nullable Unbox method: directly when not shared, otherwise via
 * an indirect (rgctx) call whose target is fetched from the RGCTX.
 */
3104 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3106 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3109 MonoInst *rgctx, *addr;
3111 /* FIXME: What if the class is shared? We might not
3112 have to get the address of the method from the
3114 addr = emit_get_rgctx_method (cfg, context_used, method,
3115 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3117 rgctx = emit_get_rgctx (cfg, method, context_used);
3119 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3121 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit unbox of sp[0] to valuetype KLASS: fault on null via the vtable
 * load, check rank == 0, then verify the object's element class matches
 * KLASS's element class (through the RGCTX under generic sharing, else via
 * mini_emit_class_check with cast details saved for --debug=casts), and
 * finally compute the data pointer as obj + sizeof (MonoObject).
 * NOTE(review): excerpt is line-sampled; declarations and the final return
 * are elided here.
 */
3130 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3131 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3132 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3133 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3135 obj_reg = sp [0]->dreg;
3136 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3137 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3139 /* FIXME: generics */
3140 g_assert (klass->rank == 0);
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3144 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3150 MonoInst *element_class;
3152 /* This assertion is from the unboxcast insn */
3153 g_assert (klass->rank == 0);
3155 element_class = emit_get_rgctx_klass (cfg, context_used,
3156 klass->element_class, MONO_RGCTX_INFO_KLASS);
3158 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3159 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3161 save_cast_details (cfg, klass->element_class, obj_reg);
3162 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3163 reset_cast_details (cfg);
3166 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3167 MONO_ADD_INS (cfg->cbb, add);
3168 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 * Returns NULL and set the cfg exception on error.
 *
 *   Emit allocation of an instance of KLASS.  Paths, depending on context:
 *   - generic sharing: fetch the klass (MONO_OPT_SHARED) or vtable from the
 *     RGCTX and call mono_object_new / mono_object_new_specific;
 *   - MONO_OPT_SHARED: pass the domain and class constants to mono_object_new;
 *   - AOT out-of-line corlib types: call the mscorlib newobj helper with the
 *     type token index (avoids relocations);
 *   - otherwise: use the GC's managed allocator if available, else the
 *     class-specific allocation function (optionally passing the instance
 *     size in pointer words when pass_lw is set).
 * NOTE(review): excerpt is line-sampled; the context_used test, several
 * braces and the managed_alloc NULL-check structure are elided here.
 */
3178 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3180 MonoInst *iargs [2];
3186 MonoInst *iargs [2];
3189 FIXME: we cannot get managed_alloc here because we can't get
3190 the class's vtable (because it's not a closed class)
3192 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3193 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3196 if (cfg->opt & MONO_OPT_SHARED)
3197 rgctx_info = MONO_RGCTX_INFO_KLASS;
3199 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3200 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3202 if (cfg->opt & MONO_OPT_SHARED) {
3203 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3205 alloc_ftn = mono_object_new;
3208 alloc_ftn = mono_object_new_specific;
3211 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3214 if (cfg->opt & MONO_OPT_SHARED) {
3215 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3216 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3218 alloc_ftn = mono_object_new;
3219 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3220 /* This happens often in argument checking code, eg. throw new FooException... */
3221 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3222 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3223 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3225 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3226 MonoMethod *managed_alloc = NULL;
3230 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3231 cfg->exception_ptr = klass;
3235 #ifndef MONO_CROSS_COMPILE
3236 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3239 if (managed_alloc) {
3240 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3241 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3243 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3245 guint32 lw = vtable->klass->instance_size;
3246 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3247 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3248 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3251 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3255 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 * Returns NULL and set the cfg exception on error.
 *
 *   Box VAL of type KLASS.  Nullable<T> uses the corlib Box method (via an
 * rgctx calli when generic sharing is in effect); otherwise allocate the
 * object via handle_alloc and store the value just past the MonoObject
 * header.
 * NOTE(review): excerpt is line-sampled; the NULL-check on alloc and the
 * final return are elided here.
 */
3262 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3264 MonoInst *alloc, *ins;
3266 if (mono_class_is_nullable (klass)) {
3267 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3270 /* FIXME: What if the class is shared? We might not
3271 have to get the method address from the RGCTX. */
3272 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3273 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3274 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3276 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3278 return mono_emit_method_call (cfg, method, &val, NULL);
3282 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3286 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has a variant (co- or contra-variant) generic
 * parameter instantiated with a reference type, or — under generic sharing —
 * with an open VAR/MVAR type.  Casts involving such types need the slow,
 * cache-based isinst/castclass path rather than a direct class comparison.
 * NOTE(review): excerpt is line-sampled; the `else return FALSE` branch,
 * the `continue;`s and the final returns are elided here.
 */
3296 MonoGenericContainer *container;
3297 MonoGenericInst *ginst;
3299 if (klass->generic_class) {
3300 container = klass->generic_class->container_class->generic_container;
3301 ginst = klass->generic_class->context.class_inst;
3302 } else if (klass->generic_container && context_used) {
3303 container = klass->generic_container;
3304 ginst = container->context.class_inst;
3309 for (i = 0; i < container->type_argc; ++i) {
3311 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3313 type = ginst->type_argv [i];
3314 if (MONO_TYPE_IS_REFERENCE (type))
3317 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
/*
 * is_complex_isinst:
 *   Decides whether an isinst/castclass of KLASS needs the icall path rather
 * than inline checks.  Note the leading TRUE || currently forces the complex
 * path for every class (see the FIXME below).
 */
3323 // FIXME: This doesn't work yet (class libs tests fail?)
3324 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 * Returns NULL and set the cfg exception on error.
 *
 *   Emit a castclass of SRC to KLASS.  Paths:
 *   - reference-variant generic argument: call the cache-assisted
 *     castclass wrapper with the RGCTX-provided cache entry;
 *   - "complex" classes (currently all, see is_complex_isinst): call the
 *     mono_object_castclass icall;
 *   - simple path: null-check, then interface cast via the vtable, or class
 *     comparison (direct vtable compare for sealed non-AOT classes — path
 *     currently disabled, see FIXME — else mini_emit_castclass_inst), with
 *     cast details saved/reset for --debug=casts.
 * NOTE(review): excerpt is line-sampled; several braces, arg setup lines and
 * the final return are elided here.
 */
3330 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3332 MonoBasicBlock *is_null_bb;
3333 int obj_reg = src->dreg;
3334 int vtable_reg = alloc_preg (cfg);
3335 MonoInst *klass_inst = NULL;
3340 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3341 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3342 MonoInst *cache_ins;
3344 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3349 /* klass - it's the second element of the cache entry*/
3350 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3353 args [2] = cache_ins;
3355 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3358 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3360 if (is_complex_isinst (klass)) {
3361 /* Complex case, handle by an icall */
3367 args [1] = klass_inst;
3369 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3371 /* Simple case, handled by the code below */
3375 NEW_BBLOCK (cfg, is_null_bb);
3377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3378 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3380 save_cast_details (cfg, klass, obj_reg);
3382 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3384 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3386 int klass_reg = alloc_preg (cfg);
3388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3390 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3391 /* the remoting code is broken, access the class for now */
3392 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3393 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3395 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3396 cfg->exception_ptr = klass;
3399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3404 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3407 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3411 MONO_START_BB (cfg, is_null_bb);
3413 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 * Returns NULL and set the cfg exception on error.
 *
 *   Emit an isinst of SRC against KLASS, producing either the object (on
 * success / null input) or NULL.  Paths:
 *   - reference-variant generic argument: cache-assisted isinst wrapper;
 *   - "complex" classes (currently all, see is_complex_isinst): the
 *     mono_object_isinst icall;
 *   - simple path: result register preassigned to the object, null check,
 *     then interface / array (rank + element-class, with special handling
 *     for enums, System.Object element types and SZARRAY bounds) / nullable /
 *     sealed-class / general subclass checks branching to false_bb or
 *     is_null_bb, which respectively produce NULL or pass the object through.
 * NOTE(review): excerpt is line-sampled; many braces, arg setup lines and the
 * final return are elided here.
 */
3422 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3425 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3426 int obj_reg = src->dreg;
3427 int vtable_reg = alloc_preg (cfg);
3428 int res_reg = alloc_preg (cfg);
3429 MonoInst *klass_inst = NULL;
3434 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3435 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3436 MonoInst *cache_ins;
3438 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3443 /* klass - it's the second element of the cache entry*/
3444 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3447 args [2] = cache_ins;
3449 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3452 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3454 if (is_complex_isinst (klass)) {
3455 /* Complex case, handle by an icall */
3461 args [1] = klass_inst;
3463 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3465 /* Simple case, the code below can handle it */
3469 NEW_BBLOCK (cfg, is_null_bb);
3470 NEW_BBLOCK (cfg, false_bb);
3471 NEW_BBLOCK (cfg, end_bb);
3473 /* Do the assignment at the beginning, so the other assignment can be if converted */
3474 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3475 ins->type = STACK_OBJ;
3478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3479 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3483 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3484 g_assert (!context_used);
3485 /* the is_null_bb target simply copies the input register to the output */
3486 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3488 int klass_reg = alloc_preg (cfg);
3491 int rank_reg = alloc_preg (cfg);
3492 int eclass_reg = alloc_preg (cfg);
3494 g_assert (!context_used);
3495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3500 if (klass->cast_class == mono_defaults.object_class) {
3501 int parent_reg = alloc_preg (cfg);
3502 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3503 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3504 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3506 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3507 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3508 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3510 } else if (klass->cast_class == mono_defaults.enum_class) {
3511 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3513 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3514 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3516 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3517 /* Check that the object is a vector too */
3518 int bounds_reg = alloc_preg (cfg);
3519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3524 /* the is_null_bb target simply copies the input register to the output */
3525 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3527 } else if (mono_class_is_nullable (klass)) {
3528 g_assert (!context_used);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3530 /* the is_null_bb target simply copies the input register to the output */
3531 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3533 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3534 g_assert (!context_used);
3535 /* the remoting code is broken, access the class for now */
3536 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3537 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3539 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3540 cfg->exception_ptr = klass;
3543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3552 /* the is_null_bb target simply copies the input register to the output */
3553 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3558 MONO_START_BB (cfg, false_bb);
3560 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3563 MONO_START_BB (cfg, is_null_bb);
3565 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the CISINST opcode. Takes an object reference @src and a
 * class @klass and materializes an int result in a fresh ireg:
 *   0) the object is an instance of the class,
 *   1) the object is not an instance of the class,
 *   2) the object is a transparent proxy whose type cannot be determined.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering) — return type, braces, blank lines and the trailing
 * return are not shown; verify against the full file before editing.
 */
3571 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3573 /* This opcode takes as input an object reference and a class, and returns:
3574 0) if the object is an instance of the class,
3575 1) if the object is not instance of the class,
3576 2) if the object is a proxy whose type cannot be determined */
3579 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3580 int obj_reg = src->dreg;
3581 int dreg = alloc_ireg (cfg);
3583 int klass_reg = alloc_preg (cfg);
3585 NEW_BBLOCK (cfg, true_bb);
3586 NEW_BBLOCK (cfg, false_bb);
3587 NEW_BBLOCK (cfg, false2_bb);
3588 NEW_BBLOCK (cfg, end_bb);
3589 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is treated as "not an instance" (result 1). */
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3594 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: try the fast interface-bitmap check first. */
3595 NEW_BBLOCK (cfg, interface_fail_bb);
3597 tmp_reg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3599 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3600 MONO_START_BB (cfg, interface_fail_bb);
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not on the interface bitmap: only a transparent proxy can still match. */
3603 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: type cannot be determined (result 2). */
3605 tmp_reg = alloc_preg (cfg);
3606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3608 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3610 tmp_reg = alloc_preg (cfg);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3614 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class it stands in for. */
3615 tmp_reg = alloc_preg (cfg);
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info -> fall back to the ordinary class check. */
3619 tmp_reg = alloc_preg (cfg);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3624 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3627 MONO_START_BB (cfg, no_proxy_bb);
3629 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* false_bb: not an instance -> 1 */
3632 MONO_START_BB (cfg, false_bb);
3634 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3635 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb: undecidable proxy -> 2 */
3637 MONO_START_BB (cfg, false2_bb);
3639 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: is an instance -> 0 */
3642 MONO_START_BB (cfg, true_bb);
3644 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3646 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST-typed instruction for the stack. */
3649 MONO_INST_NEW (cfg, ins, OP_ICONST);
3651 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the CCASTCLASS opcode. Takes an object reference @src and a
 * class @klass and materializes an int result:
 *   0) the object is an instance of the class (or null),
 *   1) the object is a proxy whose type cannot be determined;
 * an InvalidCastException is thrown at runtime otherwise.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering) — verify against the full file before editing.
 */
3657 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3659 /* This opcode takes as input an object reference and a class, and returns:
3660 0) if the object is an instance of the class,
3661 1) if the object is a proxy whose type cannot be determined
3662 an InvalidCastException exception is thrown otherwise*/
3665 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3666 int obj_reg = src->dreg;
3667 int dreg = alloc_ireg (cfg);
3668 int tmp_reg = alloc_preg (cfg);
3669 int klass_reg = alloc_preg (cfg);
3671 NEW_BBLOCK (cfg, end_bb);
3672 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3675 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a better InvalidCastException message. */
3677 save_cast_details (cfg, klass, obj_reg);
3679 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: fast bitmap check, then proxy fallback. */
3680 NEW_BBLOCK (cfg, interface_fail_bb);
3682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3683 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3684 MONO_START_BB (cfg, interface_fail_bb);
3685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Anything that is not a transparent proxy throws here. */
3687 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info: cast is invalid. */
3689 tmp_reg = alloc_preg (cfg);
3690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3692 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undecidable (result 1). */
3694 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3698 NEW_BBLOCK (cfg, no_proxy_bb);
3700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3702 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class it stands in for. */
3704 tmp_reg = alloc_preg (cfg);
3705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3708 tmp_reg = alloc_preg (cfg);
3709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3713 NEW_BBLOCK (cfg, fail_1_bb);
3715 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy cast could not be decided locally -> 1 */
3717 MONO_START_BB (cfg, fail_1_bb);
3719 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3720 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* no_proxy_bb: ordinary object, emit a throwing castclass check. */
3722 MONO_START_BB (cfg, no_proxy_bb);
3724 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3727 MONO_START_BB (cfg, ok_result_bb);
3729 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3731 MONO_START_BB (cfg, end_bb);
/* Wrap the result register for the evaluation stack. */
3734 MONO_INST_NEW (cfg, ins, OP_ICONST);
3736 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor (): allocate a delegate object of
 * class @klass and initialize its target, method, method_code and
 * invoke_impl fields, so no runtime ctor call is needed.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering) — verify against the full file before editing.
 */
3742 * Returns NULL and set the cfg exception on error.
3744 static G_GNUC_UNUSED MonoInst*
3745 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3749 gpointer *trampoline;
3750 MonoInst *obj, *method_ins, *tramp_ins;
3754 obj = handle_alloc (cfg, klass, FALSE, 0);
3758 /* Inline the contents of mono_delegate_ctor */
3760 /* Set target field */
3761 /* Optimize away setting of NULL target */
3762 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC if needed. */
3764 if (cfg->gen_write_barriers) {
3765 dreg = alloc_preg (cfg);
3766 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3767 emit_write_barrier (cfg, ptr, target, 0);
3771 /* Set method field */
3772 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3774 if (cfg->gen_write_barriers) {
3775 dreg = alloc_preg (cfg);
3776 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3777 emit_write_barrier (cfg, ptr, method_ins, 0);
3780 * To avoid looking up the compiled code belonging to the target method
3781 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3782 * store it, and we fill it after the method has been compiled.
3784 if (!cfg->compile_aot && !method->dynamic) {
3785 MonoInst *code_slot_ins;
3788 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create / look up the per-domain method -> code-slot map
 * under the domain lock; the slot is shared by all delegates over
 * the same target method. */
3790 domain = mono_domain_get ();
3791 mono_domain_lock (domain);
3792 if (!domain_jit_info (domain)->method_code_hash)
3793 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3794 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3796 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3797 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3799 mono_domain_unlock (domain);
3801 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3806 /* Set invoke_impl field */
3807 if (cfg->compile_aot) {
/* Under AOT the trampoline address is patched in at load time. */
3808 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3810 trampoline = mono_create_delegate_trampoline (klass);
3811 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3815 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3821 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3823 MonoJitICallInfo *info;
3825 /* Need to register the icall so it gets an icall wrapper */
3826 info = mono_get_array_new_va_icall (rank);
3828 cfg->flags |= MONO_CFG_HAS_VARARGS;
3830 /* mono_array_new_va () needs a vararg calling convention */
3831 cfg->disable_llvm = TRUE;
3833 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3834 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry bblock, and
 * add a dummy use in the exit bblock to keep the variable alive.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering, e.g. the early return after the guard) — verify
 * against the full file before editing.
 */
3838 mono_emit_load_got_addr (MonoCompile *cfg)
3840 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT var, or if it was already emitted. */
3842 if (!cfg->got_var || cfg->got_var_allocated)
3845 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3846 getaddr->dreg = cfg->got_var->dreg;
3848 /* Add it to the start of the first bblock */
/* Manual list splice: prepend when code already exists, otherwise append. */
3849 if (cfg->bb_entry->code) {
3850 getaddr->next = cfg->bb_entry->code;
3851 cfg->bb_entry->code = getaddr;
3854 MONO_ADD_INS (cfg->bb_entry, getaddr);
3856 cfg->got_var_allocated = TRUE;
3859 * Add a dummy use to keep the got_var alive, since real uses might
3860 * only be generated by the back ends.
3861 * Add it to end_bblock, so the variable's lifetime covers the whole
3863 * It would be better to make the usage of the got var explicit in all
3864 * cases when the backend needs it (i.e. calls, throw etc.), so this
3865 * wouldn't be needed.
3867 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3868 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline code-size limit: initialized once from the MONO_INLINELIMIT
 * environment variable, falling back to INLINE_LENGTH_LIMIT. */
3871 static int inline_limit;
3872 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether @method is eligible for inlining into the method being
 * compiled in @cfg. Checks sharing context, inline depth, impl flags,
 * code size against the MONO_INLINELIMIT budget, class-initialization
 * constraints, declarative security, and (on soft-float targets) R4 use.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering, notably the return statements) — verify against the
 * full file before editing.
 */
3875 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3877 MonoMethodHeaderSummary header;
3879 #ifdef MONO_ARCH_SOFT_FLOAT
3880 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining under generic code sharing, and cap recursion depth. */
3884 if (cfg->generic_sharing_context)
3887 if (cfg->inline_depth > 10)
3890 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature->ret' here does not match the 'sig' local above;
 * only compiled on MONO_ARCH_HAVE_LMF_OPS targets — confirm it resolves. */
3891 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3892 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3893 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3898 if (!mono_method_get_header_summary (method, &header))
3901 /*runtime, icall and pinvoke are checked by summary call*/
3902 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3903 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3904 (method->klass->marshalbyref) ||
3908 /* also consider num_locals? */
3909 /* Do the size check early to avoid creating vtables */
/* Lazy one-time init of the inline size budget (getenv is read twice;
 * harmless but could be hoisted into a single call). */
3910 if (!inline_limit_inited) {
3911 if (getenv ("MONO_INLINELIMIT"))
3912 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3914 inline_limit = INLINE_LENGTH_LIMIT;
3915 inline_limit_inited = TRUE;
3917 if (header.code_size >= inline_limit)
3921 * if we can initialize the class of the method right away, we do,
3922 * otherwise we don't allow inlining if the class needs initialization,
3923 * since it would mean inserting a call to mono_runtime_class_init()
3924 * inside the inlined code
3926 if (!(cfg->opt & MONO_OPT_SHARED)) {
3927 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3928 if (cfg->run_cctors && method->klass->has_cctor) {
3929 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3930 if (!method->klass->runtime_info)
3931 /* No vtable created yet */
3933 vtable = mono_class_vtable (cfg->domain, method->klass);
3936 /* This makes so that inline cannot trigger */
3937 /* .cctors: too many apps depend on them */
3938 /* running with a specific order... */
3939 if (! vtable->initialized)
3941 mono_runtime_class_init (vtable);
3943 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3944 if (!method->klass->runtime_info)
3945 /* No vtable created yet */
3947 vtable = mono_class_vtable (cfg->domain, method->klass);
3950 if (!vtable->initialized)
3955 * If we're compiling for shared code
3956 * the cctor will need to be run at aot method load time, for example,
3957 * or at the end of the compilation of the inlining method.
3959 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3964 * CAS - do not inline methods with declarative security
3965 * Note: this has to be before any possible return TRUE;
3967 if (mono_method_has_declsec (method))
3970 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: refuse to inline methods that take or return R4 values. */
3972 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3974 for (i = 0; i < sig->param_count; ++i)
3975 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3983 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3985 if (vtable->initialized && !cfg->compile_aot)
3988 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3991 if (!mono_class_needs_cctor_run (vtable->klass, method))
3994 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3995 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element @index of the one-dimensional
 * array @arr with element class @klass; a bounds check is emitted when
 * @bcheck is set. Returns the address-producing instruction (STACK_PTR).
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering, e.g. braces, returns and #else/#endif lines) —
 * verify against the full file before editing.
 */
4002 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4006 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4008 mono_class_init (klass);
4009 size = mono_class_array_element_size (klass);
4011 mult_reg = alloc_preg (cfg);
4012 array_reg = arr->dreg;
4013 index_reg = index->dreg;
4015 #if SIZEOF_REGISTER == 8
4016 /* The array reg is 64 bits but the index reg is only 32 */
4017 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
4019 index2_reg = index_reg;
4021 index2_reg = alloc_preg (cfg);
4022 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index down to I4. */
4025 if (index->type == STACK_I8) {
4026 index2_reg = alloc_preg (cfg);
4027 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4029 index2_reg = index_reg;
4034 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4036 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold scale+offset into a single LEA. */
4037 if (size == 1 || size == 2 || size == 4 || size == 8) {
4038 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4040 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4041 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4047 add_reg = alloc_preg (cfg);
4049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4050 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4051 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4052 ins->type = STACK_PTR;
4053 MONO_ADD_INS (cfg->cbb, ins);
4058 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [@index_ins1, @index_ins2] of
 * the two-dimensional array @arr with element class @klass, including the
 * per-dimension lower-bound adjustment and bounds checks. Only compiled on
 * targets with native mul/div (depends on OP_PMUL).
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering) — verify against the full file before editing.
 */
4060 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4062 int bounds_reg = alloc_preg (cfg);
4063 int add_reg = alloc_preg (cfg);
4064 int mult_reg = alloc_preg (cfg);
4065 int mult2_reg = alloc_preg (cfg);
4066 int low1_reg = alloc_preg (cfg);
4067 int low2_reg = alloc_preg (cfg);
4068 int high1_reg = alloc_preg (cfg);
4069 int high2_reg = alloc_preg (cfg);
4070 int realidx1_reg = alloc_preg (cfg);
4071 int realidx2_reg = alloc_preg (cfg);
4072 int sum_reg = alloc_preg (cfg);
4077 mono_class_init (klass);
4078 size = mono_class_array_element_size (klass);
4080 index1 = index_ins1->dreg;
4081 index2 = index_ins2->dreg;
4083 /* range checking */
4084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4085 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned compare against
 * length doubles as a negative-index check. */
4087 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4088 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4089 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4090 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4091 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4092 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4093 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds entry, same adjustment and check. */
4095 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4096 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4097 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4098 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4099 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4100 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4101 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector) */
4103 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4104 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4106 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4107 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4109 ins->type = STACK_MP;
4111 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the address computation for an Address () call on the array method
 * @cmethod: fast inline paths for rank 1 (and rank 2 with intrinsics and
 * native mul), otherwise a call to the generated array-address wrapper.
 * NOTE(review): this listing elides some original lines (gaps in the
 * embedded numbering, e.g. the 'if (rank == 1)' guard before the first
 * return) — verify against the full file before editing.
 */
4118 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4122 MonoMethod *addr_method;
/* For Set () the trailing parameter is the value, not an index. */
4125 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4128 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4130 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4131 /* emit_ldelema_2 depends on OP_LMUL */
4132 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4133 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the marshalled array-address helper. */
4137 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4138 addr_method = mono_marshal_get_array_address (rank, element_size);
4139 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4144 static MonoBreakPolicy
4145 always_insert_breakpoint (MonoMethod *method)
4147 return MONO_BREAK_POLICY_ALWAYS;
4150 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4153 * mono_set_break_policy:
4154 * policy_callback: the new callback function
4156 * Allow embedders to decide wherther to actually obey breakpoint instructions
4157 * (both break IL instructions and Debugger.Break () method calls), for example
4158 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4159 * untrusted or semi-trusted code.
4161 * @policy_callback will be called every time a break point instruction needs to
4162 * be inserted with the method argument being the method that calls Debugger.Break()
4163 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4164 * if it wants the breakpoint to not be effective in the given method.
4165 * #MONO_BREAK_POLICY_ALWAYS is the default.
4168 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4170 if (policy_callback)
4171 break_policy_func = policy_callback;
4173 break_policy_func = always_insert_breakpoint;
4177 should_insert_brekpoint (MonoMethod *method) {
4178 switch (break_policy_func (method)) {
4179 case MONO_BREAK_POLICY_ALWAYS:
4181 case MONO_BREAK_POLICY_NEVER:
4183 case MONO_BREAK_POLICY_ON_DBG:
4184 return mono_debug_using_mono_debugger ();
4186 g_warning ("Incorrect value returned from break policy callback");
4191 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4193 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4195 MonoInst *addr, *store, *load;
4196 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4198 /* the bounds check is already done by the callers */
4199 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4201 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4204 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4205 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4211 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4213 MonoInst *ins = NULL;
4214 #ifdef MONO_ARCH_SIMD_INTRINSICS
4215 if (cfg->opt & MONO_OPT_SIMD) {
4216 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4226 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4228 MonoInst *ins = NULL;
4230 static MonoClass *runtime_helpers_class = NULL;
4231 if (! runtime_helpers_class)
4232 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4233 "System.Runtime.CompilerServices", "RuntimeHelpers");
4235 if (cmethod->klass == mono_defaults.string_class) {
4236 if (strcmp (cmethod->name, "get_Chars") == 0) {
4237 int dreg = alloc_ireg (cfg);
4238 int index_reg = alloc_preg (cfg);
4239 int mult_reg = alloc_preg (cfg);
4240 int add_reg = alloc_preg (cfg);
4242 #if SIZEOF_REGISTER == 8
4243 /* The array reg is 64 bits but the index reg is only 32 */
4244 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4246 index_reg = args [1]->dreg;
4248 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4250 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4251 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4252 add_reg = ins->dreg;
4253 /* Avoid a warning */
4255 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4259 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4260 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4261 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4263 type_from_op (ins, NULL, NULL);
4265 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4266 int dreg = alloc_ireg (cfg);
4267 /* Decompose later to allow more optimizations */
4268 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4269 ins->type = STACK_I4;
4270 ins->flags |= MONO_INST_FAULT;
4271 cfg->cbb->has_array_access = TRUE;
4272 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4275 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4276 int mult_reg = alloc_preg (cfg);
4277 int add_reg = alloc_preg (cfg);
4279 /* The corlib functions check for oob already. */
4280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4281 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4282 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4283 return cfg->cbb->last_ins;
4286 } else if (cmethod->klass == mono_defaults.object_class) {
4288 if (strcmp (cmethod->name, "GetType") == 0) {
4289 int dreg = alloc_preg (cfg);
4290 int vt_reg = alloc_preg (cfg);
4291 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4293 type_from_op (ins, NULL, NULL);
4296 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4297 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4298 int dreg = alloc_ireg (cfg);
4299 int t1 = alloc_ireg (cfg);
4301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4302 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4303 ins->type = STACK_I4;
4307 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4308 MONO_INST_NEW (cfg, ins, OP_NOP);
4309 MONO_ADD_INS (cfg->cbb, ins);
4313 } else if (cmethod->klass == mono_defaults.array_class) {
4314 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4315 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4317 #ifndef MONO_BIG_ARRAYS
4319 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4322 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4323 int dreg = alloc_ireg (cfg);
4324 int bounds_reg = alloc_ireg (cfg);
4325 MonoBasicBlock *end_bb, *szarray_bb;
4326 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4328 NEW_BBLOCK (cfg, end_bb);
4329 NEW_BBLOCK (cfg, szarray_bb);
4331 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4332 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4335 /* Non-szarray case */
4337 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4338 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4340 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4341 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4343 MONO_START_BB (cfg, szarray_bb);
4346 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4347 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4349 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4350 MONO_START_BB (cfg, end_bb);
4352 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4353 ins->type = STACK_I4;
4359 if (cmethod->name [0] != 'g')
4362 if (strcmp (cmethod->name, "get_Rank") == 0) {
4363 int dreg = alloc_ireg (cfg);
4364 int vtable_reg = alloc_preg (cfg);
4365 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4366 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4367 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4368 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4369 type_from_op (ins, NULL, NULL);
4372 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4373 int dreg = alloc_ireg (cfg);
4375 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4376 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4377 type_from_op (ins, NULL, NULL);
4382 } else if (cmethod->klass == runtime_helpers_class) {
4384 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4385 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4389 } else if (cmethod->klass == mono_defaults.thread_class) {
4390 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4391 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4392 MONO_ADD_INS (cfg->cbb, ins);
4394 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4395 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4396 MONO_ADD_INS (cfg->cbb, ins);
4399 } else if (cmethod->klass == mono_defaults.monitor_class) {
4400 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4401 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4404 if (COMPILE_LLVM (cfg)) {
4406 * Pass the argument normally, the LLVM backend will handle the
4407 * calling convention problems.
4409 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4411 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4412 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4413 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4414 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4417 return (MonoInst*)call;
4418 } else if (strcmp (cmethod->name, "Exit") == 0) {
4421 if (COMPILE_LLVM (cfg)) {
4422 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4424 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4425 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4426 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4427 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4430 return (MonoInst*)call;
4432 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4433 MonoMethod *fast_method = NULL;
4435 /* Avoid infinite recursion */
4436 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4437 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4438 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4441 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4442 strcmp (cmethod->name, "Exit") == 0)
4443 fast_method = mono_monitor_get_fast_path (cmethod);
4447 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4449 } else if (cmethod->klass->image == mono_defaults.corlib &&
4450 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4451 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4454 #if SIZEOF_REGISTER == 8
4455 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4456 /* 64 bit reads are already atomic */
4457 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4458 ins->dreg = mono_alloc_preg (cfg);
4459 ins->inst_basereg = args [0]->dreg;
4460 ins->inst_offset = 0;
4461 MONO_ADD_INS (cfg->cbb, ins);
4465 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4466 if (strcmp (cmethod->name, "Increment") == 0) {
4467 MonoInst *ins_iconst;
4470 if (fsig->params [0]->type == MONO_TYPE_I4)
4471 opcode = OP_ATOMIC_ADD_NEW_I4;
4472 #if SIZEOF_REGISTER == 8
4473 else if (fsig->params [0]->type == MONO_TYPE_I8)
4474 opcode = OP_ATOMIC_ADD_NEW_I8;
4477 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4478 ins_iconst->inst_c0 = 1;
4479 ins_iconst->dreg = mono_alloc_ireg (cfg);
4480 MONO_ADD_INS (cfg->cbb, ins_iconst);
4482 MONO_INST_NEW (cfg, ins, opcode);
4483 ins->dreg = mono_alloc_ireg (cfg);
4484 ins->inst_basereg = args [0]->dreg;
4485 ins->inst_offset = 0;
4486 ins->sreg2 = ins_iconst->dreg;
4487 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4488 MONO_ADD_INS (cfg->cbb, ins);
4490 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4491 MonoInst *ins_iconst;
4494 if (fsig->params [0]->type == MONO_TYPE_I4)
4495 opcode = OP_ATOMIC_ADD_NEW_I4;
4496 #if SIZEOF_REGISTER == 8
4497 else if (fsig->params [0]->type == MONO_TYPE_I8)
4498 opcode = OP_ATOMIC_ADD_NEW_I8;
4501 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4502 ins_iconst->inst_c0 = -1;
4503 ins_iconst->dreg = mono_alloc_ireg (cfg);
4504 MONO_ADD_INS (cfg->cbb, ins_iconst);
4506 MONO_INST_NEW (cfg, ins, opcode);
4507 ins->dreg = mono_alloc_ireg (cfg);
4508 ins->inst_basereg = args [0]->dreg;
4509 ins->inst_offset = 0;
4510 ins->sreg2 = ins_iconst->dreg;
4511 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4512 MONO_ADD_INS (cfg->cbb, ins);
4514 } else if (strcmp (cmethod->name, "Add") == 0) {
4517 if (fsig->params [0]->type == MONO_TYPE_I4)
4518 opcode = OP_ATOMIC_ADD_NEW_I4;
4519 #if SIZEOF_REGISTER == 8
4520 else if (fsig->params [0]->type == MONO_TYPE_I8)
4521 opcode = OP_ATOMIC_ADD_NEW_I8;
4525 MONO_INST_NEW (cfg, ins, opcode);
4526 ins->dreg = mono_alloc_ireg (cfg);
4527 ins->inst_basereg = args [0]->dreg;
4528 ins->inst_offset = 0;
4529 ins->sreg2 = args [1]->dreg;
4530 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4531 MONO_ADD_INS (cfg->cbb, ins);
4534 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4536 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4537 if (strcmp (cmethod->name, "Exchange") == 0) {
4539 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4541 if (fsig->params [0]->type == MONO_TYPE_I4)
4542 opcode = OP_ATOMIC_EXCHANGE_I4;
4543 #if SIZEOF_REGISTER == 8
4544 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4545 (fsig->params [0]->type == MONO_TYPE_I))
4546 opcode = OP_ATOMIC_EXCHANGE_I8;
4548 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4549 opcode = OP_ATOMIC_EXCHANGE_I4;
4554 MONO_INST_NEW (cfg, ins, opcode);
4555 ins->dreg = mono_alloc_ireg (cfg);
4556 ins->inst_basereg = args [0]->dreg;
4557 ins->inst_offset = 0;
4558 ins->sreg2 = args [1]->dreg;
4559 MONO_ADD_INS (cfg->cbb, ins);
4561 switch (fsig->params [0]->type) {
4563 ins->type = STACK_I4;
4567 ins->type = STACK_I8;
4569 case MONO_TYPE_OBJECT:
4570 ins->type = STACK_OBJ;
4573 g_assert_not_reached ();
4576 if (cfg->gen_write_barriers && is_ref)
4577 emit_write_barrier (cfg, args [0], args [1], -1);
4579 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4581 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4582 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4584 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4585 if (fsig->params [1]->type == MONO_TYPE_I4)
4587 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4588 size = sizeof (gpointer);
4589 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4592 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4593 ins->dreg = alloc_ireg (cfg);
4594 ins->sreg1 = args [0]->dreg;
4595 ins->sreg2 = args [1]->dreg;
4596 ins->sreg3 = args [2]->dreg;
4597 ins->type = STACK_I4;
4598 MONO_ADD_INS (cfg->cbb, ins);
4599 } else if (size == 8) {
4600 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4601 ins->dreg = alloc_ireg (cfg);
4602 ins->sreg1 = args [0]->dreg;
4603 ins->sreg2 = args [1]->dreg;
4604 ins->sreg3 = args [2]->dreg;
4605 ins->type = STACK_I8;
4606 MONO_ADD_INS (cfg->cbb, ins);
4608 /* g_assert_not_reached (); */
4610 if (cfg->gen_write_barriers && is_ref)
4611 emit_write_barrier (cfg, args [0], args [1], -1);
4613 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4617 } else if (cmethod->klass->image == mono_defaults.corlib) {
4618 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4619 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4620 if (should_insert_brekpoint (cfg->method))
4621 MONO_INST_NEW (cfg, ins, OP_BREAK);
4623 MONO_INST_NEW (cfg, ins, OP_NOP);
4624 MONO_ADD_INS (cfg->cbb, ins);
4627 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4628 && strcmp (cmethod->klass->name, "Environment") == 0) {
4630 EMIT_NEW_ICONST (cfg, ins, 1);
4632 EMIT_NEW_ICONST (cfg, ins, 0);
4636 } else if (cmethod->klass == mono_defaults.math_class) {
4638 * There is general branches code for Min/Max, but it does not work for
4640 * http://everything2.com/?node_id=1051618
4644 #ifdef MONO_ARCH_SIMD_INTRINSICS
4645 if (cfg->opt & MONO_OPT_SIMD) {
4646 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4652 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime method calls to faster managed implementations.
 * Currently handles System.String.InternalAllocateStr: when allocation
 * profiling is disabled and MONO_OPT_SHARED is not in effect, the call is
 * replaced with a direct call to the GC's managed string allocator.
 * Returns the replacement call instruction, or NULL (in the elided
 * fall-through path) when no redirection applies.
 * NOTE(review): listing is sampled; closing braces / NULL return not visible.
 */
4656 * This entry point could be used later for arbitrary method
4659 inline static MonoInst*
4660 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4661 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4663 if (method->klass == mono_defaults.string_class) {
4664 /* managed string allocation support */
4665 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4666 MonoInst *iargs [2];
4667 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4668 MonoMethod *managed_alloc = NULL;
4670 g_assert (vtable); /* Should not fail since it is System.String */
4671 #ifndef MONO_CROSS_COMPILE
4672 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* args: iargs [0] = vtable, iargs [1] = requested string length */
4676 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4677 iargs [1] = args [0];
4678 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument of SIG (including the
 * implicit 'this' when sig->hasthis) and emit stores of the stack values
 * *sp into them, populating cfg->args. Used when setting up the argument
 * environment of an inlined callee.
 */
4685 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4687 MonoInst *store, *temp;
4690 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for the 'this' slot the static signature has no entry, so derive the
 * type from the stack value instead */
4691 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4694 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4695 * would be different than the MonoInst's used to represent arguments, and
4696 * the ldelema implementation can't deal with that.
4697 * Solution: When ldelema is used on an inline argument, create a var for
4698 * it, emit ldelema on that var, and emit the saving code below in
4699 * inline_method () if needed.
4701 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4702 cfg->args [i] = temp;
4703 /* This uses cfg->args [i] which is set by the preceding line */
4704 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4705 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, inlining is additionally filtered by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (see the check_* functions below).
 * NOTE(review): these are set to 1 here, which enables the filtering code
 * paths; upstream typically ships with 0 — confirm this is intentional.
 */
4710 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4711 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4713 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: returns TRUE only if the full name of CALLED_METHOD starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable (read once and cached in a static). With no limit set
 * the elided fall-through path allows inlining unconditionally.
 * NOTE(review): the cached 'limit' static makes this non-thread-safe on
 * first use — acceptable for a debug-only facility.
 */
4715 check_inline_called_method_name_limit (MonoMethod *called_method)
4718 static char *limit = NULL;
4720 if (limit == NULL) {
4721 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4723 if (limit_string != NULL)
4724 limit = limit_string;
4726 limit = (char *) "";
4729 if (limit [0] != '\0') {
4730 char *called_method_name = mono_method_full_name (called_method, TRUE);
4732 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4733 g_free (called_method_name);
4735 //return (strncmp_result <= 0);
4736 return (strncmp_result == 0);
4743 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: returns TRUE only if the full name of CALLER_METHOD starts
 * with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable (read once and cached). Mirrors
 * check_inline_called_method_name_limit () but filters on the caller side.
 */
4745 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4748 static char *limit = NULL;
4750 if (limit == NULL) {
4751 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4752 if (limit_string != NULL) {
4753 limit = limit_string;
4755 limit = (char *) "";
4759 if (limit [0] != '\0') {
4760 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4762 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4763 g_free (caller_method_name);
4765 //return (strncmp_result <= 0);
4766 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. Saves the parts of
 * CFG that mono_method_to_ir () overwrites (locals, args, bblock maps, the
 * current method/generic context, ...), recursively converts the callee's IL
 * between a fresh start/end bblock pair, restores CFG, and then either links
 * the generated blocks into the graph (success) or discards them (abort).
 * RVAR, when the callee is non-void, is the local receiving the return value.
 * INLINE_ALLWAYS [sic] forces inlining regardless of cost.
 * Returns the callee's cost on success, 0 (in elided paths) on failure.
 * NOTE(review): listing is sampled; some braces/returns are not visible here.
 */
4774 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4775 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4777 MonoInst *ins, *rvar = NULL;
4778 MonoMethodHeader *cheader;
4779 MonoBasicBlock *ebblock, *sbblock;
/* prev_* locals snapshot every cfg field clobbered by the recursive
 * mono_method_to_ir () call so it can be undone afterwards */
4781 MonoMethod *prev_inlined_method;
4782 MonoInst **prev_locals, **prev_args;
4783 MonoType **prev_arg_types;
4784 guint prev_real_offset;
4785 GHashTable *prev_cbb_hash;
4786 MonoBasicBlock **prev_cil_offset_to_bb;
4787 MonoBasicBlock *prev_cbb;
4788 unsigned char* prev_cil_start;
4789 guint32 prev_cil_offset_to_bb_len;
4790 MonoMethod *prev_current_method;
4791 MonoGenericContext *prev_generic_context;
4792 gboolean ret_var_set, prev_ret_var_set;
4794 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var based inline filtering, see macros above */
4796 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4797 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4800 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4801 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4805 if (cfg->verbose_level > 2)
4806 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4808 if (!cmethod->inline_info) {
4809 mono_jit_stats.inlineable_methods++;
4810 cmethod->inline_info = 1;
4813 /* allocate local variables */
4814 cheader = mono_method_get_header (cmethod);
4816 if (cheader == NULL || mono_loader_get_last_error ()) {
4818 mono_metadata_free_mh (cheader);
4819 mono_loader_clear_error ();
4823 /*Must verify before creating locals as it can cause the JIT to assert.*/
4824 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4825 mono_metadata_free_mh (cheader);
4829 /* allocate space to store the return value */
4830 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4831 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4835 prev_locals = cfg->locals;
4836 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4837 for (i = 0; i < cheader->num_locals; ++i)
4838 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4840 /* allocate start and end blocks */
4841 /* This is needed so if the inline is aborted, we can clean up */
4842 NEW_BBLOCK (cfg, sbblock);
4843 sbblock->real_offset = real_offset;
4845 NEW_BBLOCK (cfg, ebblock);
4846 ebblock->block_num = cfg->num_bblocks++;
4847 ebblock->real_offset = real_offset;
/* save cfg state that the recursive IR conversion clobbers */
4849 prev_args = cfg->args;
4850 prev_arg_types = cfg->arg_types;
4851 prev_inlined_method = cfg->inlined_method;
4852 cfg->inlined_method = cmethod;
4853 cfg->ret_var_set = FALSE;
4854 cfg->inline_depth ++;
4855 prev_real_offset = cfg->real_offset;
4856 prev_cbb_hash = cfg->cbb_hash;
4857 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4858 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4859 prev_cil_start = cfg->cil_start;
4860 prev_cbb = cfg->cbb;
4861 prev_current_method = cfg->current_method;
4862 prev_generic_context = cfg->generic_context;
4863 prev_ret_var_set = cfg->ret_var_set;
/* recursively convert the callee's IL into IR between sbblock/ebblock */
4865 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4867 ret_var_set = cfg->ret_var_set;
/* restore the saved cfg state */
4869 cfg->inlined_method = prev_inlined_method;
4870 cfg->real_offset = prev_real_offset;
4871 cfg->cbb_hash = prev_cbb_hash;
4872 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4873 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4874 cfg->cil_start = prev_cil_start;
4875 cfg->locals = prev_locals;
4876 cfg->args = prev_args;
4877 cfg->arg_types = prev_arg_types;
4878 cfg->current_method = prev_current_method;
4879 cfg->generic_context = prev_generic_context;
4880 cfg->ret_var_set = prev_ret_var_set;
4881 cfg->inline_depth --;
/* accept the inline if it was cheap enough (cost < 60) or forced */
4883 if ((costs >= 0 && costs < 60) || inline_allways) {
4884 if (cfg->verbose_level > 2)
4885 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4887 mono_jit_stats.inlined_methods++;
4889 /* always add some code to avoid block split failures */
4890 MONO_INST_NEW (cfg, ins, OP_NOP);
4891 MONO_ADD_INS (prev_cbb, ins);
4893 prev_cbb->next_bb = sbblock;
4894 link_bblock (cfg, prev_cbb, sbblock);
4897 * Get rid of the begin and end bblocks if possible to aid local
4900 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4902 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4903 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4905 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4906 MonoBasicBlock *prev = ebblock->in_bb [0];
4907 mono_merge_basic_blocks (cfg, prev, ebblock);
4909 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4910 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4911 cfg->cbb = prev_cbb;
4919 * If the inlined method contains only a throw, then the ret var is not
4920 * set, so set it to a dummy value.
4923 static double r8_0 = 0.0;
4925 switch (rvar->type) {
4927 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4930 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4935 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4938 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4939 ins->type = STACK_R8;
4940 ins->inst_p0 = (void*)&r8_0;
4941 ins->dreg = rvar->dreg;
4942 MONO_ADD_INS (cfg->cbb, ins);
4945 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4948 g_assert_not_reached ();
/* reload the return value for the caller's evaluation stack */
4952 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4955 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: too costly or conversion failed */
4958 if (cfg->verbose_level > 2)
4959 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4960 cfg->exception_type = MONO_EXCEPTION_NONE;
4961 mono_loader_clear_error ();
4963 /* This gets rid of the newly added bblocks */
4964 cfg->cbb = prev_cbb;
4966 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4971 * Some of these comments may well be out-of-date.
4972 * Design decisions: we do a single pass over the IL code (and we do bblock
4973 * splitting/merging in the few cases when it's required: a back jump to an IL
4974 * address that was not already seen as bblock starting point).
4975 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4976 * Complex operations are decomposed in simpler ones right away. We need to let the
4977 * arch-specific code peek and poke inside this process somehow (except when the
4978 * optimizations can take advantage of the full semantic info of coarse opcodes).
4979 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4980 * MonoInst->opcode initially is the IL opcode or some simplification of that
4981 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4982 * opcode with value bigger than OP_LAST.
4983 * At this point the IR can be handed over to an interpreter, a dumb code generator
4984 * or to the optimizing code generator that will translate it to SSA form.
4986 * Profiling directed optimizations.
4987 * We may compile by default with few or no optimizations and instrument the code
4988 * or the user may indicate what methods to optimize the most either in a config file
4989 * or through repeated runs where the compiler applies offline the optimizations to
4990 * each method and then decides if it was worth it.
/*
 * Lightweight verification helpers used throughout mono_method_to_ir ().
 * Each macro bails out through UNVERIFIED (or LOAD_ERROR) when the IL being
 * converted violates a structural constraint.
 */
4993 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4994 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4995 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4996 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4997 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4998 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4999 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5000 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5002 /* offset from br.s -> br like opcodes */
5003 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP falls inside basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5006 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5008 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5010 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): walk every opcode, decode its
 * operand size, and create a basic block (GET_BBLOCK) at each branch target
 * and at each instruction following a branch/switch. Also marks the bblock
 * containing a CEE_THROW as out-of-line so it can be moved to cold code.
 * On a decode failure the elided error path reports the position via *POS.
 */
5014 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5016 unsigned char *ip = start;
5017 unsigned char *target;
5020 MonoBasicBlock *bblock;
5021 const MonoOpcode *opcode;
5024 cli_addr = ip - start;
5025 i = mono_opcode_value ((const guint8 **)&ip, end);
5028 opcode = &mono_opcodes [i];
/* advance ip past the operand according to its encoding */
5029 switch (opcode->argument) {
5030 case MonoInlineNone:
5033 case MonoInlineString:
5034 case MonoInlineType:
5035 case MonoInlineField:
5036 case MonoInlineMethod:
5039 case MonoShortInlineR:
5046 case MonoShortInlineVar:
5047 case MonoShortInlineI:
5050 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
5051 target = start + cli_addr + 2 + (signed char)ip [1];
5052 GET_BBLOCK (cfg, bblock, target);
5055 GET_BBLOCK (cfg, bblock, ip);
5057 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
5058 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5059 GET_BBLOCK (cfg, bblock, target);
5062 GET_BBLOCK (cfg, bblock, ip);
5064 case MonoInlineSwitch: {
5065 guint32 n = read32 (ip + 1);
/* fall-through target comes right after the n-entry jump table */
5068 cli_addr += 5 + 4 * n;
5069 target = start + cli_addr;
5070 GET_BBLOCK (cfg, bblock, target);
5072 for (j = 0; j < n; ++j) {
5073 target = start + cli_addr + (gint32)read32 (ip);
5074 GET_BBLOCK (cfg, bblock, target);
5084 g_assert_not_reached ();
5087 if (i == CEE_THROW) {
5088 unsigned char *bb_start = ip - 1;
5090 /* Find the start of the bblock containing the throw */
5092 while ((bb_start >= start) && !bblock) {
5093 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throw-only blocks are cold: move them out of line */
5097 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods
 * the token indexes the wrapper's own data; otherwise it is a metadata token
 * resolved against M's image. "allow_open" because the result may still
 * contain open (unbound) generic parameters.
 */
5106 static inline MonoMethod *
5107 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5111 if (m->wrapper_type != MONO_WRAPPER_NONE)
5112 return mono_method_get_wrapper_data (m, token);
5114 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods whose declaring class is still an open
 * constructed type (the elided branch presumably clears 'method').
 */
5119 static inline MonoMethod *
5120 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5122 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5124 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrappers, otherwise a metadata lookup against the method's image, followed
 * by class initialization.
 */
5130 static inline MonoClass*
5131 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5135 if (method->wrapper_type != MONO_WRAPPER_NONE)
5136 klass = mono_method_get_wrapper_data (method, token);
5138 klass = mono_class_get_full (method->klass->image, token, context);
5140 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a call from CALLER to CALLEE. For an ECMA
 * link demand, emits code that throws a SecurityException before the call;
 * for other failures records MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg.
 */
5145 * Returns TRUE if the JIT should abort inlining because "callee"
5146 * is influenced by security attributes.
5149 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5153 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5157 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5158 if (result == MONO_JIT_SECURITY_OK)
5161 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5162 /* Generate code to throw a SecurityException before the actual call/link */
5163 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5166 NEW_ICONST (cfg, args [0], 4);
5167 NEW_METHODCONST (cfg, args [1], caller);
5168 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5169 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5170 /* don't hide previous results */
5171 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5172 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily looking up and caching) the managed
 * SecurityManager.ThrowException method used to raise security exceptions
 * from JITted code.
 */
5180 throw_exception (void)
5182 static MonoMethod *method = NULL;
5185 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5186 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, i.e. code that throws the given exception object at run time.
 */
5193 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5195 MonoMethod *thrower = throw_exception ();
5198 EMIT_NEW_PCONST (cfg, args [0], ex);
5199 mono_emit_method_call (cfg, thrower, args, NULL);
5203 * Return the original method if a wrapper is specified. We can only access
5204 * the custom attributes from the original method.
5207 get_original_method (MonoMethod *method)
/* non-wrappers are returned as-is (elided early return) */
5209 if (method->wrapper_type == MONO_WRAPPER_NONE)
5212 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5213 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5216 /* in other cases we need to find the original method */
5217 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code throwing the returned exception.
 */
5221 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5222 MonoBasicBlock *bblock, unsigned char *ip)
5224 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5225 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5227 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code throwing the returned exception.
 */
5231 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5232 MonoBasicBlock *bblock, unsigned char *ip)
5234 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5235 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5237 emit_throw_exception (cfg, ex);
5241 * Check that the IL instructions at ip are the array initialization
5242 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the pattern emitted by the C# compiler for array literals:
 * dup; ldtoken <fld>; call RuntimeHelpers::InitializeArray. On a match,
 * returns a pointer to the field's RVA data (or the RVA itself for AOT)
 * and stores the data size / field token through the out parameters;
 * the elided paths return NULL when the pattern or element type is
 * unsupported (e.g. multi-byte types on big-endian targets).
 */
5245 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5248 * newarr[System.Int32]
5250 * ldtoken field valuetype ...
5251 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand is a Field token (0x04xxxxxx) */
5253 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5254 guint32 token = read32 (ip + 7);
5255 guint32 field_token = read32 (ip + 2);
5256 guint32 field_index = field_token & 0xffffff;
5258 const char *data_ptr;
5260 MonoMethod *cmethod;
5261 MonoClass *dummy_class;
5262 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5268 *out_field_token = field_token;
5270 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must really be RuntimeHelpers::InitializeArray from corlib */
5273 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5275 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5276 case MONO_TYPE_BOOLEAN:
5280 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5281 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5282 case MONO_TYPE_CHAR:
5292 return NULL; /* stupid ARM FP swapped format */
/* refuse if the computed blob is larger than the field's declared size */
5302 if (size > mono_type_size (field->type, &dummy_align))
5305 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5306 if (!method->klass->image->dynamic) {
5307 field_index = read32 (ip + 2) & 0xffffff;
5308 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5309 data_ptr = mono_image_rva_map (method->klass->image, rva);
5310 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5311 /* for aot code we do the lookup on load */
5312 if (aot && data_ptr)
5313 return GUINT_TO_POINTER (rva);
5315 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: read the data straight from the field */
5317 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP (or noting an empty
 * body). The header is queued on cfg->headers_to_free for later release.
 */
5325 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5327 char *method_fname = mono_method_full_name (method, TRUE);
5329 MonoMethodHeader *header = mono_method_get_header (method);
5331 if (header->code_size == 0)
5332 method_code = g_strdup ("method body is empty.");
5334 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5335 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5336 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5337 g_free (method_fname);
5338 g_free (method_code);
5339 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG (OBJECT_SUPPLIED), registering
 * cfg->exception_ptr as a GC root so the object survives until thrown.
 */
5343 set_exception_object (MonoCompile *cfg, MonoException *exception)
5345 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5346 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5347 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving generic type
 * parameters through the generic sharing context when one is active.
 */
5351 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5355 if (cfg->generic_sharing_context)
5356 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5358 type = &klass->byval_arg;
5359 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N. When the store would
 * be a plain reg-reg move of a freshly emitted constant, the move is
 * eliminated by retargeting the constant's dreg to the local's register.
 */
5363 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5366 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5367 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5368 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5369 /* Optimize reg-reg moves away */
5371 * Can't optimize other opcodes, since sp[0] might point to
5372 * the last ins of a decomposed opcode.
5374 sp [0]->dreg = (cfg)->locals [n]->dreg;
5376 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5381 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Peephole: when a ldloca is immediately consumed by an initobj on the
 * same basic block, skip the address-taking entirely and emit the
 * equivalent direct initialization of the local (NULL for reference
 * types, VZERO for value types). Returns the advanced ip on success,
 * NULL (elided path) when the pattern does not match.
 */
5384 static inline unsigned char *
5385 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5394 local = read16 (ip + 2);
5398 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5399 gboolean skip = FALSE;
5401 /* From the INITOBJ case */
5402 token = read32 (ip + 2);
5403 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5404 CHECK_TYPELOAD (klass);
5405 if (generic_class_is_reference_type (cfg, klass)) {
5406 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5407 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5408 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5409 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5410 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return TRUE if CLASS is System.Exception or derives from it, walking
 * the parent chain.
 */
5423 is_exception_class (MonoClass *class)
5426 if (class == mono_defaults.exception_class)
5428 class = class->parent;
5434 * is_jit_optimizer_disabled:
5436 * Determine whether M's assembly has a DebuggableAttribute with the
5437 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per assembly (jit_optimizer_disabled /
 * jit_optimizer_disabled_inited) with a memory barrier to publish it safely.
 */
5440 is_jit_optimizer_disabled (MonoMethod *m)
5442 MonoAssembly *ass = m->klass->image->assembly;
5443 MonoCustomAttrInfo* attrs;
5444 static MonoClass *klass;
5446 gboolean val = FALSE;
5449 if (ass->jit_optimizer_disabled_inited)
5450 return ass->jit_optimizer_disabled;
5452 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5454 attrs = mono_custom_attrs_from_assembly (ass);
5456 for (i = 0; i < attrs->num_attrs; ++i) {
5457 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5460 MonoMethodSignature *sig;
5462 if (!attr->ctor || attr->ctor->klass != klass)
5464 /* Decode the attribute. See reflection.c */
5465 len = attr->data_size;
5466 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
5467 g_assert (read16 (p) == 0x0001);
5470 // FIXME: Support named parameters
5471 sig = mono_method_signature (attr->ctor);
5472 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5474 /* Two boolean arguments */
5480 ass->jit_optimizer_disabled = val;
5481 mono_memory_barrier ();
5482 ass->jit_optimizer_disabled_inited = TRUE;
5488 * mono_method_to_ir:
5490 * Translate the .net IL into linear IR.
5493 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5494 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5495 guint inline_offset, gboolean is_virtual_call)
5498 MonoInst *ins, **sp, **stack_start;
5499 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5500 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5501 MonoMethod *cmethod, *method_definition;
5502 MonoInst **arg_array;
5503 MonoMethodHeader *header;
5505 guint32 token, ins_flag;
5507 MonoClass *constrained_call = NULL;
5508 unsigned char *ip, *end, *target, *err_pos;
5509 static double r8_0 = 0.0;
5510 MonoMethodSignature *sig;
5511 MonoGenericContext *generic_context = NULL;
5512 MonoGenericContainer *generic_container = NULL;
5513 MonoType **param_types;
5514 int i, n, start_new_bblock, dreg;
5515 int num_calls = 0, inline_costs = 0;
5516 int breakpoint_id = 0;
5518 MonoBoolean security, pinvoke;
5519 MonoSecurityManager* secman = NULL;
5520 MonoDeclSecurityActions actions;
5521 GSList *class_inits = NULL;
5522 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5524 gboolean init_locals, seq_points, skip_dead_blocks;
5525 gboolean disable_inline;
5527 disable_inline = is_jit_optimizer_disabled (method);
5529 /* serialization and xdomain stuff may need access to private fields and methods */
5530 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5531 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5532 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5533 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5534 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5535 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5537 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5539 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5540 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5541 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5542 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5544 image = method->klass->image;
5545 header = mono_method_get_header (method);
5547 MonoLoaderError *error;
5549 if ((error = mono_loader_get_last_error ())) {
5550 mono_cfg_set_exception (cfg, error->exception_type);
5552 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5553 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5555 goto exception_exit;
5557 generic_container = mono_method_get_generic_container (method);
5558 sig = mono_method_signature (method);
5559 num_args = sig->hasthis + sig->param_count;
5560 ip = (unsigned char*)header->code;
5561 cfg->cil_start = ip;
5562 end = ip + header->code_size;
5563 mono_jit_stats.cil_code_size += header->code_size;
5564 init_locals = header->init_locals;
5566 seq_points = cfg->gen_seq_points && cfg->method == method;
5569 * Methods without init_locals set could cause asserts in various passes
5574 method_definition = method;
5575 while (method_definition->is_inflated) {
5576 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5577 method_definition = imethod->declaring;
5580 /* SkipVerification is not allowed if core-clr is enabled */
5581 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5583 dont_verify_stloc = TRUE;
5586 if (mono_debug_using_mono_debugger ())
5587 cfg->keep_cil_nops = TRUE;
5589 if (sig->is_inflated)
5590 generic_context = mono_method_get_context (method);
5591 else if (generic_container)
5592 generic_context = &generic_container->context;
5593 cfg->generic_context = generic_context;
5595 if (!cfg->generic_sharing_context)
5596 g_assert (!sig->has_type_parameters);
5598 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5599 g_assert (method->is_inflated);
5600 g_assert (mono_method_get_context (method)->method_inst);
5602 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5603 g_assert (sig->generic_param_count);
5605 if (cfg->method == method) {
5606 cfg->real_offset = 0;
5608 cfg->real_offset = inline_offset;
5611 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5612 cfg->cil_offset_to_bb_len = header->code_size;
5614 cfg->current_method = method;
5616 if (cfg->verbose_level > 2)
5617 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5619 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5621 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5622 for (n = 0; n < sig->param_count; ++n)
5623 param_types [n + sig->hasthis] = sig->params [n];
5624 cfg->arg_types = param_types;
5626 dont_inline = g_list_prepend (dont_inline, method);
5627 if (cfg->method == method) {
5629 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5630 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5633 NEW_BBLOCK (cfg, start_bblock);
5634 cfg->bb_entry = start_bblock;
5635 start_bblock->cil_code = NULL;
5636 start_bblock->cil_length = 0;
5639 NEW_BBLOCK (cfg, end_bblock);
5640 cfg->bb_exit = end_bblock;
5641 end_bblock->cil_code = NULL;
5642 end_bblock->cil_length = 0;
5643 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5644 g_assert (cfg->num_bblocks == 2);
5646 arg_array = cfg->args;
5648 if (header->num_clauses) {
5649 cfg->spvars = g_hash_table_new (NULL, NULL);
5650 cfg->exvars = g_hash_table_new (NULL, NULL);
5652 /* handle exception clauses */
5653 for (i = 0; i < header->num_clauses; ++i) {
5654 MonoBasicBlock *try_bb;
5655 MonoExceptionClause *clause = &header->clauses [i];
5656 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5657 try_bb->real_offset = clause->try_offset;
5658 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5659 tblock->real_offset = clause->handler_offset;
5660 tblock->flags |= BB_EXCEPTION_HANDLER;
5662 link_bblock (cfg, try_bb, tblock);
5664 if (*(ip + clause->handler_offset) == CEE_POP)
5665 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5667 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5668 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5669 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5670 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5671 MONO_ADD_INS (tblock, ins);
5673 /* todo: is a fault block unsafe to optimize? */
5674 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5675 tblock->flags |= BB_EXCEPTION_UNSAFE;
5679 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5681 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5683 /* catch and filter blocks get the exception object on the stack */
5684 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5685 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5686 MonoInst *dummy_use;
5688 /* mostly like handle_stack_args (), but just sets the input args */
5689 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5690 tblock->in_scount = 1;
5691 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5692 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5695 * Add a dummy use for the exvar so its liveness info will be
5699 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5701 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5702 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5703 tblock->flags |= BB_EXCEPTION_HANDLER;
5704 tblock->real_offset = clause->data.filter_offset;
5705 tblock->in_scount = 1;
5706 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5707 /* The filter block shares the exvar with the handler block */
5708 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5709 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5710 MONO_ADD_INS (tblock, ins);
5714 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5715 clause->data.catch_class &&
5716 cfg->generic_sharing_context &&
5717 mono_class_check_context_used (clause->data.catch_class)) {
5719 * In shared generic code with catch
5720 * clauses containing type variables
5721 * the exception handling code has to
5722 * be able to get to the rgctx.
5723 * Therefore we have to make sure that
5724 * the vtable/mrgctx argument (for
5725 * static or generic methods) or the
5726 * "this" argument (for non-static
5727 * methods) are live.
5729 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5730 mini_method_get_context (method)->method_inst ||
5731 method->klass->valuetype) {
5732 mono_get_vtable_var (cfg);
5734 MonoInst *dummy_use;
5736 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5741 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5742 cfg->cbb = start_bblock;
5743 cfg->args = arg_array;
5744 mono_save_args (cfg, sig, inline_args);
5747 /* FIRST CODE BLOCK */
5748 NEW_BBLOCK (cfg, bblock);
5749 bblock->cil_code = ip;
5753 ADD_BBLOCK (cfg, bblock);
5755 if (cfg->method == method) {
5756 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5757 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5758 MONO_INST_NEW (cfg, ins, OP_BREAK);
5759 MONO_ADD_INS (bblock, ins);
5763 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5764 secman = mono_security_manager_get_methods ();
5766 security = (secman && mono_method_has_declsec (method));
5767 /* at this point having security doesn't mean we have any code to generate */
5768 if (security && (cfg->method == method)) {
5769 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5770 * And we do not want to enter the next section (with allocation) if we
5771 * have nothing to generate */
5772 security = mono_declsec_get_demands (method, &actions);
5775 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5776 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5778 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5779 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5780 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5782 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5783 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5787 mono_custom_attrs_free (custom);
5790 custom = mono_custom_attrs_from_class (wrapped->klass);
5791 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5795 mono_custom_attrs_free (custom);
5798 /* not a P/Invoke after all */
5803 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5804 /* we use a separate basic block for the initialization code */
5805 NEW_BBLOCK (cfg, init_localsbb);
5806 cfg->bb_init = init_localsbb;
5807 init_localsbb->real_offset = cfg->real_offset;
5808 start_bblock->next_bb = init_localsbb;
5809 init_localsbb->next_bb = bblock;
5810 link_bblock (cfg, start_bblock, init_localsbb);
5811 link_bblock (cfg, init_localsbb, bblock);
5813 cfg->cbb = init_localsbb;
5815 start_bblock->next_bb = bblock;
5816 link_bblock (cfg, start_bblock, bblock);
5819 /* at this point we know, if security is TRUE, that some code needs to be generated */
5820 if (security && (cfg->method == method)) {
5823 mono_jit_stats.cas_demand_generation++;
5825 if (actions.demand.blob) {
5826 /* Add code for SecurityAction.Demand */
5827 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5828 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5829 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5830 mono_emit_method_call (cfg, secman->demand, args, NULL);
5832 if (actions.noncasdemand.blob) {
5833 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5834 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5835 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5836 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5837 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5838 mono_emit_method_call (cfg, secman->demand, args, NULL);
5840 if (actions.demandchoice.blob) {
5841 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5842 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5843 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5844 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5845 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5849 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5851 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5854 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5855 /* check if this is native code, e.g. an icall or a p/invoke */
5856 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5857 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5859 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5860 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5862 /* if this is a native call then it can only be JITted from platform code */
5863 if ((icall || pinvk) && method->klass && method->klass->image) {
5864 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5865 MonoException *ex = icall ? mono_get_exception_security () :
5866 mono_get_exception_method_access ();
5867 emit_throw_exception (cfg, ex);
5874 if (header->code_size == 0)
5877 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5882 if (cfg->method == method)
5883 mono_debug_init_method (cfg, bblock, breakpoint_id);
5885 for (n = 0; n < header->num_locals; ++n) {
5886 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5891 /* We force the vtable variable here for all shared methods
5892 for the possibility that they might show up in a stack
5893 trace where their exact instantiation is needed. */
5894 if (cfg->generic_sharing_context && method == cfg->method) {
5895 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5896 mini_method_get_context (method)->method_inst ||
5897 method->klass->valuetype) {
5898 mono_get_vtable_var (cfg);
5900 /* FIXME: Is there a better way to do this?
5901 We need the variable live for the duration
5902 of the whole method. */
5903 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5907 /* add a check for this != NULL to inlined methods */
5908 if (is_virtual_call) {
5911 NEW_ARGLOAD (cfg, arg_ins, 0);
5912 MONO_ADD_INS (cfg->cbb, arg_ins);
5913 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5916 skip_dead_blocks = !dont_verify;
5917 if (skip_dead_blocks) {
5918 original_bb = bb = mono_basic_block_split (method, &error);
5919 if (!mono_error_ok (&error)) {
5920 mono_error_cleanup (&error);
5926 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5927 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5930 start_new_bblock = 0;
5933 if (cfg->method == method)
5934 cfg->real_offset = ip - header->code;
5936 cfg->real_offset = inline_offset;
5941 if (start_new_bblock) {
5942 bblock->cil_length = ip - bblock->cil_code;
5943 if (start_new_bblock == 2) {
5944 g_assert (ip == tblock->cil_code);
5946 GET_BBLOCK (cfg, tblock, ip);
5948 bblock->next_bb = tblock;
5951 start_new_bblock = 0;
5952 for (i = 0; i < bblock->in_scount; ++i) {
5953 if (cfg->verbose_level > 3)
5954 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5955 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5959 g_slist_free (class_inits);
5962 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5963 link_bblock (cfg, bblock, tblock);
5964 if (sp != stack_start) {
5965 handle_stack_args (cfg, stack_start, sp - stack_start);
5967 CHECK_UNVERIFIABLE (cfg);
5969 bblock->next_bb = tblock;
5972 for (i = 0; i < bblock->in_scount; ++i) {
5973 if (cfg->verbose_level > 3)
5974 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5975 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5978 g_slist_free (class_inits);
5983 if (skip_dead_blocks) {
5984 int ip_offset = ip - header->code;
5986 if (ip_offset == bb->end)
5990 int op_size = mono_opcode_size (ip, end);
5991 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5993 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5995 if (ip_offset + op_size == bb->end) {
5996 MONO_INST_NEW (cfg, ins, OP_NOP);
5997 MONO_ADD_INS (bblock, ins);
5998 start_new_bblock = 1;
6006 * Sequence points are points where the debugger can place a breakpoint.
6007 * Currently, we generate these automatically at points where the IL
6010 if (seq_points && sp == stack_start) {
6011 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6012 MONO_ADD_INS (cfg->cbb, ins);
6015 bblock->real_offset = cfg->real_offset;
6017 if ((cfg->method == method) && cfg->coverage_info) {
6018 guint32 cil_offset = ip - header->code;
6019 cfg->coverage_info->data [cil_offset].cil_code = ip;
6021 /* TODO: Use an increment here */
6022 #if defined(TARGET_X86)
6023 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6024 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6026 MONO_ADD_INS (cfg->cbb, ins);
6028 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6029 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6033 if (cfg->verbose_level > 3)
6034 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6038 if (cfg->keep_cil_nops)
6039 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6041 MONO_INST_NEW (cfg, ins, OP_NOP);
6043 MONO_ADD_INS (bblock, ins);
6046 if (should_insert_brekpoint (cfg->method))
6047 MONO_INST_NEW (cfg, ins, OP_BREAK);
6049 MONO_INST_NEW (cfg, ins, OP_NOP);
6051 MONO_ADD_INS (bblock, ins);
6057 CHECK_STACK_OVF (1);
6058 n = (*ip)-CEE_LDARG_0;
6060 EMIT_NEW_ARGLOAD (cfg, ins, n);
6068 CHECK_STACK_OVF (1);
6069 n = (*ip)-CEE_LDLOC_0;
6071 EMIT_NEW_LOCLOAD (cfg, ins, n);
6080 n = (*ip)-CEE_STLOC_0;
6083 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6085 emit_stloc_ir (cfg, sp, header, n);
6092 CHECK_STACK_OVF (1);
6095 EMIT_NEW_ARGLOAD (cfg, ins, n);
6101 CHECK_STACK_OVF (1);
6104 NEW_ARGLOADA (cfg, ins, n);
6105 MONO_ADD_INS (cfg->cbb, ins);
6115 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6117 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6122 CHECK_STACK_OVF (1);
6125 EMIT_NEW_LOCLOAD (cfg, ins, n);
6129 case CEE_LDLOCA_S: {
6130 unsigned char *tmp_ip;
6132 CHECK_STACK_OVF (1);
6133 CHECK_LOCAL (ip [1]);
6135 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6141 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6150 CHECK_LOCAL (ip [1]);
6151 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6153 emit_stloc_ir (cfg, sp, header, ip [1]);
6158 CHECK_STACK_OVF (1);
6159 EMIT_NEW_PCONST (cfg, ins, NULL);
6160 ins->type = STACK_OBJ;
6165 CHECK_STACK_OVF (1);
6166 EMIT_NEW_ICONST (cfg, ins, -1);
6179 CHECK_STACK_OVF (1);
6180 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6186 CHECK_STACK_OVF (1);
6188 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6194 CHECK_STACK_OVF (1);
6195 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6201 CHECK_STACK_OVF (1);
6202 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6203 ins->type = STACK_I8;
6204 ins->dreg = alloc_dreg (cfg, STACK_I8);
6206 ins->inst_l = (gint64)read64 (ip);
6207 MONO_ADD_INS (bblock, ins);
6213 gboolean use_aotconst = FALSE;
6215 #ifdef TARGET_POWERPC
6216 /* FIXME: Clean this up */
6217 if (cfg->compile_aot)
6218 use_aotconst = TRUE;
6221 /* FIXME: we should really allocate this only late in the compilation process */
6222 f = mono_domain_alloc (cfg->domain, sizeof (float));
6224 CHECK_STACK_OVF (1);
6230 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6232 dreg = alloc_freg (cfg);
6233 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6234 ins->type = STACK_R8;
6236 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6237 ins->type = STACK_R8;
6238 ins->dreg = alloc_dreg (cfg, STACK_R8);
6240 MONO_ADD_INS (bblock, ins);
6250 gboolean use_aotconst = FALSE;
6252 #ifdef TARGET_POWERPC
6253 /* FIXME: Clean this up */
6254 if (cfg->compile_aot)
6255 use_aotconst = TRUE;
6258 /* FIXME: we should really allocate this only late in the compilation process */
6259 d = mono_domain_alloc (cfg->domain, sizeof (double));
6261 CHECK_STACK_OVF (1);
6267 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6269 dreg = alloc_freg (cfg);
6270 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6271 ins->type = STACK_R8;
6273 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6274 ins->type = STACK_R8;
6275 ins->dreg = alloc_dreg (cfg, STACK_R8);
6277 MONO_ADD_INS (bblock, ins);
6286 MonoInst *temp, *store;
6288 CHECK_STACK_OVF (1);
6292 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6293 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6295 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6298 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6311 if (sp [0]->type == STACK_R8)
6312 /* we need to pop the value from the x86 FP stack */
6313 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6322 if (stack_start != sp)
6324 token = read32 (ip + 1);
6325 /* FIXME: check the signature matches */
6326 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6328 if (!cmethod || mono_loader_get_last_error ())
6331 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6332 GENERIC_SHARING_FAILURE (CEE_JMP);
6334 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6335 CHECK_CFG_EXCEPTION;
6337 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6339 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6342 /* Handle tail calls similarly to calls */
6343 n = fsig->param_count + fsig->hasthis;
6345 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6346 call->method = cmethod;
6347 call->tail_call = TRUE;
6348 call->signature = mono_method_signature (cmethod);
6349 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6350 call->inst.inst_p0 = cmethod;
6351 for (i = 0; i < n; ++i)
6352 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6354 mono_arch_emit_call (cfg, call);
6355 MONO_ADD_INS (bblock, (MonoInst*)call);
6358 for (i = 0; i < num_args; ++i)
6359 /* Prevent arguments from being optimized away */
6360 arg_array [i]->flags |= MONO_INST_VOLATILE;
6362 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6363 ins = (MonoInst*)call;
6364 ins->inst_p0 = cmethod;
6365 MONO_ADD_INS (bblock, ins);
6369 start_new_bblock = 1;
6374 case CEE_CALLVIRT: {
6375 MonoInst *addr = NULL;
6376 MonoMethodSignature *fsig = NULL;
6378 int virtual = *ip == CEE_CALLVIRT;
6379 int calli = *ip == CEE_CALLI;
6380 gboolean pass_imt_from_rgctx = FALSE;
6381 MonoInst *imt_arg = NULL;
6382 gboolean pass_vtable = FALSE;
6383 gboolean pass_mrgctx = FALSE;
6384 MonoInst *vtable_arg = NULL;
6385 gboolean check_this = FALSE;
6386 gboolean supported_tail_call = FALSE;
6389 token = read32 (ip + 1);
6396 if (method->wrapper_type != MONO_WRAPPER_NONE)
6397 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6399 fsig = mono_metadata_parse_signature (image, token);
6401 n = fsig->param_count + fsig->hasthis;
6403 if (method->dynamic && fsig->pinvoke) {
6407 * This is a call through a function pointer using a pinvoke
6408 * signature. Have to create a wrapper and call that instead.
6409 * FIXME: This is very slow, need to create a wrapper at JIT time
6410 * instead based on the signature.
6412 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6413 EMIT_NEW_PCONST (cfg, args [1], fsig);
6415 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6418 MonoMethod *cil_method;
6420 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6421 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6422 cil_method = cmethod;
6423 } else if (constrained_call) {
6424 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6426 * This is needed since get_method_constrained can't find
6427 * the method in klass representing a type var.
6428 * The type var is guaranteed to be a reference type in this
6431 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6432 cil_method = cmethod;
6433 g_assert (!cmethod->klass->valuetype);
6435 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6438 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6439 cil_method = cmethod;
6442 if (!cmethod || mono_loader_get_last_error ())
6444 if (!dont_verify && !cfg->skip_visibility) {
6445 MonoMethod *target_method = cil_method;
6446 if (method->is_inflated) {
6447 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6449 if (!mono_method_can_access_method (method_definition, target_method) &&
6450 !mono_method_can_access_method (method, cil_method))
6451 METHOD_ACCESS_FAILURE;
6454 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6455 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6457 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6458 /* MS.NET seems to silently convert this to a callvirt */
6463 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6464 * converts to a callvirt.
6466 * tests/bug-515884.il is an example of this behavior
6468 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6469 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6470 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6474 if (!cmethod->klass->inited)
6475 if (!mono_class_init (cmethod->klass))
6478 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6479 mini_class_is_system_array (cmethod->klass)) {
6480 array_rank = cmethod->klass->rank;
6481 fsig = mono_method_signature (cmethod);
6483 fsig = mono_method_signature (cmethod);
6488 if (fsig->pinvoke) {
6489 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6490 check_for_pending_exc, FALSE);
6491 fsig = mono_method_signature (wrapper);
6492 } else if (constrained_call) {
6493 fsig = mono_method_signature (cmethod);
6495 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6499 mono_save_token_info (cfg, image, token, cil_method);
6501 n = fsig->param_count + fsig->hasthis;
6503 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6504 if (check_linkdemand (cfg, method, cmethod))
6506 CHECK_CFG_EXCEPTION;
6509 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6510 g_assert_not_reached ();
6513 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6516 if (!cfg->generic_sharing_context && cmethod)
6517 g_assert (!mono_method_check_context_used (cmethod));
6521 //g_assert (!virtual || fsig->hasthis);
6525 if (constrained_call) {
6527 * We have the `constrained.' prefix opcode.
6529 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6531 * The type parameter is instantiated as a valuetype,
6532 * but that type doesn't override the method we're
6533 * calling, so we need to box `this'.
6535 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6536 ins->klass = constrained_call;
6537 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6538 CHECK_CFG_EXCEPTION;
6539 } else if (!constrained_call->valuetype) {
6540 int dreg = alloc_preg (cfg);
6543 * The type parameter is instantiated as a reference
6544 * type. We have a managed pointer on the stack, so
6545 * we need to dereference it here.
6547 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6548 ins->type = STACK_OBJ;
6550 } else if (cmethod->klass->valuetype)
6552 constrained_call = NULL;
6555 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6559 * If the callee is a shared method, then its static cctor
6560 * might not get called after the call was patched.
6562 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6563 emit_generic_class_init (cfg, cmethod->klass);
6564 CHECK_TYPELOAD (cmethod->klass);
6567 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6568 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6569 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6570 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6571 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6574 * Pass vtable iff target method might
6575 * be shared, which means that sharing
6576 * is enabled for its class and its
6577 * context is sharable (and it's not a
6580 if (sharing_enabled && context_sharable &&
6581 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6585 if (cmethod && mini_method_get_context (cmethod) &&
6586 mini_method_get_context (cmethod)->method_inst) {
6587 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6588 MonoGenericContext *context = mini_method_get_context (cmethod);
6589 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6591 g_assert (!pass_vtable);
6593 if (sharing_enabled && context_sharable)
6597 if (cfg->generic_sharing_context && cmethod) {
6598 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6600 context_used = mono_method_check_context_used (cmethod);
6602 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6603 /* Generic method interface
6604 calls are resolved via a
6605 helper function and don't
6607 if (!cmethod_context || !cmethod_context->method_inst)
6608 pass_imt_from_rgctx = TRUE;
6612 * If a shared method calls another
6613 * shared method then the caller must
6614 * have a generic sharing context
6615 * because the magic trampoline
6616 * requires it. FIXME: We shouldn't
6617 * have to force the vtable/mrgctx
6618 * variable here. Instead there
6619 * should be a flag in the cfg to
6620 * request a generic sharing context.
6623 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6624 mono_get_vtable_var (cfg);
6629 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6631 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6633 CHECK_TYPELOAD (cmethod->klass);
6634 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6639 g_assert (!vtable_arg);
6641 if (!cfg->compile_aot) {
6643 * emit_get_rgctx_method () calls mono_class_vtable () so check
6644 * for type load errors before.
6646 mono_class_setup_vtable (cmethod->klass);
6647 CHECK_TYPELOAD (cmethod->klass);
6650 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6652 /* !marshalbyref is needed to properly handle generic methods + remoting */
6653 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6654 MONO_METHOD_IS_FINAL (cmethod)) &&
6655 !cmethod->klass->marshalbyref) {
6662 if (pass_imt_from_rgctx) {
6663 g_assert (!pass_vtable);
6666 imt_arg = emit_get_rgctx_method (cfg, context_used,
6667 cmethod, MONO_RGCTX_INFO_METHOD);
6671 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6673 /* Calling virtual generic methods */
6674 if (cmethod && virtual &&
6675 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6676 !(MONO_METHOD_IS_FINAL (cmethod) &&
6677 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6678 mono_method_signature (cmethod)->generic_param_count) {
6679 MonoInst *this_temp, *this_arg_temp, *store;
6680 MonoInst *iargs [4];
6682 g_assert (mono_method_signature (cmethod)->is_inflated);
6684 /* Prevent inlining of methods that contain indirect calls */
6687 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6688 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6689 g_assert (!imt_arg);
6691 g_assert (cmethod->is_inflated);
6692 imt_arg = emit_get_rgctx_method (cfg, context_used,
6693 cmethod, MONO_RGCTX_INFO_METHOD);
6694 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6698 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6699 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6700 MONO_ADD_INS (bblock, store);
6702 /* FIXME: This should be a managed pointer */
6703 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6705 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6706 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6707 cmethod, MONO_RGCTX_INFO_METHOD);
6708 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6709 addr = mono_emit_jit_icall (cfg,
6710 mono_helper_compile_generic_method, iargs);
6712 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6714 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6717 if (!MONO_TYPE_IS_VOID (fsig->ret))
6718 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6720 CHECK_CFG_EXCEPTION;
6727 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6728 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6730 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6734 /* FIXME: runtime generic context pointer for jumps? */
6735 /* FIXME: handle this for generic sharing eventually */
6736 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6739 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6742 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6743 /* Handle tail calls similarly to calls */
6744 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6746 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6747 call->tail_call = TRUE;
6748 call->method = cmethod;
6749 call->signature = mono_method_signature (cmethod);
6752 * We implement tail calls by storing the actual arguments into the
6753 * argument variables, then emitting a CEE_JMP.
6755 for (i = 0; i < n; ++i) {
6756 /* Prevent argument from being register allocated */
6757 arg_array [i]->flags |= MONO_INST_VOLATILE;
6758 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6762 ins = (MonoInst*)call;
6763 ins->inst_p0 = cmethod;
6764 ins->inst_p1 = arg_array [0];
6765 MONO_ADD_INS (bblock, ins);
6766 link_bblock (cfg, bblock, end_bblock);
6767 start_new_bblock = 1;
6769 CHECK_CFG_EXCEPTION;
6771 /* skip CEE_RET as well */
6777 /* Conversion to a JIT intrinsic */
6778 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6780 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6781 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6786 CHECK_CFG_EXCEPTION;
6794 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6795 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6796 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6797 !g_list_find (dont_inline, cmethod)) {
6799 gboolean allways = FALSE;
6801 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6802 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6803 /* Prevent inlining of methods that call wrappers */
6805 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6809 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6811 cfg->real_offset += 5;
6814 if (!MONO_TYPE_IS_VOID (fsig->ret))
6815 /* *sp is already set by inline_method */
6818 inline_costs += costs;
6824 inline_costs += 10 * num_calls++;
6826 /* Tail recursion elimination */
6827 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6828 gboolean has_vtargs = FALSE;
6831 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6834 /* keep it simple */
6835 for (i = fsig->param_count - 1; i >= 0; i--) {
6836 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6841 for (i = 0; i < n; ++i)
6842 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6843 MONO_INST_NEW (cfg, ins, OP_BR);
6844 MONO_ADD_INS (bblock, ins);
6845 tblock = start_bblock->out_bb [0];
6846 link_bblock (cfg, bblock, tblock);
6847 ins->inst_target_bb = tblock;
6848 start_new_bblock = 1;
6850 /* skip the CEE_RET, too */
6851 if (ip_in_bb (cfg, bblock, ip + 5))
6861 /* Generic sharing */
6862 /* FIXME: only do this for generic methods if
6863 they are not shared! */
6864 if (context_used && !imt_arg && !array_rank &&
6865 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6866 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6867 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6868 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6871 g_assert (cfg->generic_sharing_context && cmethod);
6875 * We are compiling a call to a
6876 * generic method from shared code,
6877 * which means that we have to look up
6878 * the method in the rgctx and do an
6881 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6884 /* Indirect calls */
6886 g_assert (!imt_arg);
6888 if (*ip == CEE_CALL)
6889 g_assert (context_used);
6890 else if (*ip == CEE_CALLI)
6891 g_assert (!vtable_arg);
6893 /* FIXME: what the hell is this??? */
6894 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6895 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6897 /* Prevent inlining of methods with indirect calls */
6902 int rgctx_reg = mono_alloc_preg (cfg);
6904 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6905 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6906 call = (MonoCallInst*)ins;
6907 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6909 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6911 * Instead of emitting an indirect call, emit a direct call
6912 * with the contents of the aotconst as the patch info.
6914 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6916 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6917 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6920 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6923 if (!MONO_TYPE_IS_VOID (fsig->ret))
6924 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6926 CHECK_CFG_EXCEPTION;
6937 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6938 if (sp [fsig->param_count]->type == STACK_OBJ) {
6939 MonoInst *iargs [2];
6942 iargs [1] = sp [fsig->param_count];
6944 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6947 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6948 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6949 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6950 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6952 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6955 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6956 if (!cmethod->klass->element_class->valuetype && !readonly)
6957 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6958 CHECK_TYPELOAD (cmethod->klass);
6961 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6964 g_assert_not_reached ();
6967 CHECK_CFG_EXCEPTION;
6974 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6976 if (!MONO_TYPE_IS_VOID (fsig->ret))
6977 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6979 CHECK_CFG_EXCEPTION;
6989 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6991 } else if (imt_arg) {
6992 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6994 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6997 if (!MONO_TYPE_IS_VOID (fsig->ret))
6998 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7000 CHECK_CFG_EXCEPTION;
7007 if (cfg->method != method) {
7008 /* return from inlined method */
7010 * If in_count == 0, that means the ret is unreachable due to
7011 * being preceded by a throw. In that case, inline_method () will
7012 * handle setting the return value
7013 * (test case: test_0_inline_throw ()).
7015 if (return_var && cfg->cbb->in_count) {
7019 //g_assert (returnvar != -1);
7020 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7021 cfg->ret_var_set = TRUE;
7025 MonoType *ret_type = mono_method_signature (method)->ret;
7029 * Place a seq point here too even though the IL stack is not
7030 * empty, so a step over on
7033 * will work correctly.
7035 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7036 MONO_ADD_INS (cfg->cbb, ins);
7039 g_assert (!return_var);
7042 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7045 if (!cfg->vret_addr) {
7048 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7050 EMIT_NEW_RETLOADA (cfg, ret_addr);
7052 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7053 ins->klass = mono_class_from_mono_type (ret_type);
7056 #ifdef MONO_ARCH_SOFT_FLOAT
7057 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7058 MonoInst *iargs [1];
7062 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7063 mono_arch_emit_setret (cfg, method, conv);
7065 mono_arch_emit_setret (cfg, method, *sp);
7068 mono_arch_emit_setret (cfg, method, *sp);
7073 if (sp != stack_start)
7075 MONO_INST_NEW (cfg, ins, OP_BR);
7077 ins->inst_target_bb = end_bblock;
7078 MONO_ADD_INS (bblock, ins);
7079 link_bblock (cfg, bblock, end_bblock);
7080 start_new_bblock = 1;
7084 MONO_INST_NEW (cfg, ins, OP_BR);
7086 target = ip + 1 + (signed char)(*ip);
7088 GET_BBLOCK (cfg, tblock, target);
7089 link_bblock (cfg, bblock, tblock);
7090 ins->inst_target_bb = tblock;
7091 if (sp != stack_start) {
7092 handle_stack_args (cfg, stack_start, sp - stack_start);
7094 CHECK_UNVERIFIABLE (cfg);
7096 MONO_ADD_INS (bblock, ins);
7097 start_new_bblock = 1;
7098 inline_costs += BRANCH_COST;
7112 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7114 target = ip + 1 + *(signed char*)ip;
7120 inline_costs += BRANCH_COST;
7124 MONO_INST_NEW (cfg, ins, OP_BR);
7127 target = ip + 4 + (gint32)read32(ip);
7129 GET_BBLOCK (cfg, tblock, target);
7130 link_bblock (cfg, bblock, tblock);
7131 ins->inst_target_bb = tblock;
7132 if (sp != stack_start) {
7133 handle_stack_args (cfg, stack_start, sp - stack_start);
7135 CHECK_UNVERIFIABLE (cfg);
7138 MONO_ADD_INS (bblock, ins);
7140 start_new_bblock = 1;
7141 inline_costs += BRANCH_COST;
7148 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7149 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7150 guint32 opsize = is_short ? 1 : 4;
7152 CHECK_OPSIZE (opsize);
7154 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7157 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7162 GET_BBLOCK (cfg, tblock, target);
7163 link_bblock (cfg, bblock, tblock);
7164 GET_BBLOCK (cfg, tblock, ip);
7165 link_bblock (cfg, bblock, tblock);
7167 if (sp != stack_start) {
7168 handle_stack_args (cfg, stack_start, sp - stack_start);
7169 CHECK_UNVERIFIABLE (cfg);
7172 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7173 cmp->sreg1 = sp [0]->dreg;
7174 type_from_op (cmp, sp [0], NULL);
7177 #if SIZEOF_REGISTER == 4
7178 if (cmp->opcode == OP_LCOMPARE_IMM) {
7179 /* Convert it to OP_LCOMPARE */
7180 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7181 ins->type = STACK_I8;
7182 ins->dreg = alloc_dreg (cfg, STACK_I8);
7184 MONO_ADD_INS (bblock, ins);
7185 cmp->opcode = OP_LCOMPARE;
7186 cmp->sreg2 = ins->dreg;
7189 MONO_ADD_INS (bblock, cmp);
7191 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7192 type_from_op (ins, sp [0], NULL);
7193 MONO_ADD_INS (bblock, ins);
7194 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7195 GET_BBLOCK (cfg, tblock, target);
7196 ins->inst_true_bb = tblock;
7197 GET_BBLOCK (cfg, tblock, ip);
7198 ins->inst_false_bb = tblock;
7199 start_new_bblock = 2;
7202 inline_costs += BRANCH_COST;
7217 MONO_INST_NEW (cfg, ins, *ip);
7219 target = ip + 4 + (gint32)read32(ip);
7225 inline_costs += BRANCH_COST;
7229 MonoBasicBlock **targets;
7230 MonoBasicBlock *default_bblock;
7231 MonoJumpInfoBBTable *table;
7232 int offset_reg = alloc_preg (cfg);
7233 int target_reg = alloc_preg (cfg);
7234 int table_reg = alloc_preg (cfg);
7235 int sum_reg = alloc_preg (cfg);
7236 gboolean use_op_switch;
7240 n = read32 (ip + 1);
7243 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7247 CHECK_OPSIZE (n * sizeof (guint32));
7248 target = ip + n * sizeof (guint32);
7250 GET_BBLOCK (cfg, default_bblock, target);
7251 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7253 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7254 for (i = 0; i < n; ++i) {
7255 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7256 targets [i] = tblock;
7257 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7261 if (sp != stack_start) {
7263 * Link the current bb with the targets as well, so handle_stack_args
7264 * will set their in_stack correctly.
7266 link_bblock (cfg, bblock, default_bblock);
7267 for (i = 0; i < n; ++i)
7268 link_bblock (cfg, bblock, targets [i]);
7270 handle_stack_args (cfg, stack_start, sp - stack_start);
7272 CHECK_UNVERIFIABLE (cfg);
7275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7279 for (i = 0; i < n; ++i)
7280 link_bblock (cfg, bblock, targets [i]);
7282 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7283 table->table = targets;
7284 table->table_size = n;
7286 use_op_switch = FALSE;
7288 /* ARM implements SWITCH statements differently */
7289 /* FIXME: Make it use the generic implementation */
7290 if (!cfg->compile_aot)
7291 use_op_switch = TRUE;
7294 if (COMPILE_LLVM (cfg))
7295 use_op_switch = TRUE;
7297 cfg->cbb->has_jump_table = 1;
7299 if (use_op_switch) {
7300 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7301 ins->sreg1 = src1->dreg;
7302 ins->inst_p0 = table;
7303 ins->inst_many_bb = targets;
7304 ins->klass = GUINT_TO_POINTER (n);
7305 MONO_ADD_INS (cfg->cbb, ins);
7307 if (sizeof (gpointer) == 8)
7308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7310 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7312 #if SIZEOF_REGISTER == 8
7313 /* The upper word might not be zero, and we add it to a 64 bit address later */
7314 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7317 if (cfg->compile_aot) {
7318 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7320 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7321 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7322 ins->inst_p0 = table;
7323 ins->dreg = table_reg;
7324 MONO_ADD_INS (cfg->cbb, ins);
7327 /* FIXME: Use load_memindex */
7328 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7330 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7332 start_new_bblock = 1;
7333 inline_costs += (BRANCH_COST * 2);
7353 dreg = alloc_freg (cfg);
7356 dreg = alloc_lreg (cfg);
7359 dreg = alloc_preg (cfg);
7362 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7363 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7364 ins->flags |= ins_flag;
7366 MONO_ADD_INS (bblock, ins);
7381 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7382 ins->flags |= ins_flag;
7384 MONO_ADD_INS (bblock, ins);
7386 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7387 emit_write_barrier (cfg, sp [0], sp [1], -1);
7396 MONO_INST_NEW (cfg, ins, (*ip));
7398 ins->sreg1 = sp [0]->dreg;
7399 ins->sreg2 = sp [1]->dreg;
7400 type_from_op (ins, sp [0], sp [1]);
7402 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7404 /* Use the immediate opcodes if possible */
7405 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7406 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7407 if (imm_opcode != -1) {
7408 ins->opcode = imm_opcode;
7409 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7412 sp [1]->opcode = OP_NOP;
7416 MONO_ADD_INS ((cfg)->cbb, (ins));
7418 *sp++ = mono_decompose_opcode (cfg, ins);
7435 MONO_INST_NEW (cfg, ins, (*ip));
7437 ins->sreg1 = sp [0]->dreg;
7438 ins->sreg2 = sp [1]->dreg;
7439 type_from_op (ins, sp [0], sp [1]);
7441 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7442 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7444 /* FIXME: Pass opcode to is_inst_imm */
7446 /* Use the immediate opcodes if possible */
7447 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7450 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7451 if (imm_opcode != -1) {
7452 ins->opcode = imm_opcode;
7453 if (sp [1]->opcode == OP_I8CONST) {
7454 #if SIZEOF_REGISTER == 8
7455 ins->inst_imm = sp [1]->inst_l;
7457 ins->inst_ls_word = sp [1]->inst_ls_word;
7458 ins->inst_ms_word = sp [1]->inst_ms_word;
7462 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7465 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7466 if (sp [1]->next == NULL)
7467 sp [1]->opcode = OP_NOP;
7470 MONO_ADD_INS ((cfg)->cbb, (ins));
7472 *sp++ = mono_decompose_opcode (cfg, ins);
7485 case CEE_CONV_OVF_I8:
7486 case CEE_CONV_OVF_U8:
7490 /* Special case this earlier so we have long constants in the IR */
7491 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7492 int data = sp [-1]->inst_c0;
7493 sp [-1]->opcode = OP_I8CONST;
7494 sp [-1]->type = STACK_I8;
7495 #if SIZEOF_REGISTER == 8
7496 if ((*ip) == CEE_CONV_U8)
7497 sp [-1]->inst_c0 = (guint32)data;
7499 sp [-1]->inst_c0 = data;
7501 sp [-1]->inst_ls_word = data;
7502 if ((*ip) == CEE_CONV_U8)
7503 sp [-1]->inst_ms_word = 0;
7505 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7507 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7514 case CEE_CONV_OVF_I4:
7515 case CEE_CONV_OVF_I1:
7516 case CEE_CONV_OVF_I2:
7517 case CEE_CONV_OVF_I:
7518 case CEE_CONV_OVF_U:
7521 if (sp [-1]->type == STACK_R8) {
7522 ADD_UNOP (CEE_CONV_OVF_I8);
7529 case CEE_CONV_OVF_U1:
7530 case CEE_CONV_OVF_U2:
7531 case CEE_CONV_OVF_U4:
7534 if (sp [-1]->type == STACK_R8) {
7535 ADD_UNOP (CEE_CONV_OVF_U8);
7542 case CEE_CONV_OVF_I1_UN:
7543 case CEE_CONV_OVF_I2_UN:
7544 case CEE_CONV_OVF_I4_UN:
7545 case CEE_CONV_OVF_I8_UN:
7546 case CEE_CONV_OVF_U1_UN:
7547 case CEE_CONV_OVF_U2_UN:
7548 case CEE_CONV_OVF_U4_UN:
7549 case CEE_CONV_OVF_U8_UN:
7550 case CEE_CONV_OVF_I_UN:
7551 case CEE_CONV_OVF_U_UN:
7558 CHECK_CFG_EXCEPTION;
7562 case CEE_ADD_OVF_UN:
7564 case CEE_MUL_OVF_UN:
7566 case CEE_SUB_OVF_UN:
7574 token = read32 (ip + 1);
7575 klass = mini_get_class (method, token, generic_context);
7576 CHECK_TYPELOAD (klass);
7578 if (generic_class_is_reference_type (cfg, klass)) {
7579 MonoInst *store, *load;
7580 int dreg = alloc_preg (cfg);
7582 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7583 load->flags |= ins_flag;
7584 MONO_ADD_INS (cfg->cbb, load);
7586 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7587 store->flags |= ins_flag;
7588 MONO_ADD_INS (cfg->cbb, store);
7590 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7591 emit_write_barrier (cfg, sp [0], sp [1], -1);
7593 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7605 token = read32 (ip + 1);
7606 klass = mini_get_class (method, token, generic_context);
7607 CHECK_TYPELOAD (klass);
7609 /* Optimize the common ldobj+stloc combination */
7619 loc_index = ip [5] - CEE_STLOC_0;
7626 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7627 CHECK_LOCAL (loc_index);
7629 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7630 ins->dreg = cfg->locals [loc_index]->dreg;
7636 /* Optimize the ldobj+stobj combination */
7637 /* The reference case ends up being a load+store anyway */
7638 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7643 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7650 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7659 CHECK_STACK_OVF (1);
7661 n = read32 (ip + 1);
7663 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7664 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7665 ins->type = STACK_OBJ;
7668 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7669 MonoInst *iargs [1];
7671 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7672 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7674 if (cfg->opt & MONO_OPT_SHARED) {
7675 MonoInst *iargs [3];
7677 if (cfg->compile_aot) {
7678 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7680 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7681 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7682 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7683 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7684 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7686 if (bblock->out_of_line) {
7687 MonoInst *iargs [2];
7689 if (image == mono_defaults.corlib) {
7691 * Avoid relocations in AOT and save some space by using a
7692 * version of helper_ldstr specialized to mscorlib.
7694 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7695 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7697 /* Avoid creating the string object */
7698 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7699 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7700 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7704 if (cfg->compile_aot) {
7705 NEW_LDSTRCONST (cfg, ins, image, n);
7707 MONO_ADD_INS (bblock, ins);
7710 NEW_PCONST (cfg, ins, NULL);
7711 ins->type = STACK_OBJ;
7712 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7714 MONO_ADD_INS (bblock, ins);
7723 MonoInst *iargs [2];
7724 MonoMethodSignature *fsig;
7727 MonoInst *vtable_arg = NULL;
7730 token = read32 (ip + 1);
7731 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7732 if (!cmethod || mono_loader_get_last_error ())
7734 fsig = mono_method_get_signature (cmethod, image, token);
7738 mono_save_token_info (cfg, image, token, cmethod);
7740 if (!mono_class_init (cmethod->klass))
7743 if (cfg->generic_sharing_context)
7744 context_used = mono_method_check_context_used (cmethod);
7746 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7747 if (check_linkdemand (cfg, method, cmethod))
7749 CHECK_CFG_EXCEPTION;
7750 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7751 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7754 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7755 emit_generic_class_init (cfg, cmethod->klass);
7756 CHECK_TYPELOAD (cmethod->klass);
7759 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7760 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7761 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7762 mono_class_vtable (cfg->domain, cmethod->klass);
7763 CHECK_TYPELOAD (cmethod->klass);
7765 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7766 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7769 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7770 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7772 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7774 CHECK_TYPELOAD (cmethod->klass);
7775 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7780 n = fsig->param_count;
7784 * Generate smaller code for the common newobj <exception> instruction in
7785 * argument checking code.
7787 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7788 is_exception_class (cmethod->klass) && n <= 2 &&
7789 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7790 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7791 MonoInst *iargs [3];
7793 g_assert (!vtable_arg);
7797 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7800 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7804 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7809 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7812 g_assert_not_reached ();
7820 /* move the args to allow room for 'this' in the first position */
7826 /* check_call_signature () requires sp[0] to be set */
7827 this_ins.type = STACK_OBJ;
7829 if (check_call_signature (cfg, fsig, sp))
7834 if (mini_class_is_system_array (cmethod->klass)) {
7835 g_assert (!vtable_arg);
7837 *sp = emit_get_rgctx_method (cfg, context_used,
7838 cmethod, MONO_RGCTX_INFO_METHOD);
7840 /* Avoid varargs in the common case */
7841 if (fsig->param_count == 1)
7842 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7843 else if (fsig->param_count == 2)
7844 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7845 else if (fsig->param_count == 3)
7846 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7848 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7849 } else if (cmethod->string_ctor) {
7850 g_assert (!context_used);
7851 g_assert (!vtable_arg);
7852 /* we simply pass a null pointer */
7853 EMIT_NEW_PCONST (cfg, *sp, NULL);
7854 /* now call the string ctor */
7855 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7857 MonoInst* callvirt_this_arg = NULL;
7859 if (cmethod->klass->valuetype) {
7860 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7861 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7862 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7867 * The code generated by mini_emit_virtual_call () expects
7868 * iargs [0] to be a boxed instance, but luckily the vcall
7869 * will be transformed into a normal call there.
7871 } else if (context_used) {
7872 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7875 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7877 CHECK_TYPELOAD (cmethod->klass);
7880 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7881 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7882 * As a workaround, we call class cctors before allocating objects.
7884 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7885 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7886 if (cfg->verbose_level > 2)
7887 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7888 class_inits = g_slist_prepend (class_inits, vtable);
7891 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7894 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7897 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7899 /* Now call the actual ctor */
7900 /* Avoid virtual calls to ctors if possible */
7901 if (cmethod->klass->marshalbyref)
7902 callvirt_this_arg = sp [0];
7905 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7906 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7907 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7912 CHECK_CFG_EXCEPTION;
7913 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7914 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7915 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7916 !g_list_find (dont_inline, cmethod)) {
7919 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7920 cfg->real_offset += 5;
7923 inline_costs += costs - 5;
7926 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7928 } else if (context_used &&
7929 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7930 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7931 MonoInst *cmethod_addr;
7933 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7934 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7936 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7939 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7940 callvirt_this_arg, NULL, vtable_arg);
7944 if (alloc == NULL) {
7946 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7947 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7961 token = read32 (ip + 1);
7962 klass = mini_get_class (method, token, generic_context);
7963 CHECK_TYPELOAD (klass);
7964 if (sp [0]->type != STACK_OBJ)
7967 if (cfg->generic_sharing_context)
7968 context_used = mono_class_check_context_used (klass);
7970 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
7971 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
7978 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7981 /*FIXME AOT support*/
7982 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
7984 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
7985 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
7988 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7989 MonoMethod *mono_castclass;
7990 MonoInst *iargs [1];
7993 mono_castclass = mono_marshal_get_castclass (klass);
7996 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7997 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7998 g_assert (costs > 0);
8001 cfg->real_offset += 5;
8006 inline_costs += costs;
8009 ins = handle_castclass (cfg, klass, *sp, context_used);
8010 CHECK_CFG_EXCEPTION;
8020 token = read32 (ip + 1);
8021 klass = mini_get_class (method, token, generic_context);
8022 CHECK_TYPELOAD (klass);
8023 if (sp [0]->type != STACK_OBJ)
8026 if (cfg->generic_sharing_context)
8027 context_used = mono_class_check_context_used (klass);
8029 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8030 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8037 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8040 /*FIXME AOT support*/
8041 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8043 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8046 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8047 MonoMethod *mono_isinst;
8048 MonoInst *iargs [1];
8051 mono_isinst = mono_marshal_get_isinst (klass);
8054 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8055 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8056 g_assert (costs > 0);
8059 cfg->real_offset += 5;
8064 inline_costs += costs;
8067 ins = handle_isinst (cfg, klass, *sp, context_used);
8068 CHECK_CFG_EXCEPTION;
8075 case CEE_UNBOX_ANY: {
8079 token = read32 (ip + 1);
8080 klass = mini_get_class (method, token, generic_context);
8081 CHECK_TYPELOAD (klass);
8083 mono_save_token_info (cfg, image, token, klass);
8085 if (cfg->generic_sharing_context)
8086 context_used = mono_class_check_context_used (klass);
8088 if (generic_class_is_reference_type (cfg, klass)) {
8089 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8090 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8091 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8098 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8101 /*FIXME AOT support*/
8102 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8104 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8105 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8108 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8109 MonoMethod *mono_castclass;
8110 MonoInst *iargs [1];
8113 mono_castclass = mono_marshal_get_castclass (klass);
8116 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8117 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8119 g_assert (costs > 0);
8122 cfg->real_offset += 5;
8126 inline_costs += costs;
8128 ins = handle_castclass (cfg, klass, *sp, context_used);
8129 CHECK_CFG_EXCEPTION;
8137 if (mono_class_is_nullable (klass)) {
8138 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8145 ins = handle_unbox (cfg, klass, sp, context_used);
8151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8164 token = read32 (ip + 1);
8165 klass = mini_get_class (method, token, generic_context);
8166 CHECK_TYPELOAD (klass);
8168 mono_save_token_info (cfg, image, token, klass);
8170 if (cfg->generic_sharing_context)
8171 context_used = mono_class_check_context_used (klass);
8173 if (generic_class_is_reference_type (cfg, klass)) {
8179 if (klass == mono_defaults.void_class)
8181 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8183 /* frequent check in generic code: box (struct), brtrue */
8185 // FIXME: LLVM can't handle the inconsistent bb linking
8186 if (!mono_class_is_nullable (klass) &&
8187 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8188 (ip [5] == CEE_BRTRUE ||
8189 ip [5] == CEE_BRTRUE_S ||
8190 ip [5] == CEE_BRFALSE ||
8191 ip [5] == CEE_BRFALSE_S)) {
8192 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8194 MonoBasicBlock *true_bb, *false_bb;
8198 if (cfg->verbose_level > 3) {
8199 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8200 printf ("<box+brtrue opt>\n");
8208 target = ip + 1 + (signed char)(*ip);
8215 target = ip + 4 + (gint)(read32 (ip));
8219 g_assert_not_reached ();
8223 * We need to link both bblocks, since it is needed for handling stack
8224 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8225 * Branching to only one of them would lead to inconsistencies, so
8226 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8228 GET_BBLOCK (cfg, true_bb, target);
8229 GET_BBLOCK (cfg, false_bb, ip);
8231 mono_link_bblock (cfg, cfg->cbb, true_bb);
8232 mono_link_bblock (cfg, cfg->cbb, false_bb);
8234 if (sp != stack_start) {
8235 handle_stack_args (cfg, stack_start, sp - stack_start);
8237 CHECK_UNVERIFIABLE (cfg);
8240 if (COMPILE_LLVM (cfg)) {
8241 dreg = alloc_ireg (cfg);
8242 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8245 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8247 /* The JIT can't eliminate the iconst+compare */
8248 MONO_INST_NEW (cfg, ins, OP_BR);
8249 ins->inst_target_bb = is_true ? true_bb : false_bb;
8250 MONO_ADD_INS (cfg->cbb, ins);
8253 start_new_bblock = 1;
8257 *sp++ = handle_box (cfg, val, klass, context_used);
8259 CHECK_CFG_EXCEPTION;
8268 token = read32 (ip + 1);
8269 klass = mini_get_class (method, token, generic_context);
8270 CHECK_TYPELOAD (klass);
8272 mono_save_token_info (cfg, image, token, klass);
8274 if (cfg->generic_sharing_context)
8275 context_used = mono_class_check_context_used (klass);
8277 if (mono_class_is_nullable (klass)) {
8280 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8281 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8285 ins = handle_unbox (cfg, klass, sp, context_used);
8295 MonoClassField *field;
8299 if (*ip == CEE_STFLD) {
8306 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8308 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8311 token = read32 (ip + 1);
8312 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8313 field = mono_method_get_wrapper_data (method, token);
8314 klass = field->parent;
8317 field = mono_field_from_token (image, token, &klass, generic_context);
8321 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8322 FIELD_ACCESS_FAILURE;
8323 mono_class_init (klass);
8325 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8326 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8327 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8328 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8331 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8332 if (*ip == CEE_STFLD) {
8333 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8335 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8336 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8337 MonoInst *iargs [5];
8340 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8341 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8342 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8346 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8347 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8348 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8349 g_assert (costs > 0);
8351 cfg->real_offset += 5;
8354 inline_costs += costs;
8356 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8361 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8363 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8364 if (sp [0]->opcode != OP_LDADDR)
8365 store->flags |= MONO_INST_FAULT;
8367 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8368 /* insert call to write barrier */
8372 dreg = alloc_preg (cfg);
8373 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8374 emit_write_barrier (cfg, ptr, sp [1], -1);
8377 store->flags |= ins_flag;
8384 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8385 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8386 MonoInst *iargs [4];
8389 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8390 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8391 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8392 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8393 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8394 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8396 g_assert (costs > 0);
8398 cfg->real_offset += 5;
8402 inline_costs += costs;
8404 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8408 if (sp [0]->type == STACK_VTYPE) {
8411 /* Have to compute the address of the variable */
8413 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8415 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8417 g_assert (var->klass == klass);
8419 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8423 if (*ip == CEE_LDFLDA) {
8424 if (sp [0]->type == STACK_OBJ) {
8425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8426 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8429 dreg = alloc_preg (cfg);
8431 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8432 ins->klass = mono_class_from_mono_type (field->type);
8433 ins->type = STACK_MP;
8438 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8441 load->flags |= ins_flag;
8442 if (sp [0]->opcode != OP_LDADDR)
8443 load->flags |= MONO_INST_FAULT;
8454 MonoClassField *field;
8455 gpointer addr = NULL;
8456 gboolean is_special_static;
8459 token = read32 (ip + 1);
8461 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8462 field = mono_method_get_wrapper_data (method, token);
8463 klass = field->parent;
8466 field = mono_field_from_token (image, token, &klass, generic_context);
8469 mono_class_init (klass);
8470 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8471 FIELD_ACCESS_FAILURE;
8473 /* if the class is Critical then transparent code cannot access its fields */
8474 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8475 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8478 * We can only support shared generic static
8479 * field access on architectures where the
8480 * trampoline code has been extended to handle
8481 * the generic class init.
8483 #ifndef MONO_ARCH_VTABLE_REG
8484 GENERIC_SHARING_FAILURE (*ip);
8487 if (cfg->generic_sharing_context)
8488 context_used = mono_class_check_context_used (klass);
8490 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8492 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8493 * to be called here.
8495 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8496 mono_class_vtable (cfg->domain, klass);
8497 CHECK_TYPELOAD (klass);
8499 mono_domain_lock (cfg->domain);
8500 if (cfg->domain->special_static_fields)
8501 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8502 mono_domain_unlock (cfg->domain);
8504 is_special_static = mono_class_field_is_special_static (field);
8506 /* Generate IR to compute the field address */
8507 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8509 * Fast access to TLS data
8510 * Inline version of get_thread_static_data () in
8514 int idx, static_data_reg, array_reg, dreg;
8515 MonoInst *thread_ins;
8517 // offset &= 0x7fffffff;
8518 // idx = (offset >> 24) - 1;
8519 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8521 thread_ins = mono_get_thread_intrinsic (cfg);
8522 MONO_ADD_INS (cfg->cbb, thread_ins);
8523 static_data_reg = alloc_ireg (cfg);
8524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8526 if (cfg->compile_aot) {
8527 int offset_reg, offset2_reg, idx_reg;
8529 /* For TLS variables, this will return the TLS offset */
8530 EMIT_NEW_SFLDACONST (cfg, ins, field);
8531 offset_reg = ins->dreg;
8532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8533 idx_reg = alloc_ireg (cfg);
8534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8537 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8538 array_reg = alloc_ireg (cfg);
8539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8540 offset2_reg = alloc_ireg (cfg);
8541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8542 dreg = alloc_ireg (cfg);
8543 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8545 offset = (gsize)addr & 0x7fffffff;
8546 idx = (offset >> 24) - 1;
8548 array_reg = alloc_ireg (cfg);
8549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8550 dreg = alloc_ireg (cfg);
8551 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8553 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8554 (cfg->compile_aot && is_special_static) ||
8555 (context_used && is_special_static)) {
8556 MonoInst *iargs [2];
8558 g_assert (field->parent);
8559 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8561 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8562 field, MONO_RGCTX_INFO_CLASS_FIELD);
8564 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8566 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8567 } else if (context_used) {
8568 MonoInst *static_data;
8571 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8572 method->klass->name_space, method->klass->name, method->name,
8573 depth, field->offset);
8576 if (mono_class_needs_cctor_run (klass, method))
8577 emit_generic_class_init (cfg, klass);
8580 * The pointer we're computing here is
8582 * super_info.static_data + field->offset
8584 static_data = emit_get_rgctx_klass (cfg, context_used,
8585 klass, MONO_RGCTX_INFO_STATIC_DATA);
8587 if (field->offset == 0) {
8590 int addr_reg = mono_alloc_preg (cfg);
8591 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8593 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8594 MonoInst *iargs [2];
8596 g_assert (field->parent);
8597 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8598 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8599 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8601 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8603 CHECK_TYPELOAD (klass);
8605 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8606 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8607 if (cfg->verbose_level > 2)
8608 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8609 class_inits = g_slist_prepend (class_inits, vtable);
8611 if (cfg->run_cctors) {
8613 /* This makes so that inline cannot trigger */
8614 /* .cctors: too many apps depend on them */
8615 /* running with a specific order... */
8616 if (! vtable->initialized)
8618 ex = mono_runtime_class_init_full (vtable, FALSE);
8620 set_exception_object (cfg, ex);
8621 goto exception_exit;
8625 addr = (char*)vtable->data + field->offset;
8627 if (cfg->compile_aot)
8628 EMIT_NEW_SFLDACONST (cfg, ins, field);
8630 EMIT_NEW_PCONST (cfg, ins, addr);
8632 MonoInst *iargs [1];
8633 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8634 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8638 /* Generate IR to do the actual load/store operation */
8640 if (*ip == CEE_LDSFLDA) {
8641 ins->klass = mono_class_from_mono_type (field->type);
8642 ins->type = STACK_PTR;
8644 } else if (*ip == CEE_STSFLD) {
8649 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8650 store->flags |= ins_flag;
8652 gboolean is_const = FALSE;
8653 MonoVTable *vtable = NULL;
8655 if (!context_used) {
8656 vtable = mono_class_vtable (cfg->domain, klass);
8657 CHECK_TYPELOAD (klass);
8659 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8660 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8661 gpointer addr = (char*)vtable->data + field->offset;
8662 int ro_type = field->type->type;
8663 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8664 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8666 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8669 case MONO_TYPE_BOOLEAN:
8671 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8675 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8678 case MONO_TYPE_CHAR:
8680 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8684 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8689 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8693 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8699 case MONO_TYPE_FNPTR:
8700 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8701 type_to_eval_stack_type ((cfg), field->type, *sp);
8704 case MONO_TYPE_STRING:
8705 case MONO_TYPE_OBJECT:
8706 case MONO_TYPE_CLASS:
8707 case MONO_TYPE_SZARRAY:
8708 case MONO_TYPE_ARRAY:
8709 if (!mono_gc_is_moving ()) {
8710 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8711 type_to_eval_stack_type ((cfg), field->type, *sp);
8719 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8724 case MONO_TYPE_VALUETYPE:
8734 CHECK_STACK_OVF (1);
8736 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8737 load->flags |= ins_flag;
8750 token = read32 (ip + 1);
8751 klass = mini_get_class (method, token, generic_context);
8752 CHECK_TYPELOAD (klass);
8753 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8754 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8755 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8756 generic_class_is_reference_type (cfg, klass)) {
8757 /* insert call to write barrier */
8758 emit_write_barrier (cfg, sp [0], sp [1], -1);
8770 const char *data_ptr;
8772 guint32 field_token;
8778 token = read32 (ip + 1);
8780 klass = mini_get_class (method, token, generic_context);
8781 CHECK_TYPELOAD (klass);
8783 if (cfg->generic_sharing_context)
8784 context_used = mono_class_check_context_used (klass);
8786 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8787 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8788 ins->sreg1 = sp [0]->dreg;
8789 ins->type = STACK_I4;
8790 ins->dreg = alloc_ireg (cfg);
8791 MONO_ADD_INS (cfg->cbb, ins);
8792 *sp = mono_decompose_opcode (cfg, ins);
8797 MonoClass *array_class = mono_array_class_get (klass, 1);
8798 /* FIXME: we cannot get a managed
8799 allocator because we can't get the
8800 open generic class's vtable. We
8801 have the same problem in
8802 handle_alloc(). This
8803 needs to be solved so that we can
8804 have managed allocs of shared
8807 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8808 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8810 MonoMethod *managed_alloc = NULL;
8812 /* FIXME: Decompose later to help abcrem */
8815 args [0] = emit_get_rgctx_klass (cfg, context_used,
8816 array_class, MONO_RGCTX_INFO_VTABLE);
8821 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8823 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8825 if (cfg->opt & MONO_OPT_SHARED) {
8826 /* Decompose now to avoid problems with references to the domainvar */
8827 MonoInst *iargs [3];
8829 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8830 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8833 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8835 /* Decompose later since it is needed by abcrem */
8836 MonoClass *array_type = mono_array_class_get (klass, 1);
8837 mono_class_vtable (cfg->domain, array_type);
8838 CHECK_TYPELOAD (array_type);
8840 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8841 ins->dreg = alloc_preg (cfg);
8842 ins->sreg1 = sp [0]->dreg;
8843 ins->inst_newa_class = klass;
8844 ins->type = STACK_OBJ;
8846 MONO_ADD_INS (cfg->cbb, ins);
8847 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8848 cfg->cbb->has_array_access = TRUE;
8850 /* Needed so mono_emit_load_get_addr () gets called */
8851 mono_get_got_var (cfg);
8861 * we inline/optimize the initialization sequence if possible.
8862 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8863 * for small sizes open code the memcpy
8864 * ensure the rva field is big enough
8866 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8867 MonoMethod *memcpy_method = get_memcpy_method ();
8868 MonoInst *iargs [3];
8869 int add_reg = alloc_preg (cfg);
8871 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8872 if (cfg->compile_aot) {
8873 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8875 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8877 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8878 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8887 if (sp [0]->type != STACK_OBJ)
8890 dreg = alloc_preg (cfg);
8891 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8892 ins->dreg = alloc_preg (cfg);
8893 ins->sreg1 = sp [0]->dreg;
8894 ins->type = STACK_I4;
8895 /* This flag will be inherited by the decomposition */
8896 ins->flags |= MONO_INST_FAULT;
8897 MONO_ADD_INS (cfg->cbb, ins);
8898 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8899 cfg->cbb->has_array_access = TRUE;
8907 if (sp [0]->type != STACK_OBJ)
8910 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8912 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8913 CHECK_TYPELOAD (klass);
8914 /* we need to make sure that this array is exactly the type it needs
8915 * to be for correctness. the wrappers are lax with their usage
8916 * so we need to ignore them here
8918 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8919 MonoClass *array_class = mono_array_class_get (klass, 1);
8920 mini_emit_check_array_type (cfg, sp [0], array_class);
8921 CHECK_TYPELOAD (array_class);
8925 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8940 case CEE_LDELEM_REF: {
8946 if (*ip == CEE_LDELEM) {
8948 token = read32 (ip + 1);
8949 klass = mini_get_class (method, token, generic_context);
8950 CHECK_TYPELOAD (klass);
8951 mono_class_init (klass);
8954 klass = array_access_to_klass (*ip);
8956 if (sp [0]->type != STACK_OBJ)
8959 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8961 if (sp [1]->opcode == OP_ICONST) {
8962 int array_reg = sp [0]->dreg;
8963 int index_reg = sp [1]->dreg;
8964 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8966 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8967 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8969 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8970 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8973 if (*ip == CEE_LDELEM)
8986 case CEE_STELEM_REF:
8993 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8995 if (*ip == CEE_STELEM) {
8997 token = read32 (ip + 1);
8998 klass = mini_get_class (method, token, generic_context);
8999 CHECK_TYPELOAD (klass);
9000 mono_class_init (klass);
9003 klass = array_access_to_klass (*ip);
9005 if (sp [0]->type != STACK_OBJ)
9008 /* storing a NULL doesn't need any of the complex checks in stelemref */
9009 if (generic_class_is_reference_type (cfg, klass) &&
9010 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9011 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9012 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9013 MonoInst *iargs [3];
9016 mono_class_setup_vtable (obj_array);
9017 g_assert (helper->slot);
9019 if (sp [0]->type != STACK_OBJ)
9021 if (sp [2]->type != STACK_OBJ)
9028 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9030 if (sp [1]->opcode == OP_ICONST) {
9031 int array_reg = sp [0]->dreg;
9032 int index_reg = sp [1]->dreg;
9033 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9035 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9036 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9038 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9043 if (*ip == CEE_STELEM)
9050 case CEE_CKFINITE: {
9054 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9055 ins->sreg1 = sp [0]->dreg;
9056 ins->dreg = alloc_freg (cfg);
9057 ins->type = STACK_R8;
9058 MONO_ADD_INS (bblock, ins);
9060 *sp++ = mono_decompose_opcode (cfg, ins);
9065 case CEE_REFANYVAL: {
9066 MonoInst *src_var, *src;
9068 int klass_reg = alloc_preg (cfg);
9069 int dreg = alloc_preg (cfg);
9072 MONO_INST_NEW (cfg, ins, *ip);
9075 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9076 CHECK_TYPELOAD (klass);
9077 mono_class_init (klass);
9079 if (cfg->generic_sharing_context)
9080 context_used = mono_class_check_context_used (klass);
9083 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9085 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9086 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9090 MonoInst *klass_ins;
9092 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9093 klass, MONO_RGCTX_INFO_KLASS);
9096 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9097 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9099 mini_emit_class_check (cfg, klass_reg, klass);
9101 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9102 ins->type = STACK_MP;
9107 case CEE_MKREFANY: {
9108 MonoInst *loc, *addr;
9111 MONO_INST_NEW (cfg, ins, *ip);
9114 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9115 CHECK_TYPELOAD (klass);
9116 mono_class_init (klass);
9118 if (cfg->generic_sharing_context)
9119 context_used = mono_class_check_context_used (klass);
9121 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9122 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9125 MonoInst *const_ins;
9126 int type_reg = alloc_preg (cfg);
9128 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9129 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9131 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9132 } else if (cfg->compile_aot) {
9133 int const_reg = alloc_preg (cfg);
9134 int type_reg = alloc_preg (cfg);
9136 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9141 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9142 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9146 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9147 ins->type = STACK_VTYPE;
9148 ins->klass = mono_defaults.typed_reference_class;
9155 MonoClass *handle_class;
9157 CHECK_STACK_OVF (1);
9160 n = read32 (ip + 1);
9162 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9163 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9164 handle = mono_method_get_wrapper_data (method, n);
9165 handle_class = mono_method_get_wrapper_data (method, n + 1);
9166 if (handle_class == mono_defaults.typehandle_class)
9167 handle = &((MonoClass*)handle)->byval_arg;
9170 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9174 mono_class_init (handle_class);
9175 if (cfg->generic_sharing_context) {
9176 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9177 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9178 /* This case handles ldtoken
9179 of an open type, like for
9182 } else if (handle_class == mono_defaults.typehandle_class) {
9183 /* If we get a MONO_TYPE_CLASS
9184 then we need to provide the
9186 instantiation of it. */
9187 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9190 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9191 } else if (handle_class == mono_defaults.fieldhandle_class)
9192 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9193 else if (handle_class == mono_defaults.methodhandle_class)
9194 context_used = mono_method_check_context_used (handle);
9196 g_assert_not_reached ();
9199 if ((cfg->opt & MONO_OPT_SHARED) &&
9200 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9201 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9202 MonoInst *addr, *vtvar, *iargs [3];
9203 int method_context_used;
9205 if (cfg->generic_sharing_context)
9206 method_context_used = mono_method_check_context_used (method);
9208 method_context_used = 0;
9210 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9212 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9213 EMIT_NEW_ICONST (cfg, iargs [1], n);
9214 if (method_context_used) {
9215 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9216 method, MONO_RGCTX_INFO_METHOD);
9217 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9219 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9220 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9222 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9224 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9226 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9228 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9229 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9230 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9231 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9232 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9233 MonoClass *tclass = mono_class_from_mono_type (handle);
9235 mono_class_init (tclass);
9237 ins = emit_get_rgctx_klass (cfg, context_used,
9238 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9239 } else if (cfg->compile_aot) {
9240 if (method->wrapper_type) {
9241 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9242 /* Special case for static synchronized wrappers */
9243 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9245 /* FIXME: n is not a normal token */
9246 cfg->disable_aot = TRUE;
9247 EMIT_NEW_PCONST (cfg, ins, NULL);
9250 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9253 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9255 ins->type = STACK_OBJ;
9256 ins->klass = cmethod->klass;
9259 MonoInst *addr, *vtvar;
9261 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9264 if (handle_class == mono_defaults.typehandle_class) {
9265 ins = emit_get_rgctx_klass (cfg, context_used,
9266 mono_class_from_mono_type (handle),
9267 MONO_RGCTX_INFO_TYPE);
9268 } else if (handle_class == mono_defaults.methodhandle_class) {
9269 ins = emit_get_rgctx_method (cfg, context_used,
9270 handle, MONO_RGCTX_INFO_METHOD);
9271 } else if (handle_class == mono_defaults.fieldhandle_class) {
9272 ins = emit_get_rgctx_field (cfg, context_used,
9273 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9275 g_assert_not_reached ();
9277 } else if (cfg->compile_aot) {
9278 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9280 EMIT_NEW_PCONST (cfg, ins, handle);
9282 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9283 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9284 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9294 MONO_INST_NEW (cfg, ins, OP_THROW);
9296 ins->sreg1 = sp [0]->dreg;
9298 bblock->out_of_line = TRUE;
9299 MONO_ADD_INS (bblock, ins);
9300 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9301 MONO_ADD_INS (bblock, ins);
9304 link_bblock (cfg, bblock, end_bblock);
9305 start_new_bblock = 1;
9307 case CEE_ENDFINALLY:
9308 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9309 MONO_ADD_INS (bblock, ins);
9311 start_new_bblock = 1;
9314 * Control will leave the method so empty the stack, otherwise
9315 * the next basic block will start with a nonempty stack.
9317 while (sp != stack_start) {
9325 if (*ip == CEE_LEAVE) {
9327 target = ip + 5 + (gint32)read32(ip + 1);
9330 target = ip + 2 + (signed char)(ip [1]);
9333 /* empty the stack */
9334 while (sp != stack_start) {
9339 * If this leave statement is in a catch block, check for a
9340 * pending exception, and rethrow it if necessary.
9341 * We avoid doing this in runtime invoke wrappers, since those are called
9342 * by native code which expects the wrapper to catch all exceptions.
9344 for (i = 0; i < header->num_clauses; ++i) {
9345 MonoExceptionClause *clause = &header->clauses [i];
9348 * Use <= in the final comparison to handle clauses with multiple
9349 * leave statements, like in bug #78024.
9350 * The ordering of the exception clauses guarantees that we find the
9353 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9355 MonoBasicBlock *dont_throw;
9360 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9363 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9365 NEW_BBLOCK (cfg, dont_throw);
9368 * Currently, we always rethrow the abort exception, despite the
9369 * fact that this is not correct. See thread6.cs for an example.
9370 * But propagating the abort exception is more important than
9371 * getting the semantics right.
9373 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9375 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9377 MONO_START_BB (cfg, dont_throw);
9382 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9384 MonoExceptionClause *clause;
9386 for (tmp = handlers; tmp; tmp = tmp->next) {
9388 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9390 link_bblock (cfg, bblock, tblock);
9391 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9392 ins->inst_target_bb = tblock;
9393 ins->inst_eh_block = clause;
9394 MONO_ADD_INS (bblock, ins);
9395 bblock->has_call_handler = 1;
9396 if (COMPILE_LLVM (cfg)) {
9397 MonoBasicBlock *target_bb;
9400 * Link the finally bblock with the target, since it will
9401 * conceptually branch there.
9402 * FIXME: Have to link the bblock containing the endfinally.
9404 GET_BBLOCK (cfg, target_bb, target);
9405 link_bblock (cfg, tblock, target_bb);
9408 g_list_free (handlers);
9411 MONO_INST_NEW (cfg, ins, OP_BR);
9412 MONO_ADD_INS (bblock, ins);
9413 GET_BBLOCK (cfg, tblock, target);
9414 link_bblock (cfg, bblock, tblock);
9415 ins->inst_target_bb = tblock;
9416 start_new_bblock = 1;
9418 if (*ip == CEE_LEAVE)
9427 * Mono specific opcodes
9429 case MONO_CUSTOM_PREFIX: {
9431 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9435 case CEE_MONO_ICALL: {
9437 MonoJitICallInfo *info;
9439 token = read32 (ip + 2);
9440 func = mono_method_get_wrapper_data (method, token);
9441 info = mono_find_jit_icall_by_addr (func);
9444 CHECK_STACK (info->sig->param_count);
9445 sp -= info->sig->param_count;
9447 ins = mono_emit_jit_icall (cfg, info->func, sp);
9448 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9452 inline_costs += 10 * num_calls++;
9456 case CEE_MONO_LDPTR: {
9459 CHECK_STACK_OVF (1);
9461 token = read32 (ip + 2);
9463 ptr = mono_method_get_wrapper_data (method, token);
9464 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9465 MonoJitICallInfo *callinfo;
9466 const char *icall_name;
9468 icall_name = method->name + strlen ("__icall_wrapper_");
9469 g_assert (icall_name);
9470 callinfo = mono_find_jit_icall_by_name (icall_name);
9471 g_assert (callinfo);
9473 if (ptr == callinfo->func) {
9474 /* Will be transformed into an AOTCONST later */
9475 EMIT_NEW_PCONST (cfg, ins, ptr);
9481 /* FIXME: Generalize this */
9482 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9483 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9488 EMIT_NEW_PCONST (cfg, ins, ptr);
9491 inline_costs += 10 * num_calls++;
9492 /* Can't embed random pointers into AOT code */
9493 cfg->disable_aot = 1;
9496 case CEE_MONO_ICALL_ADDR: {
9497 MonoMethod *cmethod;
9500 CHECK_STACK_OVF (1);
9502 token = read32 (ip + 2);
9504 cmethod = mono_method_get_wrapper_data (method, token);
9506 if (cfg->compile_aot) {
9507 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9509 ptr = mono_lookup_internal_call (cmethod);
9511 EMIT_NEW_PCONST (cfg, ins, ptr);
9517 case CEE_MONO_VTADDR: {
9518 MonoInst *src_var, *src;
9524 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9525 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9530 case CEE_MONO_NEWOBJ: {
9531 MonoInst *iargs [2];
9533 CHECK_STACK_OVF (1);
9535 token = read32 (ip + 2);
9536 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9537 mono_class_init (klass);
9538 NEW_DOMAINCONST (cfg, iargs [0]);
9539 MONO_ADD_INS (cfg->cbb, iargs [0]);
9540 NEW_CLASSCONST (cfg, iargs [1], klass);
9541 MONO_ADD_INS (cfg->cbb, iargs [1]);
9542 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9544 inline_costs += 10 * num_calls++;
9547 case CEE_MONO_OBJADDR:
9550 MONO_INST_NEW (cfg, ins, OP_MOVE);
9551 ins->dreg = alloc_preg (cfg);
9552 ins->sreg1 = sp [0]->dreg;
9553 ins->type = STACK_MP;
9554 MONO_ADD_INS (cfg->cbb, ins);
9558 case CEE_MONO_LDNATIVEOBJ:
9560 * Similar to LDOBJ, but instead load the unmanaged
9561 * representation of the vtype to the stack.
9566 token = read32 (ip + 2);
9567 klass = mono_method_get_wrapper_data (method, token);
9568 g_assert (klass->valuetype);
9569 mono_class_init (klass);
9572 MonoInst *src, *dest, *temp;
9575 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9576 temp->backend.is_pinvoke = 1;
9577 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9578 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9580 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9581 dest->type = STACK_VTYPE;
9582 dest->klass = klass;
9588 case CEE_MONO_RETOBJ: {
9590 * Same as RET, but return the native representation of a vtype
9593 g_assert (cfg->ret);
9594 g_assert (mono_method_signature (method)->pinvoke);
9599 token = read32 (ip + 2);
9600 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9602 if (!cfg->vret_addr) {
9603 g_assert (cfg->ret_var_is_local);
9605 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9607 EMIT_NEW_RETLOADA (cfg, ins);
9609 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9611 if (sp != stack_start)
9614 MONO_INST_NEW (cfg, ins, OP_BR);
9615 ins->inst_target_bb = end_bblock;
9616 MONO_ADD_INS (bblock, ins);
9617 link_bblock (cfg, bblock, end_bblock);
9618 start_new_bblock = 1;
9622 case CEE_MONO_CISINST:
9623 case CEE_MONO_CCASTCLASS: {
9628 token = read32 (ip + 2);
9629 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9630 if (ip [1] == CEE_MONO_CISINST)
9631 ins = handle_cisinst (cfg, klass, sp [0]);
9633 ins = handle_ccastclass (cfg, klass, sp [0]);
9639 case CEE_MONO_SAVE_LMF:
9640 case CEE_MONO_RESTORE_LMF:
9641 #ifdef MONO_ARCH_HAVE_LMF_OPS
9642 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9643 MONO_ADD_INS (bblock, ins);
9644 cfg->need_lmf_area = TRUE;
9648 case CEE_MONO_CLASSCONST:
9649 CHECK_STACK_OVF (1);
9651 token = read32 (ip + 2);
9652 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9655 inline_costs += 10 * num_calls++;
9657 case CEE_MONO_NOT_TAKEN:
9658 bblock->out_of_line = TRUE;
9662 CHECK_STACK_OVF (1);
9664 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9665 ins->dreg = alloc_preg (cfg);
9666 ins->inst_offset = (gint32)read32 (ip + 2);
9667 ins->type = STACK_PTR;
9668 MONO_ADD_INS (bblock, ins);
9672 case CEE_MONO_DYN_CALL: {
9675 /* It would be easier to call a trampoline, but that would put an
9676 * extra frame on the stack, confusing exception handling. So
9677 * implement it inline using an opcode for now.
9680 if (!cfg->dyn_call_var) {
9681 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9682 /* prevent it from being register allocated */
9683 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9686 /* Has to use a call inst since it local regalloc expects it */
9687 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9688 ins = (MonoInst*)call;
9690 ins->sreg1 = sp [0]->dreg;
9691 ins->sreg2 = sp [1]->dreg;
9692 MONO_ADD_INS (bblock, ins);
9694 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9695 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9699 inline_costs += 10 * num_calls++;
9704 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9714 /* somewhat similar to LDTOKEN */
9715 MonoInst *addr, *vtvar;
9716 CHECK_STACK_OVF (1);
9717 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9719 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9720 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9722 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9723 ins->type = STACK_VTYPE;
9724 ins->klass = mono_defaults.argumenthandle_class;
9737 * The following transforms:
9738 * CEE_CEQ into OP_CEQ
9739 * CEE_CGT into OP_CGT
9740 * CEE_CGT_UN into OP_CGT_UN
9741 * CEE_CLT into OP_CLT
9742 * CEE_CLT_UN into OP_CLT_UN
9744 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9746 MONO_INST_NEW (cfg, ins, cmp->opcode);
9748 cmp->sreg1 = sp [0]->dreg;
9749 cmp->sreg2 = sp [1]->dreg;
9750 type_from_op (cmp, sp [0], sp [1]);
9752 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9753 cmp->opcode = OP_LCOMPARE;
9754 else if (sp [0]->type == STACK_R8)
9755 cmp->opcode = OP_FCOMPARE;
9757 cmp->opcode = OP_ICOMPARE;
9758 MONO_ADD_INS (bblock, cmp);
9759 ins->type = STACK_I4;
9760 ins->dreg = alloc_dreg (cfg, ins->type);
9761 type_from_op (ins, sp [0], sp [1]);
9763 if (cmp->opcode == OP_FCOMPARE) {
9765 * The backends expect the fceq opcodes to do the
9768 cmp->opcode = OP_NOP;
9769 ins->sreg1 = cmp->sreg1;
9770 ins->sreg2 = cmp->sreg2;
9772 MONO_ADD_INS (bblock, ins);
9779 MonoMethod *cil_method;
9780 gboolean needs_static_rgctx_invoke;
9782 CHECK_STACK_OVF (1);
9784 n = read32 (ip + 2);
9785 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9786 if (!cmethod || mono_loader_get_last_error ())
9788 mono_class_init (cmethod->klass);
9790 mono_save_token_info (cfg, image, n, cmethod);
9792 if (cfg->generic_sharing_context)
9793 context_used = mono_method_check_context_used (cmethod);
9795 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9797 cil_method = cmethod;
9798 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9799 METHOD_ACCESS_FAILURE;
9801 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9802 if (check_linkdemand (cfg, method, cmethod))
9804 CHECK_CFG_EXCEPTION;
9805 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9806 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9810 * Optimize the common case of ldftn+delegate creation
9812 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9813 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9814 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9815 MonoInst *target_ins;
9817 int invoke_context_used = 0;
9819 invoke = mono_get_delegate_invoke (ctor_method->klass);
9820 if (!invoke || !mono_method_signature (invoke))
9823 if (cfg->generic_sharing_context)
9824 invoke_context_used = mono_method_check_context_used (invoke);
9826 target_ins = sp [-1];
9828 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9829 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9830 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9832 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9836 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9837 /* FIXME: SGEN support */
9838 if (invoke_context_used == 0) {
9840 if (cfg->verbose_level > 3)
9841 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9843 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9844 CHECK_CFG_EXCEPTION;
9853 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9854 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9858 inline_costs += 10 * num_calls++;
9861 case CEE_LDVIRTFTN: {
9866 n = read32 (ip + 2);
9867 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9868 if (!cmethod || mono_loader_get_last_error ())
9870 mono_class_init (cmethod->klass);
9872 if (cfg->generic_sharing_context)
9873 context_used = mono_method_check_context_used (cmethod);
9875 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9876 if (check_linkdemand (cfg, method, cmethod))
9878 CHECK_CFG_EXCEPTION;
9879 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9880 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9886 args [1] = emit_get_rgctx_method (cfg, context_used,
9887 cmethod, MONO_RGCTX_INFO_METHOD);
9890 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9892 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9895 inline_costs += 10 * num_calls++;
9899 CHECK_STACK_OVF (1);
9901 n = read16 (ip + 2);
9903 EMIT_NEW_ARGLOAD (cfg, ins, n);
9908 CHECK_STACK_OVF (1);
9910 n = read16 (ip + 2);
9912 NEW_ARGLOADA (cfg, ins, n);
9913 MONO_ADD_INS (cfg->cbb, ins);
9921 n = read16 (ip + 2);
9923 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9925 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9929 CHECK_STACK_OVF (1);
9931 n = read16 (ip + 2);
9933 EMIT_NEW_LOCLOAD (cfg, ins, n);
9938 unsigned char *tmp_ip;
9939 CHECK_STACK_OVF (1);
9941 n = read16 (ip + 2);
9944 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9950 EMIT_NEW_LOCLOADA (cfg, ins, n);
9959 n = read16 (ip + 2);
9961 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9963 emit_stloc_ir (cfg, sp, header, n);
9970 if (sp != stack_start)
9972 if (cfg->method != method)
9974 * Inlining this into a loop in a parent could lead to
9975 * stack overflows which is different behavior than the
9976 * non-inlined case, thus disable inlining in this case.
9978 goto inline_failure;
9980 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9981 ins->dreg = alloc_preg (cfg);
9982 ins->sreg1 = sp [0]->dreg;
9983 ins->type = STACK_PTR;
9984 MONO_ADD_INS (cfg->cbb, ins);
9986 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9988 ins->flags |= MONO_INST_INIT;
9993 case CEE_ENDFILTER: {
9994 MonoExceptionClause *clause, *nearest;
9995 int cc, nearest_num;
9999 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10001 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10002 ins->sreg1 = (*sp)->dreg;
10003 MONO_ADD_INS (bblock, ins);
10004 start_new_bblock = 1;
10009 for (cc = 0; cc < header->num_clauses; ++cc) {
10010 clause = &header->clauses [cc];
10011 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10012 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10013 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10018 g_assert (nearest);
10019 if ((ip - header->code) != nearest->handler_offset)
10024 case CEE_UNALIGNED_:
10025 ins_flag |= MONO_INST_UNALIGNED;
10026 /* FIXME: record alignment? we can assume 1 for now */
10030 case CEE_VOLATILE_:
10031 ins_flag |= MONO_INST_VOLATILE;
10035 ins_flag |= MONO_INST_TAILCALL;
10036 cfg->flags |= MONO_CFG_HAS_TAIL;
10037 /* Can't inline tail calls at this time */
10038 inline_costs += 100000;
10045 token = read32 (ip + 2);
10046 klass = mini_get_class (method, token, generic_context);
10047 CHECK_TYPELOAD (klass);
10048 if (generic_class_is_reference_type (cfg, klass))
10049 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10051 mini_emit_initobj (cfg, *sp, NULL, klass);
10055 case CEE_CONSTRAINED_:
10057 token = read32 (ip + 2);
10058 if (method->wrapper_type != MONO_WRAPPER_NONE)
10059 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10061 constrained_call = mono_class_get_full (image, token, generic_context);
10062 CHECK_TYPELOAD (constrained_call);
10066 case CEE_INITBLK: {
10067 MonoInst *iargs [3];
10071 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10072 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10073 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10074 /* emit_memset only works when val == 0 */
10075 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10077 iargs [0] = sp [0];
10078 iargs [1] = sp [1];
10079 iargs [2] = sp [2];
10080 if (ip [1] == CEE_CPBLK) {
10081 MonoMethod *memcpy_method = get_memcpy_method ();
10082 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10084 MonoMethod *memset_method = get_memset_method ();
10085 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10095 ins_flag |= MONO_INST_NOTYPECHECK;
10097 ins_flag |= MONO_INST_NORANGECHECK;
10098 /* we ignore the no-nullcheck for now since we
10099 * really do it explicitly only when doing callvirt->call
10103 case CEE_RETHROW: {
10105 int handler_offset = -1;
10107 for (i = 0; i < header->num_clauses; ++i) {
10108 MonoExceptionClause *clause = &header->clauses [i];
10109 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10110 handler_offset = clause->handler_offset;
10115 bblock->flags |= BB_EXCEPTION_UNSAFE;
10117 g_assert (handler_offset != -1);
10119 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10120 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10121 ins->sreg1 = load->dreg;
10122 MONO_ADD_INS (bblock, ins);
10124 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10125 MONO_ADD_INS (bblock, ins);
10128 link_bblock (cfg, bblock, end_bblock);
10129 start_new_bblock = 1;
10137 CHECK_STACK_OVF (1);
10139 token = read32 (ip + 2);
10140 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10141 MonoType *type = mono_type_create_from_typespec (image, token);
10142 token = mono_type_size (type, &ialign);
10144 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10145 CHECK_TYPELOAD (klass);
10146 mono_class_init (klass);
10147 token = mono_class_value_size (klass, &align);
10149 EMIT_NEW_ICONST (cfg, ins, token);
10154 case CEE_REFANYTYPE: {
10155 MonoInst *src_var, *src;
10161 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10163 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10164 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10170 case CEE_READONLY_:
10183 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10193 g_warning ("opcode 0x%02x not handled", *ip);
10197 if (start_new_bblock != 1)
10200 bblock->cil_length = ip - bblock->cil_code;
10201 bblock->next_bb = end_bblock;
10203 if (cfg->method == method && cfg->domainvar) {
10205 MonoInst *get_domain;
10207 cfg->cbb = init_localsbb;
10209 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10210 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10213 get_domain->dreg = alloc_preg (cfg);
10214 MONO_ADD_INS (cfg->cbb, get_domain);
10216 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10217 MONO_ADD_INS (cfg->cbb, store);
10220 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10221 if (cfg->compile_aot)
10222 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10223 mono_get_got_var (cfg);
10226 if (cfg->method == method && cfg->got_var)
10227 mono_emit_load_got_addr (cfg);
10232 cfg->cbb = init_localsbb;
10234 for (i = 0; i < header->num_locals; ++i) {
10235 MonoType *ptype = header->locals [i];
10236 int t = ptype->type;
10237 dreg = cfg->locals [i]->dreg;
10239 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10240 t = mono_class_enum_basetype (ptype->data.klass)->type;
10241 if (ptype->byref) {
10242 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10243 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10244 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10245 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10246 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10247 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10248 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10249 ins->type = STACK_R8;
10250 ins->inst_p0 = (void*)&r8_0;
10251 ins->dreg = alloc_dreg (cfg, STACK_R8);
10252 MONO_ADD_INS (init_localsbb, ins);
10253 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10254 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10255 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10256 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10258 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10263 if (cfg->init_ref_vars && cfg->method == method) {
10264 /* Emit initialization for ref vars */
10265 // FIXME: Avoid duplication initialization for IL locals.
10266 for (i = 0; i < cfg->num_varinfo; ++i) {
10267 MonoInst *ins = cfg->varinfo [i];
10269 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10270 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10274 /* Add a sequence point for method entry/exit events */
10276 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10277 MONO_ADD_INS (init_localsbb, ins);
10278 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10279 MONO_ADD_INS (cfg->bb_exit, ins);
10284 if (cfg->method == method) {
10285 MonoBasicBlock *bb;
10286 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10287 bb->region = mono_find_block_region (cfg, bb->real_offset);
10289 mono_create_spvar_for_region (cfg, bb->region);
10290 if (cfg->verbose_level > 2)
10291 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10295 g_slist_free (class_inits);
10296 dont_inline = g_list_remove (dont_inline, method);
10298 if (inline_costs < 0) {
10301 /* Method is too large */
10302 mname = mono_method_full_name (method, TRUE);
10303 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10304 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10306 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10307 mono_basic_block_free (original_bb);
10311 if ((cfg->verbose_level > 2) && (cfg->method == method))
10312 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10314 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10315 mono_basic_block_free (original_bb);
10316 return inline_costs;
10319 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10326 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10330 set_exception_type_from_invalid_il (cfg, method, ip);
10334 g_slist_free (class_inits);
10335 mono_basic_block_free (original_bb);
10336 dont_inline = g_list_remove (dont_inline, method);
10337 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10342 store_membase_reg_to_store_membase_imm (int opcode)
10345 case OP_STORE_MEMBASE_REG:
10346 return OP_STORE_MEMBASE_IMM;
10347 case OP_STOREI1_MEMBASE_REG:
10348 return OP_STOREI1_MEMBASE_IMM;
10349 case OP_STOREI2_MEMBASE_REG:
10350 return OP_STOREI2_MEMBASE_IMM;
10351 case OP_STOREI4_MEMBASE_REG:
10352 return OP_STOREI4_MEMBASE_IMM;
10353 case OP_STOREI8_MEMBASE_REG:
10354 return OP_STOREI8_MEMBASE_IMM;
10356 g_assert_not_reached ();
10362 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand (…_IMM) variant of OPCODE, used when one
 * source of an ALU/compare/store op is a constant.  The case labels for each
 * mapping are elided from this excerpt; presumably the function returns a
 * sentinel (-1) when no immediate form exists — confirm against the full file.
 */
10365 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
10369 return OP_IADD_IMM;
10371 return OP_ISUB_IMM;
10373 return OP_IDIV_IMM;
10375 return OP_IDIV_UN_IMM;
10377 return OP_IREM_IMM;
10379 return OP_IREM_UN_IMM;
10381 return OP_IMUL_IMM;
10383 return OP_IAND_IMM;
10387 return OP_IXOR_IMM;
10389 return OP_ISHL_IMM;
10391 return OP_ISHR_IMM;
10393 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU ops */
10396 return OP_LADD_IMM;
10398 return OP_LSUB_IMM;
10400 return OP_LAND_IMM;
10404 return OP_LXOR_IMM;
10406 return OP_LSHL_IMM;
10408 return OP_LSHR_IMM;
10410 return OP_LSHR_UN_IMM;
/* compares */
10413 return OP_COMPARE_IMM;
10415 return OP_ICOMPARE_IMM;
10417 return OP_LCOMPARE_IMM;
/* stores of a constant value */
10419 case OP_STORE_MEMBASE_REG:
10420 return OP_STORE_MEMBASE_IMM;
10421 case OP_STOREI1_MEMBASE_REG:
10422 return OP_STOREI1_MEMBASE_IMM;
10423 case OP_STOREI2_MEMBASE_REG:
10424 return OP_STOREI2_MEMBASE_IMM;
10425 case OP_STOREI4_MEMBASE_REG:
10426 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific opcodes */
10428 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10430 return OP_X86_PUSH_IMM;
10431 case OP_X86_COMPARE_MEMBASE_REG:
10432 return OP_X86_COMPARE_MEMBASE_IMM;
10434 #if defined(TARGET_AMD64)
10435 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10436 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10438 case OP_VOIDCALL_REG:
10439 return OP_VOIDCALL;
10447 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode.  Note that both LDIND_I (visible via its
 * return at 10470) and CEE_LDIND_REF map to the pointer-sized
 * OP_LOAD_MEMBASE.  Aborts on anything that is not an ldind opcode.
 * (Most case labels are elided from this excerpt.)
 */
10454 ldind_to_load_membase (int opcode)
10458 return OP_LOADI1_MEMBASE;
10460 return OP_LOADU1_MEMBASE;
10462 return OP_LOADI2_MEMBASE;
10464 return OP_LOADU2_MEMBASE;
10466 return OP_LOADI4_MEMBASE;
10468 return OP_LOADU4_MEMBASE;
10470 return OP_LOAD_MEMBASE;
10471 case CEE_LDIND_REF:
10472 return OP_LOAD_MEMBASE;
10474 return OP_LOADI8_MEMBASE;
10476 return OP_LOADR4_MEMBASE;
10478 return OP_LOADR8_MEMBASE;
/* not an ldind opcode */
10480 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Aborts on anything else.
 * (Most case labels are elided from this excerpt.)
 */
10487 stind_to_store_membase (int opcode)
10491 return OP_STOREI1_MEMBASE_REG;
10493 return OP_STOREI2_MEMBASE_REG;
10495 return OP_STOREI4_MEMBASE_REG;
10497 case CEE_STIND_REF:
10498 return OP_STORE_MEMBASE_REG;
10500 return OP_STOREI8_MEMBASE_REG;
10502 return OP_STORER4_MEMBASE_REG;
10504 return OP_STORER8_MEMBASE_REG;
/* not a stind opcode */
10506 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to the
 * corresponding OP_LOAD*_MEM opcode (load from an absolute constant address).
 * Only implemented on x86/amd64 — on other targets the #if excludes the
 * mappings entirely; the fallback return is elided from this excerpt.
 */
10513 mono_load_membase_to_load_mem (int opcode)
10515 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10516 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10518 case OP_LOAD_MEMBASE:
10519 return OP_LOAD_MEM;
10520 case OP_LOADU1_MEMBASE:
10521 return OP_LOADU1_MEM;
10522 case OP_LOADU2_MEMBASE:
10523 return OP_LOADU2_MEM;
10524 case OP_LOADI4_MEMBASE:
10525 return OP_LOADI4_MEM;
10526 case OP_LOADU4_MEMBASE:
10527 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64 bit targets */
10528 #if SIZEOF_REGISTER == 8
10529 case OP_LOADI8_MEMBASE:
10530 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds a following membase store
 * (STORE_OPCODE), return the fused read-modify-write-to-memory opcode
 * (e.g. OP_X86_ADD_MEMBASE_REG).  x86/amd64 only; the guard at the top
 * of each section rejects store widths the fused forms cannot handle.
 * The case labels and the failure fallback are elided from this excerpt.
 */
10539 op_to_op_dest_membase (int store_opcode, int opcode)
10541 #if defined(TARGET_X86)
/* x86 fused ops only support pointer-sized / 32 bit stores */
10542 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register second operand */
10547 return OP_X86_ADD_MEMBASE_REG;
10549 return OP_X86_SUB_MEMBASE_REG;
10551 return OP_X86_AND_MEMBASE_REG;
10553 return OP_X86_OR_MEMBASE_REG;
10555 return OP_X86_XOR_MEMBASE_REG;
/* immediate second operand */
10558 return OP_X86_ADD_MEMBASE_IMM;
10561 return OP_X86_SUB_MEMBASE_IMM;
10564 return OP_X86_AND_MEMBASE_IMM;
10567 return OP_X86_OR_MEMBASE_IMM;
10570 return OP_X86_XOR_MEMBASE_IMM;
10576 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8 byte stores */
10577 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the OP_X86_* fused opcodes */
10582 return OP_X86_ADD_MEMBASE_REG;
10584 return OP_X86_SUB_MEMBASE_REG;
10586 return OP_X86_AND_MEMBASE_REG;
10588 return OP_X86_OR_MEMBASE_REG;
10590 return OP_X86_XOR_MEMBASE_REG;
10592 return OP_X86_ADD_MEMBASE_IMM;
10594 return OP_X86_SUB_MEMBASE_IMM;
10596 return OP_X86_AND_MEMBASE_IMM;
10598 return OP_X86_OR_MEMBASE_IMM;
10600 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops get the OP_AMD64_* fused opcodes */
10602 return OP_AMD64_ADD_MEMBASE_REG;
10604 return OP_AMD64_SUB_MEMBASE_REG;
10606 return OP_AMD64_AND_MEMBASE_REG;
10608 return OP_AMD64_OR_MEMBASE_REG;
10610 return OP_AMD64_XOR_MEMBASE_REG;
10613 return OP_AMD64_ADD_MEMBASE_IMM;
10616 return OP_AMD64_SUB_MEMBASE_IMM;
10619 return OP_AMD64_AND_MEMBASE_IMM;
10622 return OP_AMD64_OR_MEMBASE_IMM;
10625 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-result opcode with a following 1-byte membase store
 * into a single set-condition-to-memory opcode (x86/amd64 SETcc with a
 * memory destination).  The case labels for the condition opcodes and the
 * failure fallback are elided from this excerpt.
 */
10635 op_to_op_store_membase (int store_opcode, int opcode)
10637 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only an 8 bit store can be fused */
10640 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10641 return OP_X86_SETEQ_MEMBASE;
10643 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10644 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a membase load feeding OPCODE's first source register into a
 * memory-operand form of the opcode (e.g. compare-with-memory, push from
 * memory), avoiding a separate load.  x86/amd64 only; case labels and the
 * failure fallback are elided from this excerpt.
 */
10652 op_to_op_src1_membase (int load_opcode, int opcode)
10655 /* FIXME: This has sign extension issues */
/* special case: 8 bit unsigned load + 32 bit immediate compare */
10657 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10658 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer-sized / 32 bit loads can be folded on x86 */
10661 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10666 return OP_X86_PUSH_MEMBASE;
10667 case OP_COMPARE_IMM:
10668 case OP_ICOMPARE_IMM:
10669 return OP_X86_COMPARE_MEMBASE_IMM;
10672 return OP_X86_COMPARE_MEMBASE_REG;
10676 #ifdef TARGET_AMD64
10677 /* FIXME: This has sign extension issues */
10679 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10680 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push folds pointer-sized / 64 bit loads on amd64 */
10685 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10686 return OP_X86_PUSH_MEMBASE;
10688 /* FIXME: This only works for 32 bit immediates
10689 case OP_COMPARE_IMM:
10690 case OP_LCOMPARE_IMM:
10691 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10692 return OP_AMD64_COMPARE_MEMBASE_IMM;
10694 case OP_ICOMPARE_IMM:
10695 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10696 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick 64 or 32 bit form by the load width */
10700 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10701 return OP_AMD64_COMPARE_MEMBASE_REG;
10704 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10705 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a membase load feeding OPCODE's second source register into a
 * reg-op-memory form of the opcode (e.g. OP_X86_ADD_REG_MEMBASE).
 * x86/amd64 only; case labels and the failure fallback are elided from
 * this excerpt.
 */
10714 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be folded */
10717 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10723 return OP_X86_COMPARE_REG_MEMBASE;
10725 return OP_X86_ADD_REG_MEMBASE;
10727 return OP_X86_SUB_REG_MEMBASE;
10729 return OP_X86_AND_REG_MEMBASE;
10731 return OP_X86_OR_REG_MEMBASE;
10733 return OP_X86_XOR_REG_MEMBASE;
10737 #ifdef TARGET_AMD64
/* amd64: 32 bit loads reuse the OP_X86_* fused forms */
10738 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10741 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10743 return OP_X86_ADD_REG_MEMBASE;
10745 return OP_X86_SUB_REG_MEMBASE;
10747 return OP_X86_AND_REG_MEMBASE;
10749 return OP_X86_OR_REG_MEMBASE;
10751 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit / pointer-sized loads get the OP_AMD64_* fused forms */
10753 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10757 return OP_AMD64_COMPARE_REG_MEMBASE;
10759 return OP_AMD64_ADD_REG_MEMBASE;
10761 return OP_AMD64_SUB_REG_MEMBASE;
10763 return OP_AMD64_AND_REG_MEMBASE;
10765 return OP_AMD64_OR_REG_MEMBASE;
10767 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes that
 * are software-emulated on the current architecture (long shifts on 32 bit
 * targets without native support, and mul/div under the emulation defines),
 * since the emulation helpers need register operands.  The excluded case
 * labels are elided from this excerpt.
 */
10776 mono_op_to_op_imm_noemul (int opcode)
10779 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10785 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* everything else may use the plain immediate mapping */
10793 return mono_op_to_op_imm (opcode);
10797 #ifndef DISABLE_JIT
10800 * mono_handle_global_vregs:
10802 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10806 mono_handle_global_vregs (MonoCompile *cfg)
10808 gint32 *vreg_to_bb;
10809 MonoBasicBlock *bb;
10812 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10814 #ifdef MONO_ARCH_SIMD_INTRINSICS
10815 if (cfg->uses_simd_intrinsics)
10816 mono_simd_simplify_indirection (cfg);
10819 /* Find local vregs used in more than one bb */
10820 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10821 MonoInst *ins = bb->code;
10822 int block_num = bb->block_num;
10824 if (cfg->verbose_level > 2)
10825 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10828 for (; ins; ins = ins->next) {
10829 const char *spec = INS_INFO (ins->opcode);
10830 int regtype = 0, regindex;
10833 if (G_UNLIKELY (cfg->verbose_level > 2))
10834 mono_print_ins (ins);
10836 g_assert (ins->opcode >= MONO_CEE_LAST);
10838 for (regindex = 0; regindex < 4; regindex ++) {
10841 if (regindex == 0) {
10842 regtype = spec [MONO_INST_DEST];
10843 if (regtype == ' ')
10846 } else if (regindex == 1) {
10847 regtype = spec [MONO_INST_SRC1];
10848 if (regtype == ' ')
10851 } else if (regindex == 2) {
10852 regtype = spec [MONO_INST_SRC2];
10853 if (regtype == ' ')
10856 } else if (regindex == 3) {
10857 regtype = spec [MONO_INST_SRC3];
10858 if (regtype == ' ')
10863 #if SIZEOF_REGISTER == 4
10864 /* In the LLVM case, the long opcodes are not decomposed */
10865 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10867 * Since some instructions reference the original long vreg,
10868 * and some reference the two component vregs, it is quite hard
10869 * to determine when it needs to be global. So be conservative.
10871 if (!get_vreg_to_inst (cfg, vreg)) {
10872 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10874 if (cfg->verbose_level > 2)
10875 printf ("LONG VREG R%d made global.\n", vreg);
10879 * Make the component vregs volatile since the optimizations can
10880 * get confused otherwise.
10882 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10883 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10887 g_assert (vreg != -1);
10889 prev_bb = vreg_to_bb [vreg];
10890 if (prev_bb == 0) {
10891 /* 0 is a valid block num */
10892 vreg_to_bb [vreg] = block_num + 1;
10893 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10894 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10897 if (!get_vreg_to_inst (cfg, vreg)) {
10898 if (G_UNLIKELY (cfg->verbose_level > 2))
10899 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10903 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10906 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10909 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10912 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10915 g_assert_not_reached ();
10919 /* Flag as having been used in more than one bb */
10920 vreg_to_bb [vreg] = -1;
10926 /* If a variable is used in only one bblock, convert it into a local vreg */
10927 for (i = 0; i < cfg->num_varinfo; i++) {
10928 MonoInst *var = cfg->varinfo [i];
10929 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10931 switch (var->type) {
10937 #if SIZEOF_REGISTER == 8
10940 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10941 /* Enabling this screws up the fp stack on x86 */
10944 /* Arguments are implicitly global */
10945 /* Putting R4 vars into registers doesn't work currently */
10946 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10948 * Make that the variable's liveness interval doesn't contain a call, since
10949 * that would cause the lvreg to be spilled, making the whole optimization
10952 /* This is too slow for JIT compilation */
10954 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10956 int def_index, call_index, ins_index;
10957 gboolean spilled = FALSE;
10962 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10963 const char *spec = INS_INFO (ins->opcode);
10965 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10966 def_index = ins_index;
10968 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10969 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10970 if (call_index > def_index) {
10976 if (MONO_IS_CALL (ins))
10977 call_index = ins_index;
10987 if (G_UNLIKELY (cfg->verbose_level > 2))
10988 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10989 var->flags |= MONO_INST_IS_DEAD;
10990 cfg->vreg_to_inst [var->dreg] = NULL;
10997 * Compress the varinfo and vars tables so the liveness computation is faster and
10998 * takes up less space.
11001 for (i = 0; i < cfg->num_varinfo; ++i) {
11002 MonoInst *var = cfg->varinfo [i];
11003 if (pos < i && cfg->locals_start == i)
11004 cfg->locals_start = pos;
11005 if (!(var->flags & MONO_INST_IS_DEAD)) {
11007 cfg->varinfo [pos] = cfg->varinfo [i];
11008 cfg->varinfo [pos]->inst_c0 = pos;
11009 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11010 cfg->vars [pos].idx = pos;
11011 #if SIZEOF_REGISTER == 4
11012 if (cfg->varinfo [pos]->type == STACK_I8) {
11013 /* Modify the two component vars too */
11016 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11017 var1->inst_c0 = pos;
11018 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11019 var1->inst_c0 = pos;
11026 cfg->num_varinfo = pos;
11027 if (cfg->locals_start > cfg->num_varinfo)
11028 cfg->locals_start = cfg->num_varinfo;
11032 * mono_spill_global_vars:
11034 * Generate spill code for variables which are not allocated to registers,
11035 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11036 * code is generated which could be optimized by the local optimization passes.
11039 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/*
 * NOTE(review): this excerpt is displayed with elided lines, so several names
 * used below ('spec2', 'lvregs', 'lvreg', 'store_opcode', 'ins', 'tree', ...)
 * are declared on lines not visible here.  Comments added below describe only
 * what the visible code shows; anything inferred is marked as such.
 */
11041 MonoBasicBlock *bb;
11043 int orig_next_vreg; /* cfg->next_vreg before this pass allocates lvregs; used below to tell original vars from freshly created lvregs */
11044 guint32 *vreg_to_lvreg;
11046 guint32 i, lvregs_len;
11047 gboolean dest_has_lvreg = FALSE; /* deferred flag: cache ins->dreg as the var's lvreg only after the sregs are processed (see comment further down) */
11048 guint32 stacktypes [128];
11049 MonoInst **live_range_start, **live_range_end;
11050 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11052 *need_local_opts = FALSE;
11054 memset (spec2, 0, sizeof (spec2));
11056 /* FIXME: Move this function to mini.c */
/* Map the ins-spec regtype characters ('i'/'l'/'f'/'x') to the stack type
 * passed to alloc_dreg () when a temporary lvreg must be created. */
11057 stacktypes ['i'] = STACK_PTR;
11058 stacktypes ['l'] = STACK_I8;
11059 stacktypes ['f'] = STACK_R8;
11060 #ifdef MONO_ARCH_SIMD_INTRINSICS
11061 stacktypes ['x'] = STACK_VTYPE;
11064 #if SIZEOF_REGISTER == 4
11065 /* Create MonoInsts for longs */
/* On 32-bit targets a long variable has two component vregs (dreg + 1 and
 * dreg + 2); give each a REGOFFSET slot at the LS/MS word offset within the
 * parent variable's stack slot. */
11066 for (i = 0; i < cfg->num_varinfo; i++) {
11067 MonoInst *ins = cfg->varinfo [i];
11069 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11070 switch (ins->type) {
11075 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11078 g_assert (ins->opcode == OP_REGOFFSET);
11080 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11082 tree->opcode = OP_REGOFFSET;
11083 tree->inst_basereg = ins->inst_basereg;
11084 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11086 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11088 tree->opcode = OP_REGOFFSET;
11089 tree->inst_basereg = ins->inst_basereg;
11090 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11100 /* FIXME: widening and truncation */
11103 * As an optimization, when a variable allocated to the stack is first loaded into
11104 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11105 * the variable again.
11107 orig_next_vreg = cfg->next_vreg;
11108 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11109 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
/* NOTE(review): 'lvregs' is a fixed 1024-entry scratch list of vregs with a
 * cached lvreg; the g_assert (lvregs_len < 1024) checks below enforce the cap. */
11113 * These arrays contain the first and last instructions accessing a given
11115 * Since we emit bblocks in the same order we process them here, and we
11116 * don't split live ranges, these will precisely describe the live range of
11117 * the variable, i.e. the instruction range where a valid value can be found
11118 * in the variables location.
11119 * The live range is computed using the liveness info computed by the liveness pass.
11120 * We can't use vmv->range, since that is an abstract live range, and we need
11121 * one which is instruction precise.
11122 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11124 /* FIXME: Only do this if debugging info is requested */
11125 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11126 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11127 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11128 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11130 /* Add spill loads/stores */
11131 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11134 if (cfg->verbose_level > 2)
11135 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11137 /* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock, so drop them here. */
11138 for (i = 0; i < lvregs_len; i++)
11139 vreg_to_lvreg [lvregs [i]] = 0;
11143 MONO_BB_FOR_EACH_INS (bb, ins) {
11144 const char *spec = INS_INFO (ins->opcode);
11145 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11146 gboolean store, no_lvreg;
11147 int sregs [MONO_MAX_SRC_REGS];
11149 if (G_UNLIKELY (cfg->verbose_level > 2))
11150 mono_print_ins (ins);
11152 if (ins->opcode == OP_NOP)
11156 * We handle LDADDR here as well, since it can only be decomposed
11157 * when variable addresses are known.
11159 if (ins->opcode == OP_LDADDR) {
11160 MonoInst *var = ins->inst_p0;
11162 if (var->opcode == OP_VTARG_ADDR) {
11163 /* Happens on SPARC/S390 where vtypes are passed by reference */
11164 MonoInst *vtaddr = var->inst_left;
11165 if (vtaddr->opcode == OP_REGVAR) {
11166 ins->opcode = OP_MOVE;
11167 ins->sreg1 = vtaddr->dreg;
11169 else if (var->inst_left->opcode == OP_REGOFFSET) {
11170 ins->opcode = OP_LOAD_MEMBASE;
11171 ins->inst_basereg = vtaddr->inst_basereg;
11172 ins->inst_offset = vtaddr->inst_offset;
11176 g_assert (var->opcode == OP_REGOFFSET);
/* Common case: the variable's address is basereg + offset. */
11178 ins->opcode = OP_ADD_IMM;
11179 ins->sreg1 = var->inst_basereg;
11180 ins->inst_imm = var->inst_offset;
11183 *need_local_opts = TRUE;
11184 spec = INS_INFO (ins->opcode);
/* All IL-level (CEE_*) opcodes must have been lowered before this pass. */
11187 if (ins->opcode < MONO_CEE_LAST) {
11188 mono_print_ins (ins);
11189 g_assert_not_reached ();
11193 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* (comment continues on an elided line) Swap dreg/sreg2 so the generic
 * dreg/sreg handling below treats the base register as a source; 'spec2'
 * is the adjusted spec with no dest.  The matching swap further down
 * appears to restore the original layout — TODO confirm against the
 * elided lines. */
11197 if (MONO_IS_STORE_MEMBASE (ins)) {
11198 tmp_reg = ins->dreg;
11199 ins->dreg = ins->sreg2;
11200 ins->sreg2 = tmp_reg;
11203 spec2 [MONO_INST_DEST] = ' ';
11204 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11205 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11206 spec2 [MONO_INST_SRC3] = ' ';
11208 } else if (MONO_IS_STORE_MEMINDEX (ins))
11209 g_assert_not_reached ();
11214 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11215 printf ("\t %.3s %d", spec, ins->dreg);
11216 num_sregs = mono_inst_get_src_registers (ins, sregs);
11217 for (srcindex = 0; srcindex < 3; ++srcindex)
11218 printf (" %d", sregs [srcindex]);
/* ---- Rewrite the destination if it names a global variable ---- */
11225 regtype = spec [MONO_INST_DEST];
11226 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11229 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11230 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11231 MonoInst *store_ins;
11233 MonoInst *def_ins = ins;
11234 int dreg = ins->dreg; /* The original vreg */
11236 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11238 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just retarget the dreg. */
11239 ins->dreg = var->dreg;
11240 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11242 * Instead of emitting a load+store, use a _membase opcode.
11244 g_assert (var->opcode == OP_REGOFFSET);
11245 if (ins->opcode == OP_MOVE) {
11249 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11250 ins->inst_basereg = var->inst_basereg;
11251 ins->inst_offset = var->inst_offset;
11254 spec = INS_INFO (ins->opcode);
11258 g_assert (var->opcode == OP_REGOFFSET);
11260 prev_dreg = ins->dreg;
11262 /* Invalidate any previous lvreg for this vreg */
11263 vreg_to_lvreg [ins->dreg] = 0;
11267 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are kept in integer registers/slots. */
11269 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Give the instruction a fresh lvreg and store that into the var's slot. */
11272 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11274 if (regtype == 'l') {
/* Long on 32 bit: store both component words (dreg+1 = LS, dreg+2 = MS). */
11275 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11276 mono_bblock_insert_after_ins (bb, ins, store_ins);
11277 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11278 mono_bblock_insert_after_ins (bb, ins, store_ins);
11279 def_ins = store_ins;
11282 g_assert (store_opcode != OP_STOREV_MEMBASE);
11284 /* Try to fuse the store into the instruction itself */
11285 /* FIXME: Add more instructions */
11286 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11287 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11288 ins->inst_imm = ins->inst_c0;
11289 ins->inst_destbasereg = var->inst_basereg;
11290 ins->inst_offset = var->inst_offset;
11291 spec = INS_INFO (ins->opcode);
11292 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A plain move becomes the store itself. */
11293 ins->opcode = store_opcode;
11294 ins->inst_destbasereg = var->inst_basereg;
11295 ins->inst_offset = var->inst_offset;
11299 tmp_reg = ins->dreg;
11300 ins->dreg = ins->sreg2;
11301 ins->sreg2 = tmp_reg;
11304 spec2 [MONO_INST_DEST] = ' ';
11305 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11306 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11307 spec2 [MONO_INST_SRC3] = ' ';
11309 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11310 // FIXME: The backends expect the base reg to be in inst_basereg
11311 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11313 ins->inst_basereg = var->inst_basereg;
11314 ins->inst_offset = var->inst_offset;
11315 spec = INS_INFO (ins->opcode);
11317 /* printf ("INS: "); mono_print_ins (ins); */
11318 /* Create a store instruction */
11319 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11321 /* Insert it after the instruction */
11322 mono_bblock_insert_after_ins (bb, ins, store_ins);
11324 def_ins = store_ins;
11327 * We can't assign ins->dreg to var->dreg here, since the
11328 * sregs could use it. So set a flag, and do it after
11331 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11332 dest_has_lvreg = TRUE;
/* Record the first definition point for the live range. */
11337 if (def_ins && !live_range_start [dreg]) {
11338 live_range_start [dreg] = def_ins;
11339 live_range_start_bb [dreg] = bb;
/* ---- Rewrite each source register the same way, inserting loads ---- */
11346 num_sregs = mono_inst_get_src_registers (ins, sregs);
11347 for (srcindex = 0; srcindex < 3; ++srcindex) {
11348 regtype = spec [MONO_INST_SRC1 + srcindex];
11349 sreg = sregs [srcindex];
11351 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11352 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11353 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11354 MonoInst *use_ins = ins;
11355 MonoInst *load_ins;
11356 guint32 load_opcode;
11358 if (var->opcode == OP_REGVAR) {
11359 sregs [srcindex] = var->dreg;
11360 //mono_inst_set_src_registers (ins, sregs);
11361 live_range_end [sreg] = use_ins;
11362 live_range_end_bb [sreg] = bb;
11366 g_assert (var->opcode == OP_REGOFFSET);
11368 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11370 g_assert (load_opcode != OP_LOADV_MEMBASE);
11372 if (vreg_to_lvreg [sreg]) {
11373 g_assert (vreg_to_lvreg [sreg] != -1);
11375 /* The variable is already loaded to an lvreg */
11376 if (G_UNLIKELY (cfg->verbose_level > 2))
11377 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11378 sregs [srcindex] = vreg_to_lvreg [sreg];
11379 //mono_inst_set_src_registers (ins, sregs);
11383 /* Try to fuse the load into the instruction */
11384 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11385 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11386 sregs [0] = var->inst_basereg;
11387 //mono_inst_set_src_registers (ins, sregs);
11388 ins->inst_offset = var->inst_offset;
11389 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11390 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11391 sregs [1] = var->inst_basereg;
11392 //mono_inst_set_src_registers (ins, sregs);
11393 ins->inst_offset = var->inst_offset;
11395 if (MONO_IS_REAL_MOVE (ins)) {
/* A move whose source gets loaded directly is redundant. */
11396 ins->opcode = OP_NOP;
11399 //printf ("%d ", srcindex); mono_print_ins (ins);
11401 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value as the variable's lvreg where safe. */
11403 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11404 if (var->dreg == prev_dreg) {
11406 * sreg refers to the value loaded by the load
11407 * emitted below, but we need to use ins->dreg
11408 * since it refers to the store emitted earlier.
11412 g_assert (sreg != -1);
11413 vreg_to_lvreg [var->dreg] = sreg;
11414 g_assert (lvregs_len < 1024);
11415 lvregs [lvregs_len ++] = var->dreg;
11419 sregs [srcindex] = sreg;
11420 //mono_inst_set_src_registers (ins, sregs);
11422 if (regtype == 'l') {
/* Long on 32 bit: load MS word first, then LS word (sreg+2 / sreg+1). */
11423 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11424 mono_bblock_insert_before_ins (bb, ins, load_ins);
11425 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11426 mono_bblock_insert_before_ins (bb, ins, load_ins);
11427 use_ins = load_ins;
11430 #if SIZEOF_REGISTER == 4
11431 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11433 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11434 mono_bblock_insert_before_ins (bb, ins, load_ins);
11435 use_ins = load_ins;
/* Only original variables (not freshly created lvregs) get live ranges. */
11439 if (var->dreg < orig_next_vreg) {
11440 live_range_end [var->dreg] = use_ins;
11441 live_range_end_bb [var->dreg] = bb;
11445 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dest handling above: now that the sregs are done it is
 * safe to cache ins->dreg as the lvreg for prev_dreg. */
11447 if (dest_has_lvreg) {
11448 g_assert (ins->dreg != -1);
11449 vreg_to_lvreg [prev_dreg] = ins->dreg;
11450 g_assert (lvregs_len < 1024);
11451 lvregs [lvregs_len ++] = prev_dreg;
11452 dest_has_lvreg = FALSE;
11456 tmp_reg = ins->dreg;
11457 ins->dreg = ins->sreg2;
11458 ins->sreg2 = tmp_reg;
11461 if (MONO_IS_CALL (ins)) {
11462 /* Clear vreg_to_lvreg array */
/* NOTE(review): presumably because an lvreg live across a call would have
 * to be spilled (see the matching comment in the vreg-to-lvreg conversion
 * pass above) — confirm. */
11463 for (i = 0; i < lvregs_len; i++)
11464 vreg_to_lvreg [lvregs [i]] = 0;
11466 } else if (ins->opcode == OP_NOP) {
11468 MONO_INST_NULLIFY_SREGS (ins);
11471 if (cfg->verbose_level > 2)
11472 mono_print_ins_index (1, ins);
11475 /* Extend the live range based on the liveness info */
11476 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11477 for (i = 0; i < cfg->num_varinfo; i ++) {
11478 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11480 if (vreg_is_volatile (cfg, vi->vreg))
11481 /* The liveness info is incomplete */
11484 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11485 /* Live from at least the first ins of this bb */
11486 live_range_start [vi->vreg] = bb->code;
11487 live_range_start_bb [vi->vreg] = bb;
11490 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11491 /* Live at least until the last ins of this bb */
11492 live_range_end [vi->vreg] = bb->last_ins;
11493 live_range_end_bb [vi->vreg] = bb;
11499 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11501 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11502 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11504 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11505 for (i = 0; i < cfg->num_varinfo; ++i) {
11506 int vreg = MONO_VARINFO (cfg, i)->vreg;
11509 if (live_range_start [vreg]) {
11510 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11512 ins->inst_c1 = vreg;
11513 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11515 if (live_range_end [vreg]) {
11516 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11518 ins->inst_c1 = vreg;
11519 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11520 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11522 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* Free the per-vreg live range tracking arrays (heap-allocated with g_new0/g_new above). */
11528 g_free (live_range_start);
11529 g_free (live_range_end);
11530 g_free (live_range_start_bb);
11531 g_free (live_range_end_bb);
/*
 * mono_create_helper_signatures:
 *
 *   Initialize the cached icall signatures used by the trampoline helpers.
 * The strings appear to be "<return type> [<param types>...]" as parsed by
 * mono_create_icall_signature () (e.g. "ptr ptr" = ptr return, one ptr arg;
 * the *_llvm variants take one extra argument) — TODO confirm against the
 * signature parser, which is not visible in this chunk.
 */
11535 mono_create_helper_signatures (void)
11537 helper_sig_domain_get = mono_create_icall_signature ("ptr");
11538 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
11539 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
11540 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
11541 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
11542 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
11543 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
11548 * - use 'iadd' instead of 'int_add'
11549 * - handling ovf opcodes: decompose in method_to_ir.
11550 * - unify iregs/fregs
11551 * -> partly done, the missing parts are:
11552 * - a more complete unification would involve unifying the hregs as well, so
11553 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11554 * would no longer map to the machine hregs, so the code generators would need to
11555 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11556 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11557 * fp/non-fp branches speeds it up by about 15%.
11558 * - use sext/zext opcodes instead of shifts
11560 * - get rid of TEMPLOADs if possible and use vregs instead
11561 * - clean up usage of OP_P/OP_ opcodes
11562 * - cleanup usage of DUMMY_USE
11563 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11565 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11566 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11567 * - make sure handle_stack_args () is called before the branch is emitted
11568 * - when the new IR is done, get rid of all unused stuff
11569 * - COMPARE/BEQ as separate instructions or unify them ?
11570 * - keeping them separate allows specialized compare instructions like
11571 * compare_imm, compare_membase
11572 * - most back ends unify fp compare+branch, fp compare+ceq
11573 * - integrate mono_save_args into inline_method
11574 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11575 * - handle long shift opts on 32 bit platforms somehow: they require
11576 * 3 sregs (2 for arg1 and 1 for arg2)
11577 * - make byref a 'normal' type.
11578 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11579 * variable if needed.
11580 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11581 * like inline_method.
11582 * - remove inlining restrictions
11583 * - fix LNEG and enable cfold of INEG
11584 * - generalize x86 optimizations like ldelema as a peephole optimization
11585 * - add store_mem_imm for amd64
11586 * - optimize the loading of the interruption flag in the managed->native wrappers
11587 * - avoid special handling of OP_NOP in passes
11588 * - move code inserting instructions into one function/macro.
11589 * - try a coalescing phase after liveness analysis
11590 * - add float -> vreg conversion + local optimizations on !x86
11591 * - figure out how to handle decomposed branches during optimizations, ie.
11592 * compare+branch, op_jump_table+op_br etc.
11593 * - promote RuntimeXHandles to vregs
11594 * - vtype cleanups:
11595 * - add a NEW_VARLOADA_VREG macro
11596 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11597 * accessing vtype fields.
11598 * - get rid of I8CONST on 64 bit platforms
11599 * - dealing with the increase in code size due to branches created during opcode
11601 * - use extended basic blocks
11602 * - all parts of the JIT
11603 * - handle_global_vregs () && local regalloc
11604 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11605 * - sources of increase in code size:
11608 * - isinst and castclass
11609 * - lvregs not allocated to global registers even if used multiple times
11610 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11612 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11613 * - add all micro optimizations from the old JIT
11614 * - put tree optimizations into the deadce pass
11615 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11616 * specific function.
11617 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11618 * fcompare + branchCC.
11619 * - create a helper function for allocating a stack slot, taking into account
11620 * MONO_CFG_HAS_SPILLUP.
11622 * - merge the ia64 switch changes.
11623 * - optimize mono_regstate2_alloc_int/float.
11624 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11625 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11626 * parts of the tree could be separated by other instructions, killing the tree
11627 * arguments, or stores killing loads etc. Also, should we fold loads into other
11628 * instructions if the result of the load is used multiple times ?
11629 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11630 * - LAST MERGE: 108395.
11631 * - when returning vtypes in registers, generate IR and append it to the end of the
11632 * last bb instead of doing it in the epilog.
11633 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11641 - When to decompose opcodes:
11642 - earlier: this makes some optimizations hard to implement, since the low level IR
11643 no longer contains the necessary information. But it is easier to do.
11644 - later: harder to implement, enables more optimizations.
11645 - Branches inside bblocks:
11646 - created when decomposing complex opcodes.
11647 - branches to another bblock: harmless, but not tracked by the branch
11648 optimizations, so need to branch to a label at the start of the bblock.
11649 - branches to inside the same bblock: very problematic, trips up the local
11650 reg allocator. Can be fixed by splitting the current bblock, but that is a
11651 complex operation, since some local vregs can become global vregs etc.
11652 - Local/global vregs:
11653 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11654 local register allocator.
11655 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11656 structure, created by mono_create_var (). Assigned to hregs or the stack by
11657 the global register allocator.
11658 - When to do optimizations like alu->alu_imm:
11659 - earlier -> saves work later on since the IR will be smaller/simpler
11660 - later -> can work on more instructions
11661 - Handling of valuetypes:
11662 - When a vtype is pushed on the stack, a new temporary is created, an
11663 instruction computing its address (LDADDR) is emitted and pushed on
11664 the stack. Need to optimize cases when the vtype is used immediately as in
11665 argument passing, stloc etc.
11666 - Instead of the to_end stuff in the old JIT, simply call the function handling
11667 the values on the stack before emitting the last instruction of the bb.
11670 #endif /* DISABLE_JIT */