2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* Relative cost assigned to a branch by the inlining heuristic — NOTE(review): units are heuristic weight, not cycles; confirm against INLINE_FAILURE users */
65 #define BRANCH_COST 10
/* Upper bound on the size of a method considered for inlining — presumably IL bytes; verify against the inline-candidate check */
66 #define INLINE_LENGTH_LIMIT 20
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
102 /* Determine whether 'ins' represents a load of the 'this' argument */
103 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: opcode-translation helpers and IR-emission entry points.
 * The static ones are defined later in this file; the rest are shared with other
 * mini/* translation units. */
105 static int ldind_to_load_membase (int opcode);
106 static int stind_to_store_membase (int opcode);
108 int mono_op_to_op_imm (int opcode);
109 int mono_op_to_op_imm_noemul (int opcode);
111 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
112 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
113 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
115 /* helper methods signature */
116 extern MonoMethodSignature *helper_sig_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_domain_get;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
119 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
120 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
122 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
125 * Instruction metadata
133 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
134 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
140 #if SIZEOF_REGISTER == 8
145 /* keep in sync with the enum in mini.h */
148 #include "mini-ops.h"
153 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
154 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
156 * This should contain the index of the last sreg + 1. This is not the same
157 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
159 const gint8 ins_sreg_counts[] = {
160 #include "mini-ops.h"
165 #define MONO_INIT_VARINFO(vi,id) do { \
166 (vi)->range.first_use.pos.bid = 0xffff; \
172 mono_inst_set_src_registers (MonoInst *ins, int *regs)
174 ins->sreg1 = regs [0];
175 ins->sreg2 = regs [1];
176 ins->sreg3 = regs [2];
180 mono_alloc_ireg (MonoCompile *cfg)
182 return alloc_ireg (cfg);
186 mono_alloc_freg (MonoCompile *cfg)
188 return alloc_freg (cfg);
192 mono_alloc_preg (MonoCompile *cfg)
194 return alloc_preg (cfg);
198 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
200 return alloc_dreg (cfg, stack_type);
204 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
210 switch (type->type) {
213 case MONO_TYPE_BOOLEAN:
225 case MONO_TYPE_FNPTR:
227 case MONO_TYPE_CLASS:
228 case MONO_TYPE_STRING:
229 case MONO_TYPE_OBJECT:
230 case MONO_TYPE_SZARRAY:
231 case MONO_TYPE_ARRAY:
235 #if SIZEOF_REGISTER == 8
244 case MONO_TYPE_VALUETYPE:
245 if (type->data.klass->enumtype) {
246 type = mono_class_enum_basetype (type->data.klass);
249 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
252 case MONO_TYPE_TYPEDBYREF:
254 case MONO_TYPE_GENERICINST:
255 type = &type->data.generic_class->container_class->byval_arg;
259 g_assert (cfg->generic_sharing_context);
262 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
268 mono_print_bb (MonoBasicBlock *bb, const char *msg)
273 printf ("\n%s %d: [IN: ", msg, bb->block_num);
274 for (i = 0; i < bb->in_count; ++i)
275 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
277 for (i = 0; i < bb->out_count; ++i)
278 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
280 for (tree = bb->code; tree; tree = tree->next)
281 mono_print_ins_index (-1, tree);
285 * Can't put this at the beginning, since other files reference stuff from this
/* Abort IL-to-IR conversion of the current method as unverifiable: trap into the
 * debugger when the break_on_unverified debug option is set, otherwise jump to
 * the 'unverified' label (expected to exist in the enclosing function). */
290 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Abort conversion on a metadata/type-load failure: same debugger-break behavior
 * as UNVERIFIED, but jumps to the 'load_error' label instead. */
292 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
294 #define GET_BBLOCK(cfg,tblock,ip) do { \
295 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
297 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
298 NEW_BBLOCK (cfg, (tblock)); \
299 (tblock)->cil_code = (ip); \
300 ADD_BBLOCK (cfg, (tblock)); \
304 #if defined(TARGET_X86) || defined(TARGET_AMD64)
305 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
306 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
307 (dest)->dreg = alloc_preg ((cfg)); \
308 (dest)->sreg1 = (sr1); \
309 (dest)->sreg2 = (sr2); \
310 (dest)->inst_imm = (imm); \
311 (dest)->backend.shift_amount = (shift); \
312 MONO_ADD_INS ((cfg)->cbb, (dest)); \
316 #if SIZEOF_REGISTER == 8
317 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
318 /* FIXME: Need to add many more cases */ \
319 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
321 int dr = alloc_preg (cfg); \
322 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
323 (ins)->sreg2 = widen->dreg; \
327 #define ADD_WIDEN_OP(ins, arg1, arg2)
330 #define ADD_BINOP(op) do { \
331 MONO_INST_NEW (cfg, ins, (op)); \
333 ins->sreg1 = sp [0]->dreg; \
334 ins->sreg2 = sp [1]->dreg; \
335 type_from_op (ins, sp [0], sp [1]); \
337 /* Have to insert a widening op */ \
338 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
339 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
340 MONO_ADD_INS ((cfg)->cbb, (ins)); \
341 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
344 #define ADD_UNOP(op) do { \
345 MONO_INST_NEW (cfg, ins, (op)); \
347 ins->sreg1 = sp [0]->dreg; \
348 type_from_op (ins, sp [0], NULL); \
350 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
351 MONO_ADD_INS ((cfg)->cbb, (ins)); \
352 *sp++ = mono_decompose_opcode (cfg, ins); \
355 #define ADD_BINCOND(next_block) do { \
358 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
359 cmp->sreg1 = sp [0]->dreg; \
360 cmp->sreg2 = sp [1]->dreg; \
361 type_from_op (cmp, sp [0], sp [1]); \
363 type_from_op (ins, sp [0], sp [1]); \
364 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
365 GET_BBLOCK (cfg, tblock, target); \
366 link_bblock (cfg, bblock, tblock); \
367 ins->inst_true_bb = tblock; \
368 if ((next_block)) { \
369 link_bblock (cfg, bblock, (next_block)); \
370 ins->inst_false_bb = (next_block); \
371 start_new_bblock = 1; \
373 GET_BBLOCK (cfg, tblock, ip); \
374 link_bblock (cfg, bblock, tblock); \
375 ins->inst_false_bb = tblock; \
376 start_new_bblock = 2; \
378 if (sp != stack_start) { \
379 handle_stack_args (cfg, stack_start, sp - stack_start); \
380 CHECK_UNVERIFIABLE (cfg); \
382 MONO_ADD_INS (bblock, cmp); \
383 MONO_ADD_INS (bblock, ins); \
387 * link_bblock: Links two basic blocks
389 * links two basic blocks in the control flow graph, the 'from'
390 * argument is the starting block and the 'to' argument is the block
391 * the control flow reaches after 'from'.
394 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
396 MonoBasicBlock **newa;
400 if (from->cil_code) {
402 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
404 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
407 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
409 printf ("edge from entry to exit\n");
414 for (i = 0; i < from->out_count; ++i) {
415 if (to == from->out_bb [i]) {
421 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
422 for (i = 0; i < from->out_count; ++i) {
423 newa [i] = from->out_bb [i];
431 for (i = 0; i < to->in_count; ++i) {
432 if (from == to->in_bb [i]) {
438 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
439 for (i = 0; i < to->in_count; ++i) {
440 newa [i] = to->in_bb [i];
449 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
451 link_bblock (cfg, from, to);
455 * mono_find_block_region:
457 * We mark each basic block with a region ID. We use that to avoid BB
458 * optimizations when blocks are in different regions.
461 * A region token that encodes where this region is, and information
462 * about the clause owner for this block.
464 * The region encodes the try/catch/filter clause that owns this block
465 * as well as the type. -1 is a special value that represents a block
466 * that is in none of try/catch/filter.
469 mono_find_block_region (MonoCompile *cfg, int offset)
471 MonoMethodHeader *header = cfg->header;
472 MonoExceptionClause *clause;
475 for (i = 0; i < header->num_clauses; ++i) {
476 clause = &header->clauses [i];
477 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
478 (offset < (clause->handler_offset)))
479 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
481 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
482 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
483 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
484 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
485 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
487 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
490 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
491 return ((i + 1) << 8) | clause->flags;
498 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
500 MonoMethodHeader *header = cfg->header;
501 MonoExceptionClause *clause;
505 for (i = 0; i < header->num_clauses; ++i) {
506 clause = &header->clauses [i];
507 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
508 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
509 if (clause->flags == type)
510 res = g_list_append (res, clause);
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 t = mono_type_get_underlying_type (t);
1103 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_CHAR:
1113 case MONO_TYPE_FNPTR:
1115 case MONO_TYPE_CLASS:
1116 case MONO_TYPE_STRING:
1117 case MONO_TYPE_OBJECT:
1118 case MONO_TYPE_SZARRAY:
1119 case MONO_TYPE_ARRAY:
1127 case MONO_TYPE_VALUETYPE:
1128 case MONO_TYPE_TYPEDBYREF:
1130 case MONO_TYPE_GENERICINST:
1131 if (mono_type_generic_inst_is_valuetype (t))
1137 g_assert_not_reached ();
1144 array_access_to_klass (int opcode)
1148 return mono_defaults.byte_class;
1150 return mono_defaults.uint16_class;
1153 return mono_defaults.int_class;
1156 return mono_defaults.sbyte_class;
1159 return mono_defaults.int16_class;
1162 return mono_defaults.int32_class;
1164 return mono_defaults.uint32_class;
1167 return mono_defaults.int64_class;
1170 return mono_defaults.single_class;
1173 return mono_defaults.double_class;
1174 case CEE_LDELEM_REF:
1175 case CEE_STELEM_REF:
1176 return mono_defaults.object_class;
1178 g_assert_not_reached ();
1184 * We try to share variables when possible
1187 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1192 /* inlining can result in deeper stacks */
1193 if (slot >= cfg->header->max_stack)
1194 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1196 pos = ins->type - 1 + slot * STACK_MAX;
1198 switch (ins->type) {
1205 if ((vnum = cfg->intvars [pos]))
1206 return cfg->varinfo [vnum];
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 cfg->intvars [pos] = res->inst_c0;
1211 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1217 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1220 * Don't use this if a generic_context is set, since that means AOT can't
1221 * look up the method using just the image+token.
1222 * table == 0 means this is a reference made from a wrapper.
1224 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1225 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1226 jump_info_token->image = image;
1227 jump_info_token->token = token;
1228 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single joint point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the joint point).
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
1245 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1248 MonoBasicBlock *bb = cfg->cbb;
1249 MonoBasicBlock *outb;
1250 MonoInst *inst, **locals;
1255 if (cfg->verbose_level > 3)
1256 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1257 if (!bb->out_scount) {
1258 bb->out_scount = count;
1259 //printf ("bblock %d has out:", bb->block_num);
1261 for (i = 0; i < bb->out_count; ++i) {
1262 outb = bb->out_bb [i];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb->flags & BB_EXCEPTION_HANDLER)
1266 //printf (" %d", outb->block_num);
1267 if (outb->in_stack) {
1269 bb->out_stack = outb->in_stack;
1275 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1276 for (i = 0; i < count; ++i) {
1278 * try to reuse temps already allocated for this purpose, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1289 if (cfg->inlined_method)
1290 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1292 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1297 for (i = 0; i < bb->out_count; ++i) {
1298 outb = bb->out_bb [i];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb->flags & BB_EXCEPTION_HANDLER)
1302 if (outb->in_scount) {
1303 if (outb->in_scount != bb->out_scount) {
1304 cfg->unverifiable = TRUE;
1307 continue; /* check they are the same locals */
1309 outb->in_scount = count;
1310 outb->in_stack = bb->out_stack;
1313 locals = bb->out_stack;
1315 for (i = 0; i < count; ++i) {
1316 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1317 inst->cil_code = sp [i]->cil_code;
1318 sp [i] = locals [i];
1319 if (cfg->verbose_level > 3)
1320 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1332 /* Find a bblock which has a different in_stack */
1334 while (bindex < bb->out_count) {
1335 outb = bb->out_bb [bindex];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb->flags & BB_EXCEPTION_HANDLER) {
1341 if (outb->in_stack != locals) {
1342 for (i = 0; i < count; ++i) {
1343 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1344 inst->cil_code = sp [i]->cil_code;
1345 sp [i] = locals [i];
1346 if (cfg->verbose_level > 3)
1347 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1349 locals = outb->in_stack;
1358 /* Emit code which loads interface_offsets [klass->interface_id]
1359 * The array is stored in memory before vtable.
/*
 * Loads into intf_reg the interface-offset entry for "klass", reading the
 * interface_offsets table that is laid out immediately before the vtable
 * pointed to by vtable_reg.
 */
1362 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1364 if (cfg->compile_aot) {
/* AOT: interface_id is not known until runtime, so materialize it via an
 * ADJUSTED_IID patch constant and compute the table slot address dynamically. */
1365 int ioffset_reg = alloc_preg (cfg);
1366 int iid_reg = alloc_preg (cfg);
1368 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1369 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so a single load at a fixed
 * negative offset from the vtable suffices. */
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Loads into intf_bit_reg a nonzero value iff the interface bitmap found at
 * base_reg + offset has the bit for klass's interface id set.  base_reg may
 * point at either a MonoClass or a MonoVTable (see the wrappers below).
 */
1378 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1380 int ibitmap_reg = alloc_preg (cfg);
1381 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: the check cannot be done with simple loads, so call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1383 MonoInst *res, *ins;
1384 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1385 MONO_ADD_INS (cfg->cbb, ins);
1387 if (cfg->compile_aot)
1388 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1390 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1391 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1392 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1394 int ibitmap_byte_reg = alloc_preg (cfg);
1396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1398 if (cfg->compile_aot) {
/* AOT: iid is a runtime patch constant; compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted IR instead of at compile time. */
1399 int iid_reg = alloc_preg (cfg);
1400 int shifted_iid_reg = alloc_preg (cfg);
1401 int ibitmap_byte_address_reg = alloc_preg (cfg);
1402 int masked_iid_reg = alloc_preg (cfg);
1403 int iid_one_bit_reg = alloc_preg (cfg);
1404 int iid_bit_reg = alloc_preg (cfg);
1405 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1408 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1410 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1411 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask fold to immediates. */
1414 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1421 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1422 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoClass. */
1425 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1427 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1431 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1432 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoVTable. */
1435 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1437 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1441 * Emit code which checks whenever the interface id of @klass is smaller than
1442 * than the value given by max_iid_reg.
/*
 * On failure (max_iid < iid) either branches to false_target or throws
 * InvalidCastException — both emissions are visible below; presumably the
 * choice is guarded by a NULL test on false_target (elided here — verify).
 */
1445 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1446 MonoBasicBlock *false_target)
1448 if (cfg->compile_aot) {
/* AOT: iid comes from a patch constant, so a reg-reg compare is needed. */
1449 int iid_reg = alloc_preg (cfg);
1450 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1456 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1458 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1461 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id (16-bit unsigned) then delegates. */
1463 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1464 MonoBasicBlock *false_target)
1466 int max_iid_reg = alloc_preg (cfg);
1468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1469 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1472 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id (16-bit unsigned) then delegates. */
1474 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1475 MonoBasicBlock *false_target)
1477 int max_iid_reg = alloc_preg (cfg);
1479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1480 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emits an "is instance" test using the supertypes table of the class in
 * klass_reg: the candidate superclass at depth (klass->idepth - 1) is
 * compared against "klass" (or against klass_ins->dreg / a class constant,
 * depending on sharing/AOT mode).  On equality control branches to
 * true_target; the deep-hierarchy pre-check branches to false_target.
 */
1484 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1486 int idepth_reg = alloc_preg (cfg);
1487 int stypes_reg = alloc_preg (cfg);
1488 int stype = alloc_preg (cfg);
/* If klass is deeper than the inline supertable, first make sure the runtime
 * class's idepth is large enough, otherwise the table index is out of range. */
1490 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1491 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1492 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1493 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1496 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to obtain the expected class: an already-computed MonoInst
 * (generic sharing), an AOT class constant, or an immediate pointer. */
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1499 } else if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no precomputed class instruction. */
1510 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emits an interface cast check against the vtable in vtable_reg:
 * range-check the interface id, test the interface bitmap, then either
 * branch (true_target/false_target) or throw InvalidCastException.
 * NOTE(review): both the branch and the throw are emitted below; presumably
 * guarded by a NULL test on the target blocks (elided in this excerpt).
 */
1516 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1518 int intf_reg = alloc_preg (cfg);
1520 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1521 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1526 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1530 * Variant of the above that takes a register to the class, not the vtable.
1533 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1535 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as mini_emit_iface_cast, but all lookups go through the
 * MonoClass (max_interface_id / interface_bitmap) instead of the vtable. */
1537 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1538 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1543 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emits an exact-class equality check: compares klass_reg against "klass"
 * (via a runtime MonoInst, an AOT class constant, or an immediate) and
 * throws InvalidCastException on mismatch.
 */
1547 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1551 } else if (cfg->compile_aot) {
1552 int const_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1554 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1558 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with no precomputed class instruction. */
1562 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1564 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Like mini_emit_class_check, but instead of throwing it branches to
 * "target" using the caller-supplied conditional branch opcode.
 */
1568 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1570 if (cfg->compile_aot) {
1571 int const_reg = alloc_preg (cfg);
1572 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1573 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1581 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emits a castclass check of the class in klass_reg against "klass",
 * throwing InvalidCastException on failure.  Two visible shapes:
 * an array path (rank + element-class checks, with special handling for
 * enum/object element types and a vector check via MonoArray.bounds) and a
 * reference-type path walking the supertypes table.  obj_reg may be -1 to
 * skip the vector check (used for arrays of arrays, see caller comment).
 */
1584 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1587 int rank_reg = alloc_preg (cfg);
1588 int eclass_reg = alloc_preg (cfg);
1590 g_assert (!klass_inst);
/* Array path: the rank must match exactly before element classes are compared. */
1591 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1593 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1594 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type dispatch: object / System.Enum / enum base / interface /
 * anything else each get a different check sequence. */
1596 if (klass->cast_class == mono_defaults.object_class) {
1597 int parent_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1599 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1602 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class) {
1605 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1606 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1607 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1609 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1610 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1613 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1614 /* Check that the object is a vector too */
1615 int bounds_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1618 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: verify "klass" appears in the object's supertypes table
 * (same technique as mini_emit_isninst_cast_inst, but throwing on failure). */
1621 int idepth_reg = alloc_preg (cfg);
1622 int stypes_reg = alloc_preg (cfg);
1623 int stype = alloc_preg (cfg);
1625 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1628 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1632 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no precomputed class instruction. */
1637 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1639 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emits inline code to set "size" bytes at destreg + offset to "val".
 * Only val == 0 is supported (asserted below).  Small aligned sizes use a
 * single store-immediate; otherwise the value is materialized in a register
 * and stored in progressively smaller chunks (8/4/2/1 bytes), respecting
 * "align" and NO_UNALIGNED_ACCESS.
 */
1643 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1647 g_assert (val == 0);
/* Fast path: size fits in one naturally-aligned store immediate. */
1652 if ((size <= 4) && (size <= align)) {
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1663 #if SIZEOF_REGISTER == 8
1665 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: put val into a register of native width. */
1671 val_reg = alloc_preg (cfg);
1673 if (SIZEOF_REGISTER == 8)
1674 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1676 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1679 /* This could be optimized further if neccesary */
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1688 #if !NO_UNALIGNED_ACCESS
1689 if (SIZEOF_REGISTER == 8) {
/* 64-bit: one leading 4-byte store to reach 8-byte alignment, then 8-byte stores. */
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline code copying "size" bytes from srcreg + soffset to
 * destreg + doffset.  Mirrors mini_emit_memset's strategy: byte copies when
 * unaligned, otherwise 8/4/2/1-byte load/store pairs, largest first.
 * Regions are assumed not to overlap (memcpy semantics) — the loads and
 * stores are emitted forward with no overlap handling.
 */
1721 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1728 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1729 g_assert (size < 10000);
/* Unaligned source/destination: copy one byte at a time. */
1732 /* This could be optimized further if neccesary */
1734 cur_reg = alloc_preg (cfg);
1735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1743 #if !NO_UNALIGNED_ACCESS
1744 if (SIZEOF_REGISTER == 8) {
/* 64-bit: bulk-copy in 8-byte chunks. */
1746 cur_reg = alloc_preg (cfg);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for the remainder. */
1757 cur_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1765 cur_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1773 cur_reg = alloc_preg (cfg);
1774 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1775 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Maps a return type to the matching call opcode family:
 * VOIDCALL / CALL / LCALL (64-bit int) / FCALL (float) / VCALL (vtype),
 * each with _REG (calli) and VIRT variants selected by the flags.
 * Enum and generic-instance types are first reduced to their underlying
 * representation; "gsctx" resolves type variables under generic sharing.
 */
1783 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized — plain CALL family (elided guard above). */
1786 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 type = mini_get_basic_type_from_generic (gsctx, type);
1790 switch (type->type) {
1791 case MONO_TYPE_VOID:
1792 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1795 case MONO_TYPE_BOOLEAN:
1798 case MONO_TYPE_CHAR:
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1805 case MONO_TYPE_FNPTR:
1806 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 case MONO_TYPE_CLASS:
1808 case MONO_TYPE_STRING:
1809 case MONO_TYPE_OBJECT:
1810 case MONO_TYPE_SZARRAY:
1811 case MONO_TYPE_ARRAY:
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1818 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1819 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their base type and the switch is re-entered (the goto
 * is elided in this excerpt); real value types use the VCALL family. */
1820 if (type->data.klass->enumtype) {
1821 type = mono_class_enum_basetype (type->data.klass);
1824 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1825 case MONO_TYPE_TYPEDBYREF:
1826 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1827 case MONO_TYPE_GENERICINST:
1828 type = &type->data.generic_class->container_class->byval_arg;
1831 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1837 * target_type_is_incompatible:
1838 * @cfg: MonoCompile context
1840 * Check that the item @arg on the evaluation stack can be stored
1841 * in the target type (can be a local, or field, etc).
1842 * The cfg arg can be used to check if we need verification or just
1845 * Returns: non-0 value if arg can't be stored on a target.
1848 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1850 MonoType *simple_type;
/* Byref targets accept managed pointers (with matching pointee class) or
 * native pointers; everything else is incompatible. */
1853 if (target->byref) {
1854 /* FIXME: check that the pointed to types match */
1855 if (arg->type == STACK_MP)
1856 return arg->klass != mono_class_from_mono_type (target);
1857 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-stripped) type; each case checks the
 * evaluation-stack kind (STACK_I4/I8/R8/OBJ/MP/PTR/VTYPE) of arg. */
1862 simple_type = mono_type_get_underlying_type (target);
1863 switch (simple_type->type) {
1864 case MONO_TYPE_VOID:
1868 case MONO_TYPE_BOOLEAN:
1871 case MONO_TYPE_CHAR:
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1878 /* STACK_MP is needed when setting pinned locals */
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 case MONO_TYPE_FNPTR:
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1888 case MONO_TYPE_CLASS:
1889 case MONO_TYPE_STRING:
1890 case MONO_TYPE_OBJECT:
1891 case MONO_TYPE_SZARRAY:
1892 case MONO_TYPE_ARRAY:
1893 if (arg->type != STACK_OBJ)
1895 /* FIXME: check type compatibility */
1899 if (arg->type != STACK_I8)
1904 if (arg->type != STACK_R8)
/* Value types must match exactly: stack kind VTYPE and identical class. */
1907 case MONO_TYPE_VALUETYPE:
1908 if (arg->type != STACK_VTYPE)
1910 klass = mono_class_from_mono_type (simple_type);
1911 if (klass != arg->klass)
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_GENERICINST:
/* Generic instances: valuetype instantiations behave like VALUETYPE,
 * reference instantiations like OBJECT. */
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1930 if (arg->type != STACK_OBJ)
1932 /* FIXME: check type compatibility */
1936 case MONO_TYPE_MVAR:
1937 /* FIXME: all the arguments must be references for now,
1938 * later look inside cfg and see if the arg num is
1939 * really a reference
1941 g_assert (cfg->generic_sharing_context);
1942 if (arg->type != STACK_OBJ)
1946 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
/* "this" (args[0], when sig->hasthis — guard elided) must be an object,
 * managed pointer or native pointer. */
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each declared parameter against the stack kind of its argument. */
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1977 simple_type = sig->params [i];
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1986 case MONO_TYPE_BOOLEAN:
1989 case MONO_TYPE_CHAR:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2012 if (args [i]->type != STACK_I8)
2017 if (args [i]->type != STACK_R8)
2020 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and re-run the switch (goto elided). */
2021 if (simple_type->data.klass->enumtype) {
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2037 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps an OP_*CALLVIRT opcode to its direct-call equivalent (switch cases
 * mostly elided in this excerpt); unknown opcodes are a hard assert. */
2045 callvirt_to_call (int opcode)
2050 case OP_VOIDCALLVIRT:
2059 g_assert_not_reached ();
/* Maps an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (call through a memory slot, e.g. a vtable entry). */
2066 callvirt_to_call_membase (int opcode)
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2074 return OP_FCALL_MEMBASE;
2076 return OP_LCALL_MEMBASE;
2078 return OP_VCALL_MEMBASE;
2080 g_assert_not_reached ();
2086 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Arranges for the IMT (interface method table) key to be passed to an
 * interface call: either the caller-supplied imt_arg, or the target method
 * itself (as an AOT constant or raw pointer).  The LLVM path records the
 * register in call->imt_arg_reg; the JIT path binds it to MONO_ARCH_IMT_REG
 * or defers to the arch back end.
 */
2088 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2092 if (COMPILE_LLVM (cfg)) {
2093 method_reg = alloc_preg (cfg);
2096 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2097 } else if (cfg->compile_aot) {
2098 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2101 MONO_INST_NEW (cfg, ins, OP_PCONST);
2102 ins->inst_p0 = call->method;
2103 ins->dreg = method_reg;
2104 MONO_ADD_INS (cfg->cbb, ins);
2108 call->imt_arg_reg = method_reg;
2110 #ifdef MONO_ARCH_IMT_REG
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2113 /* Need this to keep the IMT arg alive */
2114 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three ways of materializing the IMT key. */
2119 #ifdef MONO_ARCH_IMT_REG
2120 method_reg = alloc_preg (cfg);
2123 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2124 } else if (cfg->compile_aot) {
2125 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2128 MONO_INST_NEW (cfg, ins, OP_PCONST);
2129 ins->inst_p0 = call->method;
2130 ins->dreg = method_reg;
2131 MONO_ADD_INS (cfg->cbb, ins);
2134 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register on this arch: let the back end place it. */
2136 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo (patch-site descriptor) from the given mempool
 * and fills in its target; freed together with the pool, no explicit free. */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/*
 * Builds a MonoCallInst for a call with the given signature: selects the
 * call opcode (tailcall / calli / virtual / direct), sets up the return
 * value (including the OP_OUTARG_VTRETADDR scheme for struct returns),
 * converts R4 arguments under soft-float, and lets the arch/LLVM back end
 * lay out the out-args.  The instruction is NOT added to a bblock here;
 * callers do that after filling in call-target details.
 */
2153 inline static MonoCallInst *
2154 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2155 MonoInst **args, int calli, int virtual, int tail)
2158 #ifdef MONO_ARCH_SOFT_FLOAT
2163 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2165 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2168 call->signature = sig;
2170 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns: first branch reuses cfg->vret_addr (condition elided in
 * this excerpt — presumably the arch/LLVM vret-in-arg case; verify). */
2173 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2174 call->vret_var = cfg->vret_addr;
2175 //g_assert_not_reached ();
2177 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2178 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2181 temp->backend.is_pinvoke = sig->pinvoke;
2184 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2185 * address of return value to increase optimization opportunities.
2186 * Before vtype decomposition, the dreg of the call ins itself represents the
2187 * fact the call modifies the return value. After decomposition, the call will
2188 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2189 * will be transformed into an LDADDR.
2191 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2192 loada->dreg = alloc_preg (cfg);
2193 loada->inst_p0 = temp;
2194 /* We reference the call too since call->dreg could change during optimization */
2195 loada->inst_p1 = call;
2196 MONO_ADD_INS (cfg->cbb, loada);
2198 call->inst.dreg = temp->dreg;
2200 call->vret_var = loada;
2201 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2202 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2205 if (COMPILE_SOFT_FLOAT (cfg)) {
2207 * If the call has a float argument, we would need to do an r8->r4 conversion using
2208 * an icall, but that cannot be done during the call sequence since it would clobber
2209 * the call registers + the stack. So we do it before emitting the call.
2211 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2213 MonoInst *in = call->args [i];
2215 if (i >= sig->hasthis)
2216 t = sig->params [i - sig->hasthis];
2218 t = &mono_defaults.int_class->byval_arg;
2219 t = mono_type_get_underlying_type (t);
2221 if (!t->byref && t->type == MONO_TYPE_R4) {
2222 MonoInst *iargs [1];
/* Convert the R4 argument via the mono_fload_r4_arg icall ahead of time. */
2226 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2228 /* The result will be in an int vreg */
2229 call->args [i] = conv;
/* Hand the argument layout to the LLVM or native back end. */
2236 if (COMPILE_LLVM (cfg))
2237 mono_llvm_emit_call (cfg, call);
2239 mono_arch_emit_call (cfg, call);
2241 mono_arch_emit_call (cfg, call);
/* Track worst-case outgoing parameter area and flag that this cfg calls out. */
2244 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2245 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emits an indirect call through the address in "addr": builds the call
 * with calli=TRUE, wires addr->dreg as sreg1 and appends it to cfg->cbb. */
2250 inline static MonoInst*
2251 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2253 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2255 call->inst.sreg1 = addr->dreg;
2257 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2259 return (MonoInst*)call;
/* Attaches the rgctx (runtime generic context) argument to a call: binds it
 * to MONO_ARCH_RGCTX_REG when the arch has one, otherwise records the vreg
 * in call->rgctx_arg_reg for the back end to handle. */
2263 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2265 #ifdef MONO_ARCH_RGCTX_REG
2266 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2267 cfg->uses_rgctx_reg = TRUE;
2268 call->rgctx_reg = TRUE;
2270 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call variant that also passes an rgctx argument: the rgctx value
 * is copied to a fresh vreg before the call so it survives out-arg setup. */
2277 inline static MonoInst*
2278 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2284 rgctx_reg = mono_alloc_preg (cfg);
2285 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2287 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2289 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2290 return (MonoInst*)call;
2294 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2296 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Central helper for emitting a managed method call.  Handles, in order:
 * string-ctor signature fixup, transparent-proxy/remoting dispatch, the
 * delegate Invoke fast path, devirtualization of non-virtual and sealed
 * methods, and finally a true virtual dispatch through the vtable (with an
 * IMT slot for interface methods).  "this" being non-NULL selects virtual
 * call opcodes; imt_arg optionally supplies an IMT key.
 */
2299 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2300 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2302 gboolean might_be_remote;
2303 gboolean virtual = this != NULL;
2304 gboolean enable_for_aot = TRUE;
2308 if (method->string_ctor) {
2309 /* Create the real signature */
2310 /* FIXME: Cache these */
2311 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2312 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: a MarshalByRef (or object) receiver that is not a verified
 * local "this" may be a transparent proxy and needs the checked wrapper. */
2317 might_be_remote = this && sig->hasthis &&
2318 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2319 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2321 context_used = mono_method_check_context_used (method);
2322 if (might_be_remote && context_used) {
/* Shared generic code: resolve the remoting-invoke wrapper through the
 * rgctx and call it indirectly. */
2325 g_assert (cfg->generic_sharing_context);
2327 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2329 return mono_emit_calli (cfg, sig, args, addr);
2332 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2334 if (might_be_remote)
2335 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2337 call->method = method;
2338 call->inst.flags |= MONO_INST_HAS_METHOD;
2339 call->inst.inst_left = this;
2342 int vtable_reg, slot_reg, this_reg;
2344 this_reg = this->dreg;
2346 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call directly through delegate->invoke_impl. */
2347 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2348 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2350 /* Make a call to delegate->invoke_impl */
2351 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2352 call->inst.inst_basereg = this_reg;
2353 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2360 if ((!cfg->compile_aot || enable_for_aot) &&
2361 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2362 (MONO_METHOD_IS_FINAL (method) &&
2363 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2364 !(method->klass->marshalbyref && context_used)) {
2366 * the method is not virtual, we just need to ensure this is not null
2367 * and then we can call the method directly.
2369 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2371 * The check above ensures method is not gshared, this is needed since
2372 * gshared methods can't have wrappers.
2374 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2377 if (!method->string_ctor)
2378 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2380 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2382 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2384 return (MonoInst*)call;
2387 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2389 * the method is virtual, but we can statically dispatch since either
2390 * it's class or the method itself are sealed.
2391 * But first we need to ensure it's not a null reference.
2393 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2395 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2396 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2398 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (faulting on NULL "this"), then
 * pick a slot — an IMT slot for interfaces, a vtable index otherwise. */
2401 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2403 vtable_reg = alloc_preg (cfg);
2404 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2405 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2407 #ifdef MONO_ARCH_HAVE_IMT
2409 guint32 imt_slot = mono_method_get_imt_slot (method);
2410 emit_imt_argument (cfg, call, imt_arg);
2411 slot_reg = vtable_reg;
2412 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2415 if (slot_reg == -1) {
/* No IMT: index the per-interface slot table found before the vtable. */
2416 slot_reg = alloc_preg (cfg);
2417 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2418 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2421 slot_reg = vtable_reg;
2422 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2423 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2424 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT. */
2426 g_assert (mono_method_signature (method)->generic_param_count);
2427 emit_imt_argument (cfg, call, imt_arg);
2432 call->inst.sreg1 = slot_reg;
2433 call->virtual = TRUE;
2436 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2438 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but also passes an rgctx value (from
 * vtable_arg) to the callee; the value is copied to a fresh vreg first. */
2442 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2443 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2450 rgctx_reg = mono_alloc_preg (cfg);
2451 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2453 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2455 call = (MonoCallInst*)ins;
2457 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: emits a call using the method's own signature, with
 * no IMT argument. */
2463 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2465 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to a native function address "func" with the given
 * signature; no this-pointer, no virtual dispatch. */
2469 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2476 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2479 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2481 return (MonoInst*)call;
/* Emits a call to a registered JIT icall: looks up the icall info by its
 * function address and calls through its wrapper with the icall signature. */
2485 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2487 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2491 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2495 * mono_emit_abs_call:
2497 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2499 inline static MonoInst*
2500 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2501 MonoMethodSignature *sig, MonoInst **args)
2503 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2507 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches (created lazily) so the patch resolver
 * can recover the MonoJumpInfo from the fake call address. */
2510 if (cfg->abs_patches == NULL)
2511 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2512 g_hash_table_insert (cfg->abs_patches, ji, ji);
2513 ins = mono_emit_native_call (cfg, ji, sig, args);
2514 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result of a call when needed: pinvoke (and LLVM) callees may
 * return sub-register-sized integers without defining the upper bits, so an
 * explicit sign/zero extension matching the return type is emitted.
 */
2519 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2521 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2522 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2526 * Native code might return non register sized integers
2527 * without initializing the upper bits.
/* Pick the widening conversion from the load opcode of the return type. */
2529 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2530 case OP_LOADI1_MEMBASE:
2531 widen_op = OP_ICONV_TO_I1;
2533 case OP_LOADU1_MEMBASE:
2534 widen_op = OP_ICONV_TO_U1;
2536 case OP_LOADI2_MEMBASE:
2537 widen_op = OP_ICONV_TO_I2;
2539 case OP_LOADU2_MEMBASE:
2540 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening. */
2546 if (widen_op != -1) {
2547 int dreg = alloc_preg (cfg);
2550 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2551 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return the internal String.memcpy(3) corlib method, looked up once and
 * cached in a static. Aborts if the method is missing (old corlib).
 */
2561 get_memcpy_method (void)
2563 static MonoMethod *memcpy_method = NULL;
2564 if (!memcpy_method) {
2565 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2567 g_error ("Old corlib found. Install a new one");
2569 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Fill WB_BITMAP with one bit per pointer-sized slot of KLASS that holds a
 * reference, recursing into embedded value types. OFFSET is the byte offset
 * of KLASS within the outer object. Static fields are skipped.
 */
2573 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2575 MonoClassField *field;
2576 gpointer iter = NULL;
2578 while ((field = mono_class_get_fields (klass, &iter))) {
2581 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For value types, field offsets include the MonoObject header; strip it. */
2583 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2584 if (mono_type_is_reference (field->type)) {
/* References must be pointer aligned for the per-slot bitmap to work. */
2585 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2586 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2588 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2589 MonoClass *field_class = mono_class_from_mono_type (field->type);
2590 if (field_class->has_references)
2591 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for a store of VALUE (or, when VALUE is NULL, the
 * raw register VALUE_REG) through pointer PTR. Prefers an architecture
 * OP_CARD_TABLE_WBARRIER instruction, then an inline card-table mark, and
 * finally a call to the GC's write barrier method. A dummy use of the value
 * is emitted afterwards to keep it alive across the barrier.
 */
2597 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2600 int card_table_shift_bits;
2601 gpointer card_table_mask;
2602 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2603 MonoInst *dummy_use;
2605 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2606 int nursery_shift_bits;
2607 size_t nursery_size;
2609 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* The dedicated wbarrier opcode needs a real card table and nursery info,
 * and is not usable under AOT (the table address is baked in). */
2611 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2614 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2615 wbarrier->sreg1 = ptr->dreg;
2617 wbarrier->sreg2 = value->dreg;
2619 wbarrier->sreg2 = value_reg;
2620 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = table[ptr >> shift]; *card = 1. */
2624 int offset_reg = alloc_preg (cfg);
2625 int card_reg = alloc_preg (cfg);
2628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2629 if (card_table_mask)
2630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2632 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2633 * IMM's larger than 32bits.
2635 if (cfg->compile_aot) {
2636 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2638 MONO_INST_NEW (cfg, ins, OP_PCONST);
2639 ins->inst_p0 = card_table;
2640 ins->dreg = card_reg;
2641 MONO_ADD_INS (cfg->cbb, ins);
2644 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the managed write barrier. */
2647 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2648 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive until after the barrier. */
2652 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2654 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2655 dummy_use->sreg1 = value_reg;
2656 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Emit an inline copy of a valuetype of class KLASS that contains references,
 * inserting write barriers for the reference slots. IARGS holds dest/src (and
 * is reused for icall arguments). Bails out (presumably returning FALSE on an
 * elided line — confirm) when alignment is below pointer size or the size
 * exceeds the 32-slot bitmap limit; sizes above 5 words are handled with the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of unrolled stores.
 */
2662 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2664 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2665 unsigned need_wb = 0;
2670 /*types with references can't have alignment smaller than sizeof(void*) */
2671 if (align < SIZEOF_VOID_P)
2674 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2675 if (size > 32 * SIZEOF_VOID_P)
2678 create_write_barrier_bitmap (klass, &need_wb, 0);
2680 /* We don't unroll more than 5 stores to avoid code bloat. */
2681 if (size > 5 * SIZEOF_VOID_P) {
2682 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer-sized slots. */
2683 size += (SIZEOF_VOID_P - 1);
2684 size &= ~(SIZEOF_VOID_P - 1);
2686 EMIT_NEW_ICONST (cfg, iargs [2], size);
2687 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2688 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2692 destreg = iargs [0]->dreg;
2693 srcreg = iargs [1]->dreg;
2696 dest_ptr_reg = alloc_preg (cfg);
2697 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
2700 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy; barrier only on slots flagged in need_wb. */
2702 while (size >= SIZEOF_VOID_P) {
2703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2707 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2709 offset += SIZEOF_VOID_P;
2710 size -= SIZEOF_VOID_P;
2713 /*tmp += sizeof (void*)*/
2714 if (size >= SIZEOF_VOID_P) {
2715 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2716 MONO_ADD_INS (cfg->cbb, iargs [0]);
2720 /* Those cannot be references since size < sizeof (void*) */
2722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2746 * Emit code to copy a valuetype of type @klass whose address is stored in
2747 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) size/layout; when storing to a heap
 * location and the class has references, write barriers are required. */
2750 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2752 MonoInst *iargs [4];
2755 MonoMethod *memcpy_method;
2759 * This check breaks with spilled vars... need to handle it during verification anyway.
2760 * g_assert (klass && klass == src->klass && klass == dest->klass);
2764 n = mono_class_native_size (klass, &align);
2766 n = mono_class_value_size (klass, &align);
2768 /* if native is true there should be no references in the struct */
2769 if (cfg->gen_write_barriers && klass->has_references && !native) {
2770 /* Avoid barriers when storing to the stack */
2771 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2772 (dest->opcode == OP_LDADDR))) {
2773 int context_used = 0;
2778 if (cfg->generic_sharing_context)
2779 context_used = mono_class_check_context_used (klass);
2781 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2782 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2784 } else if (context_used) {
/* Shared code: fetch the class from the rgctx. */
2785 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2787 if (cfg->compile_aot) {
2788 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2790 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the GC descriptor; compute it at compile time. */
2791 mono_class_compute_gc_descriptor (klass);
2795 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline small copies, otherwise call corlib memcpy. */
2800 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2801 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2802 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2806 EMIT_NEW_ICONST (cfg, iargs [2], n);
2808 memcpy_method = get_memcpy_method ();
2809 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return the internal String.memset(3) corlib method, looked up once and
 * cached in a static. Aborts if the method is missing (old corlib).
 */
2814 get_memset_method (void)
2816 static MonoMethod *memset_method = NULL;
2817 if (!memset_method) {
2818 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2820 g_error ("Old corlib found. Install a new one");
2822 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code zero-initializing a valuetype of class KLASS at the address in
 * DEST->dreg: an inline memset for small sizes, a corlib memset call
 * otherwise. IP is the CIL instruction pointer (unused in the visible code).
 */
2826 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2828 MonoInst *iargs [3];
2831 MonoMethod *memset_method;
2833 /* FIXME: Optimize this for the case when dest is an LDADDR */
2835 mono_class_init (klass);
2836 n = mono_class_value_size (klass, &align);
/* Small objects: unrolled inline zeroing. */
2838 if (n <= sizeof (gpointer) * 5) {
2839 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2842 memset_method = get_memset_method ();
2844 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2845 EMIT_NEW_ICONST (cfg, iargs [2], n);
2846 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR loading the runtime generic context for METHOD under generic
 * sharing. Depending on CONTEXT_USED and the method's kind, the context comes
 * from the method's mrgctx variable, the vtable variable, or the vtable of
 * the 'this' argument.
 */
2851 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2853 MonoInst *this = NULL;
2855 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types that don't share per-method context
 * can derive the context from 'this'. */
2857 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2858 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2859 !method->klass->valuetype)
2860 EMIT_NEW_ARGLOAD (cfg, this, 0);
2862 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Method context in use: load the method rgctx from its local. */
2863 MonoInst *mrgctx_loc, *mrgctx_var;
2866 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2868 mrgctx_loc = mono_get_vtable_var (cfg);
2869 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2872 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* Static or valuetype methods: load the vtable from its local. */
2873 MonoInst *vtable_loc, *vtable_var;
2877 vtable_loc = mono_get_vtable_var (cfg);
2878 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2880 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The local actually holds an mrgctx; load the vtable out of it. */
2881 MonoInst *mrgctx_var = vtable_var;
2884 vtable_reg = alloc_preg (cfg);
2885 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2886 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable out of 'this'. */
2892 int vtable_reg, res_reg;
2894 vtable_reg = alloc_preg (cfg);
2895 res_reg = alloc_preg (cfg);
2896 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) a MonoJumpInfoRgctxEntry describing a lazy rgctx slot
 * fetch for METHOD: the embedded MonoJumpInfo carries PATCH_TYPE/PATCH_DATA
 * identifying the item, INFO_TYPE the kind of rgctx information wanted.
 */
2901 static MonoJumpInfoRgctxEntry *
2902 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2904 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2905 res->method = method;
2906 res->in_mrgctx = in_mrgctx;
2907 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2908 res->data->type = patch_type;
2909 res->data->data.target = patch_data;
2910 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the rgctx lazy-fetch trampoline, resolving ENTRY against the
 * rgctx value RGCTX at runtime.
 */
2915 static inline MonoInst*
2916 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2918 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the property RGCTX_TYPE of KLASS from the rgctx of the
 * current method.
 */
2922 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2923 MonoClass *klass, int rgctx_type)
2925 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2926 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2928 return emit_rgctx_fetch (cfg, rgctx, entry);
2932 * emit_get_rgctx_method:
2934 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2935 * normal constants, else emit a load from the rgctx.
2938 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2939 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is fully known, emit a constant. */
2941 if (!context_used) {
2944 switch (rgctx_type) {
2945 case MONO_RGCTX_INFO_METHOD:
2946 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2948 case MONO_RGCTX_INFO_METHOD_RGCTX:
2949 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2952 g_assert_not_reached ();
/* Shared code: fetch from the rgctx at runtime. */
2955 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2956 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2958 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the property RGCTX_TYPE of FIELD from the rgctx of the
 * current method.
 */
2963 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2964 MonoClassField *field, int rgctx_type)
2966 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2967 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2969 return emit_rgctx_fetch (cfg, rgctx, entry);
2973 * On return the caller must check @klass for load errors.
/* Emit a call to the generic class-init trampoline, passing KLASS's vtable
 * (from the rgctx under sharing, a constant otherwise), preferably in the
 * architecture's dedicated vtable register. */
2976 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2978 MonoInst *vtable_arg;
2980 int context_used = 0;
2982 if (cfg->generic_sharing_context)
2983 context_used = mono_class_check_context_used (klass);
2986 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2987 klass, MONO_RGCTX_INFO_VTABLE);
2989 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2993 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a signature that passes the vtable as a normal argument. */
2996 if (COMPILE_LLVM (cfg))
2997 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2999 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3000 #ifdef MONO_ARCH_VTABLE_REG
3001 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3002 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 * Under --debug=casts, store the source class (read from OBJ_REG's vtable)
 * and target class KLASS into the JIT TLS (class_cast_from/class_cast_to) so
 * a failing cast can report both types. No-op otherwise.
 */
3009 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3011 if (mini_get_debug_options ()->better_cast_details) {
3012 int to_klass_reg = alloc_preg (cfg);
3013 int vtable_reg = alloc_preg (cfg);
3014 int klass_reg = alloc_preg (cfg);
3015 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is not available on all platforms. */
3018 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3022 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3026 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3027 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Clear the cast details saved by save_cast_details () once the cast has
 * succeeded. No-op unless --debug=casts is active.
 */
3033 reset_cast_details (MonoCompile *cfg)
3035 /* Reset the variables holding the cast details */
3036 if (mini_get_debug_options ()->better_cast_details) {
3037 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3039 MONO_ADD_INS (cfg->cbb, tls_get);
3040 /* It is enough to reset the from field */
3041 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3046 * On return the caller must check @array_class for load errors
/* Emit a check that OBJ's vtable matches ARRAY_CLASS exactly, throwing
 * ArrayTypeMismatchException otherwise (used for array covariance checks on
 * stores). The comparison strategy depends on shared/AOT compilation mode. */
3049 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3051 int vtable_reg = alloc_preg (cfg);
3052 int context_used = 0;
3054 if (cfg->generic_sharing_context)
3055 context_used = mono_class_check_context_used (array_class);
3057 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also serves as the null check on obj. */
3059 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: vtables are per-domain, compare classes instead. */
3061 if (cfg->opt & MONO_OPT_SHARED) {
3062 int class_reg = alloc_preg (cfg);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3064 if (cfg->compile_aot) {
3065 int klass_reg = alloc_preg (cfg);
3066 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3067 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: vtable comes from the rgctx. */
3071 } else if (context_used) {
3072 MonoInst *vtable_ins;
3074 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3075 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3077 if (cfg->compile_aot) {
3081 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3083 vt_reg = alloc_preg (cfg);
3084 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3085 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3088 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* JIT: the vtable pointer can be compared as an immediate. */
3090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3094 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3096 reset_cast_details (cfg);
3100 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3101 * generic code is generated.
/* Implemented by calling the managed Nullable<T>.Unbox helper; under sharing
 * the method's address and rgctx are fetched through the rgctx. */
3104 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3106 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3109 MonoInst *rgctx, *addr;
3111 /* FIXME: What if the class is shared? We might not
3112 have to get the address of the method from the
3114 addr = emit_get_rgctx_method (cfg, context_used, method,
3115 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3117 rgctx = emit_get_rgctx (cfg, method, context_used);
3119 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: direct call. */
3121 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit an unbox of sp [0] to value type KLASS: checks that the object is a
 * non-array instance whose element class matches, then returns the address of
 * the value data (obj + sizeof (MonoObject)).
 */
3126 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3130 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3131 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3132 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3133 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3135 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3136 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3137 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3139 /* FIXME: generics */
3140 g_assert (klass->rank == 0);
/* Arrays can never unbox to a value type. */
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3144 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
3150 MonoInst *element_class;
3152 /* This assertion is from the unboxcast insn */
3153 g_assert (klass->rank == 0);
3155 element_class = emit_get_rgctx_klass (cfg, context_used,
3156 klass->element_class, MONO_RGCTX_INFO_KLASS);
3158 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3159 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3161 save_cast_details (cfg, klass->element_class, obj_reg);
3162 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3163 reset_cast_details (cfg);
/* Result: pointer past the object header, i.e. the boxed value's data. */
3166 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3167 MONO_ADD_INS (cfg->cbb, add);
3168 add->type = STACK_MP;
3175 * Returns NULL and set the cfg exception on error.
/* Emit allocation of an object of class KLASS (FOR_BOX selects the boxing
 * allocator where the GC provides one). Chooses between the shared
 * mono_object_new path, an mscorlib AOT helper, a GC managed allocator, and
 * the class's allocation function. */
3178 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3180 MonoInst *iargs [2];
/* Shared-code path: the class/vtable must come from the rgctx. */
3186 MonoInst *iargs [2];
3189 FIXME: we cannot get managed_alloc here because we can't get
3190 the class's vtable (because it's not a closed class)
3192 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3193 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3196 if (cfg->opt & MONO_OPT_SHARED)
3197 rgctx_info = MONO_RGCTX_INFO_KLASS;
3199 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3200 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3202 if (cfg->opt & MONO_OPT_SHARED) {
3203 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3205 alloc_ftn = mono_object_new;
3208 alloc_ftn = mono_object_new_specific;
3211 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3214 if (cfg->opt & MONO_OPT_SHARED) {
3215 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3216 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3218 alloc_ftn = mono_object_new;
3219 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3220 /* This happens often in argument checking code, eg. throw new FooException... */
3221 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3222 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3223 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3225 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3226 MonoMethod *managed_alloc = NULL;
/* A NULL vtable means the class failed to load. */
3230 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3231 cfg->exception_ptr = klass;
3235 #ifndef MONO_CROSS_COMPILE
3236 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3239 if (managed_alloc) {
3240 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3241 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3243 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in words as first arg. */
3245 guint32 lw = vtable->klass->instance_size;
3246 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3247 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3248 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3251 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3255 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3259 * Returns NULL and set the cfg exception on error.
/* Emit boxing of VAL (a value of class KLASS): Nullable<T> goes through the
 * managed Box helper, everything else allocates and stores the value past the
 * object header. */
3262 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3264 MonoInst *alloc, *ins;
3266 if (mono_class_is_nullable (klass)) {
3267 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3270 /* FIXME: What if the class is shared? We might not
3271 have to get the method address from the RGCTX. */
3272 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3273 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3274 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3276 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3278 return mono_emit_method_call (cfg, method, &val, NULL);
3282 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3286 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS has a variant (co- or contra-variant) generic
 * parameter instantiated with a reference type — or, under sharing, with a
 * type variable. Such casts need the full runtime check.
 */
3293 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3296 MonoGenericContainer *container;
3297 MonoGenericInst *ginst;
3299 if (klass->generic_class) {
3300 container = klass->generic_class->container_class->generic_container;
3301 ginst = klass->generic_class->context.class_inst;
3302 } else if (klass->generic_container && context_used) {
3303 container = klass->generic_container;
3304 ginst = container->context.class_inst;
3309 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters are interesting. */
3311 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3313 type = ginst->type_argv [i];
3314 if (MONO_TYPE_IS_REFERENCE (type))
/* Under sharing, a VAR/MVAR could be instantiated with a reference type. */
3317 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
/* Whether a castclass/isinst against KLASS needs the slow icall path instead
 * of inline checks. Note the leading TRUE: the inline fast path is currently
 * disabled for every class (see the FIXME below). */
3323 // FIXME: This doesn't work yet (class libs tests fail?)
3324 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3327 * Returns NULL and set the cfg exception on error.
/* Emit a CIL castclass of SRC to KLASS: null passes through; complex cases
 * (currently all, per is_complex_isinst) go through the mono_object_castclass
 * icall; the inline path compares vtables/classes and throws
 * InvalidCastException on mismatch. */
3330 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3332 MonoBasicBlock *is_null_bb;
3333 int obj_reg = src->dreg;
3334 int vtable_reg = alloc_preg (cfg);
3335 MonoInst *klass_inst = NULL;
3340 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3341 klass, MONO_RGCTX_INFO_KLASS);
3343 if (is_complex_isinst (klass)) {
3344 /* Complex case, handle by an icall */
3350 args [1] = klass_inst;
3352 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3354 /* Simple case, handled by the code below */
/* null always casts successfully; branch straight to the end. */
3358 NEW_BBLOCK (cfg, is_null_bb);
3360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3363 save_cast_details (cfg, klass, obj_reg);
3365 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3367 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3369 int klass_reg = alloc_preg (cfg);
3371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single class compare. */
3373 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3374 /* the remoting code is broken, access the class for now */
3375 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3376 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3379 cfg->exception_ptr = klass;
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3387 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3390 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3394 MONO_START_BB (cfg, is_null_bb);
3396 reset_cast_details (cfg);
3402 * Returns NULL and set the cfg exception on error.
/* Emit a CIL isinst of SRC against KLASS: result is the object on success,
 * NULL on failure. Complex cases (currently all, per is_complex_isinst) call
 * the mono_object_isinst icall; the inline path dispatches on interface /
 * array / nullable / sealed / general class shape. */
3405 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3408 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3409 int obj_reg = src->dreg;
3410 int vtable_reg = alloc_preg (cfg);
3411 int res_reg = alloc_preg (cfg);
3412 MonoInst *klass_inst = NULL;
3415 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3417 if (is_complex_isinst (klass)) {
3420 /* Complex case, handle by an icall */
3426 args [1] = klass_inst;
3428 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3430 /* Simple case, the code below can handle it */
3434 NEW_BBLOCK (cfg, is_null_bb);
3435 NEW_BBLOCK (cfg, false_bb);
3436 NEW_BBLOCK (cfg, end_bb);
3438 /* Do the assignment at the beginning, so the other assignment can be if converted */
3439 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3440 ins->type = STACK_OBJ;
/* null is never an instance, but isinst(null) yields null, so go to the
 * success block which keeps res_reg == obj_reg (== null). */
3443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3446 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3448 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3449 g_assert (!context_used);
3450 /* the is_null_bb target simply copies the input register to the output */
3451 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3453 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3456 int rank_reg = alloc_preg (cfg);
3457 int eclass_reg = alloc_preg (cfg);
3459 g_assert (!context_used);
3460 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enums-as-objects etc.; special-case the element
 * class comparisons below. */
3465 if (klass->cast_class == mono_defaults.object_class) {
3466 int parent_reg = alloc_preg (cfg);
3467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3468 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3469 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3471 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3472 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3473 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3475 } else if (klass->cast_class == mono_defaults.enum_class) {
3476 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3478 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3479 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3481 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3482 /* Check that the object is a vector too */
3483 int bounds_reg = alloc_preg (cfg);
3484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3485 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3489 /* the is_null_bb target simply copies the input register to the output */
3490 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3492 } else if (mono_class_is_nullable (klass)) {
3493 g_assert (!context_used);
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3495 /* the is_null_bb target simply copies the input register to the output */
3496 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3498 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3499 g_assert (!context_used);
3500 /* the remoting code is broken, access the class for now */
3501 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3502 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3504 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3505 cfg->exception_ptr = klass;
3508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3517 /* the is_null_bb target simply copies the input register to the output */
3518 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the test failed, the result is NULL. */
3523 MONO_START_BB (cfg, false_bb);
3525 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3526 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3528 MONO_START_BB (cfg, is_null_bb);
3530 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for an isinst-style check that is also aware of remoting
 * transparent proxies.  The integer result convention is documented in the
 * comment below (0/1/2).  NOTE(review): this extract elides some original
 * lines (braces/blank lines); leading numbers are original line numbers.
 */
3536 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3538 /* This opcode takes as input an object reference and a class, and returns:
3539 0) if the object is an instance of the class,
3540 1) if the object is not instance of the class,
3541 2) if the object is a proxy whose type cannot be determined */
3544 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3545 int obj_reg = src->dreg;
3546 int dreg = alloc_ireg (cfg);
3548 int klass_reg = alloc_preg (cfg);
3550 NEW_BBLOCK (cfg, true_bb);
3551 NEW_BBLOCK (cfg, false_bb);
3552 NEW_BBLOCK (cfg, false2_bb);
3553 NEW_BBLOCK (cfg, end_bb);
3554 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is treated as "not an instance" (result 1). */
3556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try the interface cast; on failure, check whether the
 * object is a transparent proxy with custom type info (result 2). */
3559 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3560 NEW_BBLOCK (cfg, interface_fail_bb);
3562 tmp_reg = alloc_preg (cfg);
3563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3564 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3565 MONO_START_BB (cfg, interface_fail_bb);
3566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3568 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3570 tmp_reg = alloc_preg (cfg);
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: if the object is a transparent proxy, test against
 * the remote class' proxy_class; otherwise do a plain class check. */
3575 tmp_reg = alloc_preg (cfg);
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3579 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3580 tmp_reg = alloc_preg (cfg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* A proxy without custom type info falls back to the ordinary check. */
3584 tmp_reg = alloc_preg (cfg);
3585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3587 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3589 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3592 MONO_START_BB (cfg, no_proxy_bb);
3594 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result materialization: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
3597 MONO_START_BB (cfg, false_bb);
3599 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3602 MONO_START_BB (cfg, false2_bb);
3604 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3607 MONO_START_BB (cfg, true_bb);
3609 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3611 MONO_START_BB (cfg, end_bb);
/* The result is surfaced to the evaluation stack as an I4. */
3614 MONO_INST_NEW (cfg, ins, OP_ICONST);
3616 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a castclass that is aware of remoting transparent proxies.
 * See the result convention below; a failed cast raises
 * InvalidCastException at runtime.
 */
3622 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3624 /* This opcode takes as input an object reference and a class, and returns:
3625 0) if the object is an instance of the class,
3626 1) if the object is a proxy whose type cannot be determined
3627 an InvalidCastException exception is thrown otherwise*/
3630 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3631 int obj_reg = src->dreg;
3632 int dreg = alloc_ireg (cfg);
3633 int tmp_reg = alloc_preg (cfg);
3634 int klass_reg = alloc_preg (cfg);
3636 NEW_BBLOCK (cfg, end_bb);
3637 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always passes a castclass (result 0). */
3639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class so a failure can produce a detailed message. */
3642 save_cast_details (cfg, klass, obj_reg);
/* Interface case: failed iface cast is only tolerated for a transparent
 * proxy carrying custom type info (result 1); otherwise throw. */
3644 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3645 NEW_BBLOCK (cfg, interface_fail_bb);
3647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3648 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3649 MONO_START_BB (cfg, interface_fail_bb);
3650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3652 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3654 tmp_reg = alloc_preg (cfg);
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3657 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3659 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: transparent proxies are checked via the remote
 * class' proxy_class; everything else takes the plain castclass path. */
3663 NEW_BBLOCK (cfg, no_proxy_bb);
3665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3667 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3669 tmp_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3673 tmp_reg = alloc_preg (cfg);
3674 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3678 NEW_BBLOCK (cfg, fail_1_bb);
3680 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3682 MONO_START_BB (cfg, fail_1_bb);
3684 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3687 MONO_START_BB (cfg, no_proxy_bb);
3689 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3692 MONO_START_BB (cfg, ok_result_bb);
3694 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3696 MONO_START_BB (cfg, end_bb);
/* The result is surfaced to the evaluation stack as an I4. */
3699 MONO_INST_NEW (cfg, ins, OP_ICONST);
3701 ins->type = STACK_I4;
3707 * Returns NULL and sets the cfg exception on error.
3709 static G_GNUC_UNUSED MonoInst*
3710 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3714 gpointer *trampoline;
3715 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3719 obj = handle_alloc (cfg, klass, FALSE, 0);
3723 /* Inline the contents of mono_delegate_ctor */
3725 /* Set target field */
3726 /* Optimize away setting of NULL target */
3727 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store is a reference store, so a write barrier may be needed. */
3729 if (cfg->gen_write_barriers) {
3730 dreg = alloc_preg (cfg);
3731 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3732 emit_write_barrier (cfg, ptr, target, 0);
3736 /* Set method field */
3737 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3738 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3739 if (cfg->gen_write_barriers) {
3740 dreg = alloc_preg (cfg);
3741 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3742 emit_write_barrier (cfg, ptr, method_ins, 0);
3745 * To avoid looking up the compiled code belonging to the target method
3746 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3747 * store it, and we fill it after the method has been compiled.
3749 if (!cfg->compile_aot && !method->dynamic) {
3750 MonoInst *code_slot_ins;
3753 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot hash.
 * The domain lock guards both the hash creation and the insert. */
3755 domain = mono_domain_get ();
3756 mono_domain_lock (domain);
3757 if (!domain_jit_info (domain)->method_code_hash)
3758 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3759 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3761 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3762 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3764 mono_domain_unlock (domain);
3766 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3771 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched at load time; when JITting
 * we can embed the trampoline pointer directly. */
3772 if (cfg->compile_aot) {
3773 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3775 trampoline = mono_create_delegate_trampoline (klass);
3776 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3780 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call that allocates a multi-dimensional array of the given
 * RANK, with the dimension arguments in SP.  Uses the vararg
 * mono_array_new_va icall, so the method is flagged as having varargs and
 * LLVM compilation is disabled for it.
 */
3786 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3788 MonoJitICallInfo *info;
3790 /* Need to register the icall so it gets an icall wrapper */
3791 info = mono_get_array_new_va_icall (rank);
3793 cfg->flags |= MONO_CFG_HAS_VARARGS;
3795 /* mono_array_new_va () needs a vararg calling convention */
3796 cfg->disable_llvm = TRUE;
3798 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3799 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var once per method: an
 * OP_LOAD_GOTADDR is prepended to the entry basic block, and a dummy use is
 * appended to the exit block so the variable stays live for the whole
 * method.  No-op if there is no got_var or it was already allocated.
 */
3803 mono_emit_load_got_addr (MonoCompile *cfg)
3805 MonoInst *getaddr, *dummy_use;
3807 if (!cfg->got_var || cfg->got_var_allocated)
3810 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3811 getaddr->dreg = cfg->got_var->dreg;
3813 /* Add it to the start of the first bblock */
3814 if (cfg->bb_entry->code) {
3815 getaddr->next = cfg->bb_entry->code;
3816 cfg->bb_entry->code = getaddr;
3819 MONO_ADD_INS (cfg->bb_entry, getaddr);
3821 cfg->got_var_allocated = TRUE;
3824 * Add a dummy use to keep the got_var alive, since real uses might
3825 * only be generated by the back ends.
3826 * Add it to end_bblock, so the variable's lifetime covers the whole
3828 * It would be better to make the usage of the got var explicit in all
3829 * cases when the backend needs it (i.e. calls, throw etc.), so this
3830 * wouldn't be needed.
3832 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3833 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, initialized lazily from the MONO_INLINELIMIT
 * environment variable (falling back to INLINE_LENGTH_LIMIT) in
 * mono_method_check_inlining (). */
3836 static int inline_limit;
3837 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects generic sharing, deep inline nesting, oversized
 * bodies, noinline/synchronized/marshalbyref methods, classes whose cctor
 * would have to run inside the inlined code, declarative security, and (on
 * soft-float targets) R4 parameters/returns.
 */
3840 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3842 MonoMethodHeaderSummary header;
3844 #ifdef MONO_ARCH_SOFT_FLOAT
3845 MonoMethodSignature *sig = mono_method_signature (method);
3849 if (cfg->generic_sharing_context)
/* Arbitrary cap on inline nesting depth. */
3852 if (cfg->inline_depth > 10)
3855 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch reads 'signature', while only 'sig' is visibly
 * declared (under MONO_ARCH_SOFT_FLOAT) — confirm 'signature' is declared
 * in an elided line for MONO_ARCH_HAVE_LMF_OPS configurations. */
3856 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3857 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3858 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3863 if (!mono_method_get_header_summary (method, &header))
3866 /*runtime, icall and pinvoke are checked by summary call*/
3867 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3868 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3869 (method->klass->marshalbyref) ||
3873 /* also consider num_locals? */
3874 /* Do the size check early to avoid creating vtables */
3875 if (!inline_limit_inited) {
3876 if (getenv ("MONO_INLINELIMIT"))
3877 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3879 inline_limit = INLINE_LENGTH_LIMIT;
3880 inline_limit_inited = TRUE;
3882 if (header.code_size >= inline_limit)
3886 * if we can initialize the class of the method right away, we do,
3887 * otherwise we don't allow inlining if the class needs initialization,
3888 * since it would mean inserting a call to mono_runtime_class_init()
3889 * inside the inlined code
3891 if (!(cfg->opt & MONO_OPT_SHARED)) {
3892 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3893 if (cfg->run_cctors && method->klass->has_cctor) {
3894 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3895 if (!method->klass->runtime_info)
3896 /* No vtable created yet */
3898 vtable = mono_class_vtable (cfg->domain, method->klass);
3901 /* This makes so that inline cannot trigger */
3902 /* .cctors: too many apps depend on them */
3903 /* running with a specific order... */
3904 if (! vtable->initialized)
3906 mono_runtime_class_init (vtable);
3908 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3909 if (!method->klass->runtime_info)
3910 /* No vtable created yet */
3912 vtable = mono_class_vtable (cfg->domain, method->klass);
3915 if (!vtable->initialized)
3920 * If we're compiling for shared code
3921 * the cctor will need to be run at aot method load time, for example,
3922 * or at the end of the compilation of the inlining method.
3924 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3929 * CAS - do not inline methods with declarative security
3930 * Note: this has to be before any possible return TRUE;
3932 if (mono_method_has_declsec (method))
3935 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods touching R4 values. */
3937 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3939 for (i = 0; i < sig->param_count; ++i)
3940 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Whether a static field access from METHOD requires emitting a class
 * initialization check for VTABLE's class.  Already-initialized vtables
 * (non-AOT), beforefieldinit classes, classes with no cctor to run, and
 * instance methods of the class itself (the cctor has run by the time an
 * instance exists) do not need one.
 */
3948 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3950 if (vtable->initialized && !cfg->compile_aot)
3953 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3956 if (!mono_class_needs_cctor_run (vtable->klass, method))
3959 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3960 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements are of class KLASS.  BCHECK requests a bounds
 * check against MonoArray.max_length.  On x86/amd64, power-of-two element
 * sizes use a single LEA; otherwise the offset is computed with an
 * explicit multiply and add.
 */
3967 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3971 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3973 mono_class_init (klass);
3974 size = mono_class_array_element_size (klass);
3976 mult_reg = alloc_preg (cfg);
3977 array_reg = arr->dreg;
3978 index_reg = index->dreg;
3980 #if SIZEOF_REGISTER == 8
3981 /* The array reg is 64 bits but the index reg is only 32 */
3982 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
3984 index2_reg = index_reg;
3986 index2_reg = alloc_preg (cfg);
3987 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit registers: narrow an I8 index down to I4 first. */
3990 if (index->type == STACK_I8) {
3991 index2_reg = alloc_preg (cfg);
3992 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3994 index2_reg = index_reg;
3999 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4001 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: element size 1/2/4/8 maps to an LEA shift amount. */
4002 if (size == 1 || size == 2 || size == 4 || size == 8) {
4003 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4005 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4006 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4012 add_reg = alloc_preg (cfg);
4014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4015 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4016 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4017 ins->type = STACK_PTR;
4018 MONO_ADD_INS (cfg->cbb, ins);
4023 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of a
 * two-dimensional array, including a lower-bound adjustment and a range
 * check per dimension (each throws IndexOutOfRangeException on failure).
 * Only compiled when the architecture has a real multiply (guard above).
 */
4025 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4027 int bounds_reg = alloc_preg (cfg);
4028 int add_reg = alloc_preg (cfg);
4029 int mult_reg = alloc_preg (cfg);
4030 int mult2_reg = alloc_preg (cfg);
4031 int low1_reg = alloc_preg (cfg);
4032 int low2_reg = alloc_preg (cfg);
4033 int high1_reg = alloc_preg (cfg);
4034 int high2_reg = alloc_preg (cfg);
4035 int realidx1_reg = alloc_preg (cfg);
4036 int realidx2_reg = alloc_preg (cfg);
4037 int sum_reg = alloc_preg (cfg);
4042 mono_class_init (klass);
4043 size = mono_class_array_element_size (klass);
4045 index1 = index_ins1->dreg;
4046 index2 = index_ins2->dreg;
4048 /* range checking */
4049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4050 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned-compare against
 * the dimension length so negative adjusted indices also fail. */
4052 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4053 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4054 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4056 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4057 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4058 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, one MonoArrayBounds entry further in. */
4060 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4061 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4062 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4063 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4064 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4065 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4066 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + vector offset. */
4068 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4069 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4071 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4072 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4074 ins->type = STACK_MP;
4076 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set accessor
 * CMETHOD.  Rank-1 arrays use the inline fast path; rank-2 arrays use the
 * inline 2-dim path when intrinsics are enabled and the architecture has a
 * real multiply; everything else calls the generated marshal helper.
 * IS_SET excludes the value parameter when deriving the rank.
 */
4083 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4087 MonoMethod *addr_method;
4090 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4093 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4095 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4096 /* emit_ldelema_2 depends on OP_LMUL */
4097 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4098 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/element-size specific address helper. */
4102 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4103 addr_method = mono_marshal_get_array_address (rank, element_size);
4104 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoint requests.  Embedders can
 * replace it via mono_set_break_policy (). */
4109 static MonoBreakPolicy
4110 always_insert_breakpoint (MonoMethod *method)
4112 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break-policy callback; consulted by
 * should_insert_brekpoint () for every breakpoint site. */
4115 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4118 * mono_set_break_policy:
4119 * policy_callback: the new callback function
4121 * Allow embedders to decide whether to actually obey breakpoint instructions
4122 * (both break IL instructions and Debugger.Break () method calls), for example
4123 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4124 * untrusted or semi-trusted code.
4126 * @policy_callback will be called every time a break point instruction needs to
4127 * be inserted with the method argument being the method that calls Debugger.Break()
4128 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4129 * if it wants the breakpoint to not be effective in the given method.
4130 * #MONO_BREAK_POLICY_ALWAYS is the default.
4133 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4135 if (policy_callback)
4136 break_policy_func = policy_callback;
4138 break_policy_func = always_insert_breakpoint;
/*
 * Decide, via the installed break policy, whether a breakpoint should be
 * materialized for METHOD.  NOTE(review): the name is misspelled
 * ("brekpoint") but must be kept — callers in this file use this spelling.
 */
4142 should_insert_brekpoint (MonoMethod *method) {
4143 switch (break_policy_func (method)) {
4144 case MONO_BREAK_POLICY_ALWAYS:
4146 case MONO_BREAK_POLICY_NEVER:
4148 case MONO_BREAK_POLICY_ON_DBG:
/* Only honor the breakpoint when running under the Mono debugger. */
4149 return mono_debug_using_mono_debugger ();
4151 g_warning ("Incorrect value returned from break policy callback");
4156 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the array element copy: IS_SET selects the direction — either
 * store the value at args [2] into the element, or load the element into
 * the location at args [2].  The element class comes from the icall
 * signature's third parameter.
 */
4158 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4160 MonoInst *addr, *store, *load;
4161 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4163 /* the bounds check is already done by the callers */
4164 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set path: *element = *args [2] */
4166 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4167 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get path: *args [2] = *element */
4169 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4170 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence.  Currently only SIMD constructors are handled, and only when
 * the SIMD optimization is enabled; returns NULL otherwise.
 */
4176 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4178 MonoInst *ins = NULL;
4179 #ifdef MONO_ARCH_SIMD_INTRINSICS
4180 if (cfg->opt & MONO_OPT_SIMD) {
4181 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4191 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4193 MonoInst *ins = NULL;
4195 static MonoClass *runtime_helpers_class = NULL;
4196 if (! runtime_helpers_class)
4197 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4198 "System.Runtime.CompilerServices", "RuntimeHelpers");
4200 if (cmethod->klass == mono_defaults.string_class) {
4201 if (strcmp (cmethod->name, "get_Chars") == 0) {
4202 int dreg = alloc_ireg (cfg);
4203 int index_reg = alloc_preg (cfg);
4204 int mult_reg = alloc_preg (cfg);
4205 int add_reg = alloc_preg (cfg);
4207 #if SIZEOF_REGISTER == 8
4208 /* The array reg is 64 bits but the index reg is only 32 */
4209 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4211 index_reg = args [1]->dreg;
4213 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4215 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4216 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4217 add_reg = ins->dreg;
4218 /* Avoid a warning */
4220 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4224 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4225 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4226 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4228 type_from_op (ins, NULL, NULL);
4230 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4231 int dreg = alloc_ireg (cfg);
4232 /* Decompose later to allow more optimizations */
4233 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4234 ins->type = STACK_I4;
4235 ins->flags |= MONO_INST_FAULT;
4236 cfg->cbb->has_array_access = TRUE;
4237 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4240 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4241 int mult_reg = alloc_preg (cfg);
4242 int add_reg = alloc_preg (cfg);
4244 /* The corlib functions check for oob already. */
4245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4246 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4247 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4248 return cfg->cbb->last_ins;
4251 } else if (cmethod->klass == mono_defaults.object_class) {
4253 if (strcmp (cmethod->name, "GetType") == 0) {
4254 int dreg = alloc_preg (cfg);
4255 int vt_reg = alloc_preg (cfg);
4256 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4257 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4258 type_from_op (ins, NULL, NULL);
4261 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4262 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4263 int dreg = alloc_ireg (cfg);
4264 int t1 = alloc_ireg (cfg);
4266 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4267 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4268 ins->type = STACK_I4;
4272 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4273 MONO_INST_NEW (cfg, ins, OP_NOP);
4274 MONO_ADD_INS (cfg->cbb, ins);
4278 } else if (cmethod->klass == mono_defaults.array_class) {
4279 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4280 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4282 #ifndef MONO_BIG_ARRAYS
4284 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4287 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4288 int dreg = alloc_ireg (cfg);
4289 int bounds_reg = alloc_ireg (cfg);
4290 MonoBasicBlock *end_bb, *szarray_bb;
4291 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4293 NEW_BBLOCK (cfg, end_bb);
4294 NEW_BBLOCK (cfg, szarray_bb);
4296 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4297 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4298 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4300 /* Non-szarray case */
4302 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4303 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4305 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4306 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4307 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4308 MONO_START_BB (cfg, szarray_bb);
4311 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4312 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4314 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4315 MONO_START_BB (cfg, end_bb);
4317 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4318 ins->type = STACK_I4;
4324 if (cmethod->name [0] != 'g')
4327 if (strcmp (cmethod->name, "get_Rank") == 0) {
4328 int dreg = alloc_ireg (cfg);
4329 int vtable_reg = alloc_preg (cfg);
4330 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4331 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4332 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4333 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4334 type_from_op (ins, NULL, NULL);
4337 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4338 int dreg = alloc_ireg (cfg);
4340 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4341 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4342 type_from_op (ins, NULL, NULL);
4347 } else if (cmethod->klass == runtime_helpers_class) {
4349 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4350 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4354 } else if (cmethod->klass == mono_defaults.thread_class) {
4355 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4356 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4357 MONO_ADD_INS (cfg->cbb, ins);
4359 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4360 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4361 MONO_ADD_INS (cfg->cbb, ins);
4364 } else if (cmethod->klass == mono_defaults.monitor_class) {
4365 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4366 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4369 if (COMPILE_LLVM (cfg)) {
4371 * Pass the argument normally, the LLVM backend will handle the
4372 * calling convention problems.
4374 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4376 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4377 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4378 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4379 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4382 return (MonoInst*)call;
4383 } else if (strcmp (cmethod->name, "Exit") == 0) {
4386 if (COMPILE_LLVM (cfg)) {
4387 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4389 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4390 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4391 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4392 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4395 return (MonoInst*)call;
4397 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4398 MonoMethod *fast_method = NULL;
4400 /* Avoid infinite recursion */
4401 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4402 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4403 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4406 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4407 strcmp (cmethod->name, "Exit") == 0)
4408 fast_method = mono_monitor_get_fast_path (cmethod);
4412 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4414 } else if (cmethod->klass->image == mono_defaults.corlib &&
4415 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4416 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4419 #if SIZEOF_REGISTER == 8
4420 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4421 /* 64 bit reads are already atomic */
4422 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4423 ins->dreg = mono_alloc_preg (cfg);
4424 ins->inst_basereg = args [0]->dreg;
4425 ins->inst_offset = 0;
4426 MONO_ADD_INS (cfg->cbb, ins);
4430 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4431 if (strcmp (cmethod->name, "Increment") == 0) {
4432 MonoInst *ins_iconst;
4435 if (fsig->params [0]->type == MONO_TYPE_I4)
4436 opcode = OP_ATOMIC_ADD_NEW_I4;
4437 #if SIZEOF_REGISTER == 8
4438 else if (fsig->params [0]->type == MONO_TYPE_I8)
4439 opcode = OP_ATOMIC_ADD_NEW_I8;
4442 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4443 ins_iconst->inst_c0 = 1;
4444 ins_iconst->dreg = mono_alloc_ireg (cfg);
4445 MONO_ADD_INS (cfg->cbb, ins_iconst);
4447 MONO_INST_NEW (cfg, ins, opcode);
4448 ins->dreg = mono_alloc_ireg (cfg);
4449 ins->inst_basereg = args [0]->dreg;
4450 ins->inst_offset = 0;
4451 ins->sreg2 = ins_iconst->dreg;
4452 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4453 MONO_ADD_INS (cfg->cbb, ins);
4455 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4456 MonoInst *ins_iconst;
4459 if (fsig->params [0]->type == MONO_TYPE_I4)
4460 opcode = OP_ATOMIC_ADD_NEW_I4;
4461 #if SIZEOF_REGISTER == 8
4462 else if (fsig->params [0]->type == MONO_TYPE_I8)
4463 opcode = OP_ATOMIC_ADD_NEW_I8;
4466 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4467 ins_iconst->inst_c0 = -1;
4468 ins_iconst->dreg = mono_alloc_ireg (cfg);
4469 MONO_ADD_INS (cfg->cbb, ins_iconst);
4471 MONO_INST_NEW (cfg, ins, opcode);
4472 ins->dreg = mono_alloc_ireg (cfg);
4473 ins->inst_basereg = args [0]->dreg;
4474 ins->inst_offset = 0;
4475 ins->sreg2 = ins_iconst->dreg;
4476 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4477 MONO_ADD_INS (cfg->cbb, ins);
4479 } else if (strcmp (cmethod->name, "Add") == 0) {
4482 if (fsig->params [0]->type == MONO_TYPE_I4)
4483 opcode = OP_ATOMIC_ADD_NEW_I4;
4484 #if SIZEOF_REGISTER == 8
4485 else if (fsig->params [0]->type == MONO_TYPE_I8)
4486 opcode = OP_ATOMIC_ADD_NEW_I8;
4490 MONO_INST_NEW (cfg, ins, opcode);
4491 ins->dreg = mono_alloc_ireg (cfg);
4492 ins->inst_basereg = args [0]->dreg;
4493 ins->inst_offset = 0;
4494 ins->sreg2 = args [1]->dreg;
4495 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4496 MONO_ADD_INS (cfg->cbb, ins);
4499 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4501 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4502 if (strcmp (cmethod->name, "Exchange") == 0) {
4504 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4506 if (fsig->params [0]->type == MONO_TYPE_I4)
4507 opcode = OP_ATOMIC_EXCHANGE_I4;
4508 #if SIZEOF_REGISTER == 8
4509 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4510 (fsig->params [0]->type == MONO_TYPE_I))
4511 opcode = OP_ATOMIC_EXCHANGE_I8;
4513 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4514 opcode = OP_ATOMIC_EXCHANGE_I4;
4519 MONO_INST_NEW (cfg, ins, opcode);
4520 ins->dreg = mono_alloc_ireg (cfg);
4521 ins->inst_basereg = args [0]->dreg;
4522 ins->inst_offset = 0;
4523 ins->sreg2 = args [1]->dreg;
4524 MONO_ADD_INS (cfg->cbb, ins);
4526 switch (fsig->params [0]->type) {
4528 ins->type = STACK_I4;
4532 ins->type = STACK_I8;
4534 case MONO_TYPE_OBJECT:
4535 ins->type = STACK_OBJ;
4538 g_assert_not_reached ();
4541 if (cfg->gen_write_barriers && is_ref)
4542 emit_write_barrier (cfg, args [0], args [1], -1);
4544 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4546 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4547 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4549 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4550 if (fsig->params [1]->type == MONO_TYPE_I4)
4552 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4553 size = sizeof (gpointer);
4554 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4557 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4558 ins->dreg = alloc_ireg (cfg);
4559 ins->sreg1 = args [0]->dreg;
4560 ins->sreg2 = args [1]->dreg;
4561 ins->sreg3 = args [2]->dreg;
4562 ins->type = STACK_I4;
4563 MONO_ADD_INS (cfg->cbb, ins);
4564 } else if (size == 8) {
4565 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4566 ins->dreg = alloc_ireg (cfg);
4567 ins->sreg1 = args [0]->dreg;
4568 ins->sreg2 = args [1]->dreg;
4569 ins->sreg3 = args [2]->dreg;
4570 ins->type = STACK_I8;
4571 MONO_ADD_INS (cfg->cbb, ins);
4573 /* g_assert_not_reached (); */
4575 if (cfg->gen_write_barriers && is_ref)
4576 emit_write_barrier (cfg, args [0], args [1], -1);
4578 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4582 } else if (cmethod->klass->image == mono_defaults.corlib) {
4583 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4584 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4585 if (should_insert_brekpoint (cfg->method))
4586 MONO_INST_NEW (cfg, ins, OP_BREAK);
4588 MONO_INST_NEW (cfg, ins, OP_NOP);
4589 MONO_ADD_INS (cfg->cbb, ins);
4592 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4593 && strcmp (cmethod->klass->name, "Environment") == 0) {
4595 EMIT_NEW_ICONST (cfg, ins, 1);
4597 EMIT_NEW_ICONST (cfg, ins, 0);
4601 } else if (cmethod->klass == mono_defaults.math_class) {
4603 * There is general branches code for Min/Max, but it does not work for
4605 * http://everything2.com/?node_id=1051618
4609 #ifdef MONO_ARCH_SIMD_INTRINSICS
4610 if (cfg->opt & MONO_OPT_SIMD) {
4611 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4617 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4621 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected internal calls to JIT-specific replacements.
 *   Currently handles String.InternalAllocateStr: when allocation profiling
 *   is off and the code is not compiled shared, the call is rewritten to the
 *   GC's managed string allocator (vtable + length as arguments).
 *   NOTE(review): returns the replacement call MonoInst on redirect; the
 *   fall-through return for the non-redirected case is outside this view.
 */
4624 inline static MonoInst*
4625 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4626 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4628 if (method->klass == mono_defaults.string_class) {
4629 /* managed string allocation support */
4630 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4631 MonoInst *iargs [2];
4632 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4633 MonoMethod *managed_alloc = NULL;
4635 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilation cannot bake in a GC-specific allocator. */
4636 #ifndef MONO_CROSS_COMPILE
4637 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* arg 0: the String vtable, arg 1: the requested length (original arg 0) */
4641 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4642 iargs [1] = args [0];
4643 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Called when inlining: materialize the inline call's stack arguments (SP)
 *   into freshly created local variables that become the inlinee's cfg->args.
 *   For instance methods the type of the "this" slot is derived from the
 *   value on the stack rather than the signature.
 */
4650 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4652 MonoInst *store, *temp;
4655 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4656 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4659 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4660 * would be different than the MonoInst's used to represent arguments, and
4661 * the ldelema implementation can't deal with that.
4662 * Solution: When ldelema is used on an inline argument, create a var for
4663 * it, emit ldelema on that var, and emit the saving code below in
4664 * inline_method () if needed.
4666 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4667 cfg->args [i] = temp;
4668 /* This uses cfg->args [i] which is set by the preceding line */
4669 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4670 store->cil_code = sp [0]->cil_code;
4675 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4676 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4678 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid, compiled in when MONO_INLINE_CALLED_LIMITED_METHODS is set.
 *   Restricts inlining to callees whose full name starts with the prefix in
 *   the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 *   The env var is read once and cached in a function-static; an unset var
 *   caches "" which disables the filter (everything passes).
 */
4680 check_inline_called_method_name_limit (MonoMethod *called_method)
4683 static char *limit = NULL;
4685 if (limit == NULL) {
4686 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4688 if (limit_string != NULL)
4689 limit = limit_string;
4691 limit = (char *) "";
4694 if (limit [0] != '\0') {
4695 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match only: compare the first strlen (limit) characters */
4697 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4698 g_free (called_method_name);
4700 //return (strncmp_result <= 0);
4701 return (strncmp_result == 0);
4708 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid, mirror of check_inline_called_method_name_limit but keyed
 *   on the CALLER: only callers whose full name starts with the prefix in
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT may inline anything.
 */
4710 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4713 static char *limit = NULL;
4715 if (limit == NULL) {
4716 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4717 if (limit_string != NULL) {
4718 limit = limit_string;
4720 limit = (char *) "";
4724 if (limit [0] != '\0') {
4725 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match only: compare the first strlen (limit) characters */
4727 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4728 g_free (caller_method_name);
4730 //return (strncmp_result <= 0);
4731 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point. The inlinee's IR is
 *   generated by a recursive mono_method_to_ir () call into fresh start/end
 *   bblocks (sbblock/ebblock); on success the new blocks are linked in and
 *   merged where possible, on failure (costs < 0 and !inline_allways) the
 *   saved cfg state is restored, which discards the partial blocks.
 *   The per-cfg fields (locals, args, arg_types, real_offset, cbb_hash,
 *   cil_offset_to_bb*, current_method, generic_context, ret_var_set, ...)
 *   are saved into prev_* locals before the recursion and restored after —
 *   the exact save/restore pairing below is load-bearing; keep it in sync.
 */
4739 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4740 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4742 MonoInst *ins, *rvar = NULL;
4743 MonoMethodHeader *cheader;
4744 MonoBasicBlock *ebblock, *sbblock;
4746 MonoMethod *prev_inlined_method;
4747 MonoInst **prev_locals, **prev_args;
4748 MonoType **prev_arg_types;
4749 guint prev_real_offset;
4750 GHashTable *prev_cbb_hash;
4751 MonoBasicBlock **prev_cil_offset_to_bb;
4752 MonoBasicBlock *prev_cbb;
4753 unsigned char* prev_cil_start;
4754 guint32 prev_cil_offset_to_bb_len;
4755 MonoMethod *prev_current_method;
4756 MonoGenericContext *prev_generic_context;
4757 gboolean ret_var_set, prev_ret_var_set;
4759 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters: bail out early unless forced (inline_allways). */
4761 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4762 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4765 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4766 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4770 if (cfg->verbose_level > 2)
4771 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4773 if (!cmethod->inline_info) {
4774 mono_jit_stats.inlineable_methods++;
4775 cmethod->inline_info = 1;
4778 /* allocate local variables */
4779 cheader = mono_method_get_header (cmethod);
4781 if (cheader == NULL || mono_loader_get_last_error ()) {
4783 mono_metadata_free_mh (cheader);
4784 mono_loader_clear_error ();
4788 /*Must verify before creating locals as it can cause the JIT to assert.*/
4789 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4790 mono_metadata_free_mh (cheader);
4794 /* allocate space to store the return value */
4795 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4796 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the inlinee; caller's set restored below. */
4800 prev_locals = cfg->locals;
4801 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4802 for (i = 0; i < cheader->num_locals; ++i)
4803 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4805 /* allocate start and end blocks */
4806 /* This is needed so if the inline is aborted, we can clean up */
4807 NEW_BBLOCK (cfg, sbblock);
4808 sbblock->real_offset = real_offset;
4810 NEW_BBLOCK (cfg, ebblock);
4811 ebblock->block_num = cfg->num_bblocks++;
4812 ebblock->real_offset = real_offset;
/* Save the remaining per-method cfg state before recursing. */
4814 prev_args = cfg->args;
4815 prev_arg_types = cfg->arg_types;
4816 prev_inlined_method = cfg->inlined_method;
4817 cfg->inlined_method = cmethod;
4818 cfg->ret_var_set = FALSE;
4819 cfg->inline_depth ++;
4820 prev_real_offset = cfg->real_offset;
4821 prev_cbb_hash = cfg->cbb_hash;
4822 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4823 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4824 prev_cil_start = cfg->cil_start;
4825 prev_cbb = cfg->cbb;
4826 prev_current_method = cfg->current_method;
4827 prev_generic_context = cfg->generic_context;
4828 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the inlinee; negative costs mean "don't inline". */
4830 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4832 ret_var_set = cfg->ret_var_set;
/* Restore caller state (mirror of the saves above). */
4834 cfg->inlined_method = prev_inlined_method;
4835 cfg->real_offset = prev_real_offset;
4836 cfg->cbb_hash = prev_cbb_hash;
4837 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4838 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4839 cfg->cil_start = prev_cil_start;
4840 cfg->locals = prev_locals;
4841 cfg->args = prev_args;
4842 cfg->arg_types = prev_arg_types;
4843 cfg->current_method = prev_current_method;
4844 cfg->generic_context = prev_generic_context;
4845 cfg->ret_var_set = prev_ret_var_set;
4846 cfg->inline_depth --;
/* Accept the inline when it stayed under the cost budget (or was forced). */
4848 if ((costs >= 0 && costs < 60) || inline_allways) {
4849 if (cfg->verbose_level > 2)
4850 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4852 mono_jit_stats.inlined_methods++;
4854 /* always add some code to avoid block split failures */
4855 MONO_INST_NEW (cfg, ins, OP_NOP);
4856 MONO_ADD_INS (prev_cbb, ins);
4858 prev_cbb->next_bb = sbblock;
4859 link_bblock (cfg, prev_cbb, sbblock);
4862 * Get rid of the begin and end bblocks if possible to aid local
4865 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4867 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4868 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4870 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4871 MonoBasicBlock *prev = ebblock->in_bb [0];
4872 mono_merge_basic_blocks (cfg, prev, ebblock);
4874 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4875 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4876 cfg->cbb = prev_cbb;
4884 * If the inlined method contains only a throw, then the ret var is not
4885 * set, so set it to a dummy value.
4888 static double r8_0 = 0.0;
4890 switch (rvar->type) {
4892 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4895 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4900 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4903 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4904 ins->type = STACK_R8;
4905 ins->inst_p0 = (void*)&r8_0;
4906 ins->dreg = rvar->dreg;
4907 MONO_ADD_INS (cfg->cbb, ins);
4910 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4913 g_assert_not_reached ();
/* Push the inlinee's return value onto the evaluation stack. */
4917 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4920 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: clear any pending exception and roll back. */
4923 if (cfg->verbose_level > 2)
4924 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4925 cfg->exception_type = MONO_EXCEPTION_NONE;
4926 mono_loader_clear_error ();
4928 /* This gets rid of the newly added bblocks */
4929 cfg->cbb = prev_cbb;
4931 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4936 * Some of these comments may well be out-of-date.
4937 * Design decisions: we do a single pass over the IL code (and we do bblock
4938 * splitting/merging in the few cases when it's required: a back jump to an IL
4939 * address that was not already seen as bblock starting point).
4940 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4941 * Complex operations are decomposed in simpler ones right away. We need to let the
4942 * arch-specific code peek and poke inside this process somehow (except when the
4943 * optimizations can take advantage of the full semantic info of coarse opcodes).
4944 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4945 * MonoInst->opcode initially is the IL opcode or some simplification of that
4946 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4947 * opcode with value bigger than OP_LAST.
4948 * At this point the IR can be handed over to an interpreter, a dumb code generator
4949 * or to the optimizing code generator that will translate it to SSA form.
4951 * Profiling directed optimizations.
4952 * We may compile by default with few or no optimizations and instrument the code
4953 * or the user may indicate what methods to optimize the most either in a config file
4954 * or through repeated runs where the compiler applies offline the optimizations to
4955 * each method and then decides if it was worth it.
/*
 * Inline verification helpers used throughout mono_method_to_ir ().
 * Each expands to a check that triggers UNVERIFIED (or LOAD_ERROR for type
 * loads) on failure; they rely on locals of the enclosing function
 * (sp, stack_start, header, num_args, ip, end, cfg).
 */
4958 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4959 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4960 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4961 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4962 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4963 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4964 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4965 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
4967 /* offset from br.s -> br like opcodes */
4968 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the CIL address IP still belongs to bblock BB, i.e. no
 *   other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
4971 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4973 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4975 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Pre-pass over the CIL stream [start, end): decode each opcode, and for
 *   every branch/switch create bblocks (via GET_BBLOCK) at each branch
 *   target and at the fall-through address, so the main translation loop
 *   knows all block boundaries up front. Additionally, blocks that end in
 *   CEE_THROW are flagged out_of_line so they can be moved to cold code.
 */
4979 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4981 unsigned char *ip = start;
4982 unsigned char *target;
4985 MonoBasicBlock *bblock;
4986 const MonoOpcode *opcode;
4989 cli_addr = ip - start;
4990 i = mono_opcode_value ((const guint8 **)&ip, end);
4993 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the instruction length and
 * any branch targets. */
4994 switch (opcode->argument) {
4995 case MonoInlineNone:
4998 case MonoInlineString:
4999 case MonoInlineType:
5000 case MonoInlineField:
5001 case MonoInlineMethod:
5004 case MonoShortInlineR:
5011 case MonoShortInlineVar:
5012 case MonoShortInlineI:
5015 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction. */
5016 target = start + cli_addr + 2 + (signed char)ip [1];
5017 GET_BBLOCK (cfg, bblock, target);
5020 GET_BBLOCK (cfg, bblock, ip);
5022 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction. */
5023 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5024 GET_BBLOCK (cfg, bblock, target);
5027 GET_BBLOCK (cfg, bblock, ip);
5029 case MonoInlineSwitch: {
5030 guint32 n = read32 (ip + 1);
/* cli_addr now points past the switch table (5 header bytes + n * 4). */
5033 cli_addr += 5 + 4 * n;
5034 target = start + cli_addr;
5035 GET_BBLOCK (cfg, bblock, target);
5037 for (j = 0; j < n; ++j) {
5038 target = start + cli_addr + (gint32)read32 (ip);
5039 GET_BBLOCK (cfg, bblock, target);
5049 g_assert_not_reached ();
5052 if (i == CEE_THROW) {
5053 unsigned char *bb_start = ip - 1;
5055 /* Find the start of the bblock containing the throw */
5057 while ((bb_start >= start) && !bblock) {
5058 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5062 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method token in the context of M. For wrapper methods the
 *   "token" is an index into the wrapper's own data; otherwise it is a
 *   regular metadata token resolved against M's image. "allow_open" means
 *   open constructed types are not rejected here (contrast mini_get_method).
 */
5071 static inline MonoMethod *
5072 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5076 if (m->wrapper_type != MONO_WRAPPER_NONE)
5077 return mono_method_get_wrapper_data (m, token);
5079 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the elided
 *   branch below this check handles that case).
 */
5084 static inline MonoMethod *
5085 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5087 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5089 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type token in METHOD's context: wrapper data for wrappers,
 *   otherwise a full metadata lookup against the method's image, then
 *   initialize the class before returning it.
 */
5095 static inline MonoClass*
5096 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5100 if (method->wrapper_type != MONO_WRAPPER_NONE)
5101 klass = mono_method_get_wrapper_data (method, token);
5103 klass = mono_class_get_full (method->klass->image, token, context);
5105 mono_class_init (klass);
5110 * Returns TRUE if the JIT should abort inlining because "callee"
5111 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands from CALLER to CALLEE. For an ECMA link
 *   demand, code throwing a SecurityException is emitted inline before the
 *   call site; for other failures the cfg is flagged with
 *   MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an earlier exception is
 *   already pending, which must not be overwritten).
 */
5114 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5118 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5122 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5123 if (result == MONO_JIT_SECURITY_OK)
5126 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5127 /* Generate code to throw a SecurityException before the actual call/link */
5128 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5131 NEW_ICONST (cfg, args [0], 4);
5132 NEW_METHODCONST (cfg, args [1], caller);
5133 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5134 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5135 /* don't hide previous results */
5136 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5137 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return SecurityManager.ThrowException (1 argument), resolved lazily and
 *   cached in a function-static on first use.
 */
5145 throw_exception (void)
5147 static MonoMethod *method = NULL;
5150 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5151 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   emission point, so the pre-built exception object is thrown at runtime.
 */
5158 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5160 MonoMethod *thrower = throw_exception ();
5163 EMIT_NEW_PCONST (cfg, args [0], ex);
5164 mono_emit_method_call (cfg, thrower, args, NULL);
5168 * Return the original method if a wrapper is specified. We can only access
5169 * the custom attributes from the original method.
5172 get_original_method (MonoMethod *method)
5174 if (method->wrapper_type == MONO_WRAPPER_NONE)
5177 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5178 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5181 /* in other cases we need to find the original method */
5182 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: ask the security layer whether
 *   the (unwrapped) caller may touch FIELD, and if it returns an exception
 *   object, emit code that throws it at runtime instead of performing the
 *   access.
 */
5186 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5187 MonoBasicBlock *bblock, unsigned char *ip)
5189 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5190 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5192 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls, parallel to the field variant above:
 *   if the security layer disallows CALLER -> CALLEE, emit code that throws
 *   the returned exception at runtime.
 */
5196 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5197 MonoBasicBlock *bblock, unsigned char *ip)
5199 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5200 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5202 emit_throw_exception (cfg, ex);
5206 * Check that the IL instructions at ip are the array initialization
5207 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <fld>; call RuntimeHelpers::
 *   InitializeArray" sequence following a newarr, and if it matches, return
 *   a pointer to the static initializer blob (or, under AOT, the RVA) so
 *   the JIT can replace the call with a direct memory copy.
 *   Returns NULL when the pattern, element type, or data size don't check out.
 */
5210 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5213 * newarr[System.Int32]
5215 * ldtoken field valuetype ...
5216 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5218 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5219 guint32 token = read32 (ip + 7);
5220 guint32 field_token = read32 (ip + 2);
5221 guint32 field_index = field_token & 0xffffff;
5223 const char *data_ptr;
5225 MonoMethod *cmethod;
5226 MonoClass *dummy_class;
5227 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5233 *out_field_token = field_token;
5235 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
5238 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw bytes are endian-safe are accepted. */
5240 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5241 case MONO_TYPE_BOOLEAN:
5245 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5246 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5247 case MONO_TYPE_CHAR:
5257 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the array contents. */
5267 if (size > mono_type_size (field->type, &dummy_align))
5270 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5271 if (!method->klass->image->dynamic) {
5272 field_index = read32 (ip + 2) & 0xffffff;
5273 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5274 data_ptr = mono_image_rva_map (method->klass->image, rva);
5275 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5276 /* for aot code we do the lookup on load */
5277 if (aot && data_ptr)
5278 return GUINT_TO_POINTER (rva);
5280 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5282 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg with a message naming the
 *   method and the offending IL instruction (or "method body is empty.").
 *   The header is queued on headers_to_free rather than freed here.
 */
5290 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5292 char *method_fname = mono_method_full_name (method, TRUE);
5294 MonoMethodHeader *header = mono_method_get_header (method);
5296 if (header->code_size == 0)
5297 method_code = g_strdup ("method body is empty.");
5299 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5300 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5301 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5302 g_free (method_fname);
5303 g_free (method_code);
5304 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Abort compilation with a pre-constructed exception object; the object
 *   is GC-registered as a root since it lives in exception_ptr across GCs.
 */
5308 set_exception_object (MonoCompile *cfg, MonoException *exception)
5310 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5311 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5312 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Whether KLASS is a reference type, resolving type variables through the
 *   generic sharing context when compiling shared code.
 */
5316 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5320 if (cfg->generic_sharing_context)
5321 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5323 type = &klass->byval_arg;
5324 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit the store for CIL stloc N. When the value on the stack is a fresh
 *   ICONST/I8CONST that is still the last instruction of the current bblock
 *   and the store would be a plain register move, retarget the constant's
 *   dreg to the local instead of emitting a separate store.
 */
5328 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5331 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5332 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5333 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5334 /* Optimize reg-reg moves away */
5336 * Can't optimize other opcodes, since sp[0] might point to
5337 * the last ins of a decomposed opcode.
5339 sp [0]->dreg = (cfg)->locals [n]->dreg;
5341 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5346 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca N; initobj <type>": instead of taking the local's
 *   address, directly zero the local (PCONST NULL for reference types,
 *   VZERO for value types), which keeps the local addressable-free.
 *   Returns the advanced ip on success (the return is outside this view;
 *   the SIZE parameter selects the short/long ldloca encoding).
 */
5349 static inline unsigned char *
5350 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5359 local = read16 (ip + 2);
5363 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5364 gboolean skip = FALSE;
5366 /* From the INITOBJ case */
5367 token = read32 (ip + 2);
5368 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5369 CHECK_TYPELOAD (klass);
5370 if (generic_class_is_reference_type (cfg, klass)) {
5371 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5372 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5373 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5374 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5375 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Whether CLASS is System.Exception or derives from it (walks the parent
 *   chain comparing against mono_defaults.exception_class).
 */
5388 is_exception_class (MonoClass *class)
5391 if (class == mono_defaults.exception_class)
5393 class = class->parent;
5399 * mono_method_to_ir:
5401 * Translate the .net IL into linear IR.
5404 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5405 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5406 guint inline_offset, gboolean is_virtual_call)
5409 MonoInst *ins, **sp, **stack_start;
5410 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5411 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5412 MonoMethod *cmethod, *method_definition;
5413 MonoInst **arg_array;
5414 MonoMethodHeader *header;
5416 guint32 token, ins_flag;
5418 MonoClass *constrained_call = NULL;
5419 unsigned char *ip, *end, *target, *err_pos;
5420 static double r8_0 = 0.0;
5421 MonoMethodSignature *sig;
5422 MonoGenericContext *generic_context = NULL;
5423 MonoGenericContainer *generic_container = NULL;
5424 MonoType **param_types;
5425 int i, n, start_new_bblock, dreg;
5426 int num_calls = 0, inline_costs = 0;
5427 int breakpoint_id = 0;
5429 MonoBoolean security, pinvoke;
5430 MonoSecurityManager* secman = NULL;
5431 MonoDeclSecurityActions actions;
5432 GSList *class_inits = NULL;
5433 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5435 gboolean init_locals, seq_points, skip_dead_blocks;
5437 /* serialization and xdomain stuff may need access to private fields and methods */
5438 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5439 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5440 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5441 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5442 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5443 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5445 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5447 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5448 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5449 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5450 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5452 image = method->klass->image;
5453 header = mono_method_get_header (method);
5455 MonoLoaderError *error;
5457 if ((error = mono_loader_get_last_error ())) {
5458 mono_cfg_set_exception (cfg, error->exception_type);
5460 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5461 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5463 goto exception_exit;
5465 generic_container = mono_method_get_generic_container (method);
5466 sig = mono_method_signature (method);
5467 num_args = sig->hasthis + sig->param_count;
5468 ip = (unsigned char*)header->code;
5469 cfg->cil_start = ip;
5470 end = ip + header->code_size;
5471 mono_jit_stats.cil_code_size += header->code_size;
5472 init_locals = header->init_locals;
5474 seq_points = cfg->gen_seq_points && cfg->method == method;
5477 * Methods without init_locals set could cause asserts in various passes
5482 method_definition = method;
5483 while (method_definition->is_inflated) {
5484 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5485 method_definition = imethod->declaring;
5488 /* SkipVerification is not allowed if core-clr is enabled */
5489 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5491 dont_verify_stloc = TRUE;
5494 if (mono_debug_using_mono_debugger ())
5495 cfg->keep_cil_nops = TRUE;
5497 if (sig->is_inflated)
5498 generic_context = mono_method_get_context (method);
5499 else if (generic_container)
5500 generic_context = &generic_container->context;
5501 cfg->generic_context = generic_context;
5503 if (!cfg->generic_sharing_context)
5504 g_assert (!sig->has_type_parameters);
5506 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5507 g_assert (method->is_inflated);
5508 g_assert (mono_method_get_context (method)->method_inst);
5510 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5511 g_assert (sig->generic_param_count);
5513 if (cfg->method == method) {
5514 cfg->real_offset = 0;
5516 cfg->real_offset = inline_offset;
5519 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5520 cfg->cil_offset_to_bb_len = header->code_size;
5522 cfg->current_method = method;
5524 if (cfg->verbose_level > 2)
5525 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5527 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5529 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5530 for (n = 0; n < sig->param_count; ++n)
5531 param_types [n + sig->hasthis] = sig->params [n];
5532 cfg->arg_types = param_types;
5534 dont_inline = g_list_prepend (dont_inline, method);
5535 if (cfg->method == method) {
5537 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5538 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5541 NEW_BBLOCK (cfg, start_bblock);
5542 cfg->bb_entry = start_bblock;
5543 start_bblock->cil_code = NULL;
5544 start_bblock->cil_length = 0;
5547 NEW_BBLOCK (cfg, end_bblock);
5548 cfg->bb_exit = end_bblock;
5549 end_bblock->cil_code = NULL;
5550 end_bblock->cil_length = 0;
5551 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5552 g_assert (cfg->num_bblocks == 2);
5554 arg_array = cfg->args;
5556 if (header->num_clauses) {
5557 cfg->spvars = g_hash_table_new (NULL, NULL);
5558 cfg->exvars = g_hash_table_new (NULL, NULL);
5560 /* handle exception clauses */
5561 for (i = 0; i < header->num_clauses; ++i) {
5562 MonoBasicBlock *try_bb;
5563 MonoExceptionClause *clause = &header->clauses [i];
5564 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5565 try_bb->real_offset = clause->try_offset;
5566 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5567 tblock->real_offset = clause->handler_offset;
5568 tblock->flags |= BB_EXCEPTION_HANDLER;
5570 link_bblock (cfg, try_bb, tblock);
5572 if (*(ip + clause->handler_offset) == CEE_POP)
5573 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5575 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5576 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5577 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5578 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5579 MONO_ADD_INS (tblock, ins);
5581 /* todo: is a fault block unsafe to optimize? */
5582 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5583 tblock->flags |= BB_EXCEPTION_UNSAFE;
5587 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5589 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5591 /* catch and filter blocks get the exception object on the stack */
5592 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5593 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5594 MonoInst *dummy_use;
5596 /* mostly like handle_stack_args (), but just sets the input args */
5597 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5598 tblock->in_scount = 1;
5599 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5600 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5603 * Add a dummy use for the exvar so its liveness info will be
5607 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5609 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5610 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5611 tblock->flags |= BB_EXCEPTION_HANDLER;
5612 tblock->real_offset = clause->data.filter_offset;
5613 tblock->in_scount = 1;
5614 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5615 /* The filter block shares the exvar with the handler block */
5616 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5617 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5618 MONO_ADD_INS (tblock, ins);
5622 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5623 clause->data.catch_class &&
5624 cfg->generic_sharing_context &&
5625 mono_class_check_context_used (clause->data.catch_class)) {
5627 * In shared generic code with catch
5628 * clauses containing type variables
5629 * the exception handling code has to
5630 * be able to get to the rgctx.
5631 * Therefore we have to make sure that
5632 * the vtable/mrgctx argument (for
5633 * static or generic methods) or the
5634 * "this" argument (for non-static
5635 * methods) are live.
5637 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5638 mini_method_get_context (method)->method_inst ||
5639 method->klass->valuetype) {
5640 mono_get_vtable_var (cfg);
5642 MonoInst *dummy_use;
5644 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5649 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5650 cfg->cbb = start_bblock;
5651 cfg->args = arg_array;
5652 mono_save_args (cfg, sig, inline_args);
5655 /* FIRST CODE BLOCK */
5656 NEW_BBLOCK (cfg, bblock);
5657 bblock->cil_code = ip;
5661 ADD_BBLOCK (cfg, bblock);
5663 if (cfg->method == method) {
5664 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5665 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5666 MONO_INST_NEW (cfg, ins, OP_BREAK);
5667 MONO_ADD_INS (bblock, ins);
5671 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5672 secman = mono_security_manager_get_methods ();
5674 security = (secman && mono_method_has_declsec (method));
5675 /* at this point having security doesn't mean we have any code to generate */
5676 if (security && (cfg->method == method)) {
5677 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5678 * And we do not want to enter the next section (with allocation) if we
5679 * have nothing to generate */
5680 security = mono_declsec_get_demands (method, &actions);
5683 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5684 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5686 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5687 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5688 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5690 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5691 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5695 mono_custom_attrs_free (custom);
5698 custom = mono_custom_attrs_from_class (wrapped->klass);
5699 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5703 mono_custom_attrs_free (custom);
5706 /* not a P/Invoke after all */
5711 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5712 /* we use a separate basic block for the initialization code */
5713 NEW_BBLOCK (cfg, init_localsbb);
5714 cfg->bb_init = init_localsbb;
5715 init_localsbb->real_offset = cfg->real_offset;
5716 start_bblock->next_bb = init_localsbb;
5717 init_localsbb->next_bb = bblock;
5718 link_bblock (cfg, start_bblock, init_localsbb);
5719 link_bblock (cfg, init_localsbb, bblock);
5721 cfg->cbb = init_localsbb;
5723 start_bblock->next_bb = bblock;
5724 link_bblock (cfg, start_bblock, bblock);
5727 /* at this point we know, if security is TRUE, that some code needs to be generated */
5728 if (security && (cfg->method == method)) {
5731 mono_jit_stats.cas_demand_generation++;
5733 if (actions.demand.blob) {
5734 /* Add code for SecurityAction.Demand */
5735 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5736 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5737 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5738 mono_emit_method_call (cfg, secman->demand, args, NULL);
5740 if (actions.noncasdemand.blob) {
5741 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5742 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5743 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5744 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5745 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5746 mono_emit_method_call (cfg, secman->demand, args, NULL);
5748 if (actions.demandchoice.blob) {
5749 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5750 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5751 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5752 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5753 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5757 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5759 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5762 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5763 /* check if this is native code, e.g. an icall or a p/invoke */
5764 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5765 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5767 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5768 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5770 /* if this is a native call then it can only be JITted from platform code */
5771 if ((icall || pinvk) && method->klass && method->klass->image) {
5772 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5773 MonoException *ex = icall ? mono_get_exception_security () :
5774 mono_get_exception_method_access ();
5775 emit_throw_exception (cfg, ex);
5782 if (header->code_size == 0)
5785 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5790 if (cfg->method == method)
5791 mono_debug_init_method (cfg, bblock, breakpoint_id);
5793 for (n = 0; n < header->num_locals; ++n) {
5794 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5799 /* We force the vtable variable here for all shared methods
5800 for the possibility that they might show up in a stack
5801 trace where their exact instantiation is needed. */
5802 if (cfg->generic_sharing_context && method == cfg->method) {
5803 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5804 mini_method_get_context (method)->method_inst ||
5805 method->klass->valuetype) {
5806 mono_get_vtable_var (cfg);
5808 /* FIXME: Is there a better way to do this?
5809 We need the variable live for the duration
5810 of the whole method. */
5811 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5815 /* add a check for this != NULL to inlined methods */
5816 if (is_virtual_call) {
5819 NEW_ARGLOAD (cfg, arg_ins, 0);
5820 MONO_ADD_INS (cfg->cbb, arg_ins);
5821 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5824 skip_dead_blocks = !dont_verify;
5825 if (skip_dead_blocks) {
5826 original_bb = bb = mono_basic_block_split (method, &error);
5827 if (!mono_error_ok (&error)) {
5828 mono_error_cleanup (&error);
5834 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5835 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5838 start_new_bblock = 0;
5841 if (cfg->method == method)
5842 cfg->real_offset = ip - header->code;
5844 cfg->real_offset = inline_offset;
5849 if (start_new_bblock) {
5850 bblock->cil_length = ip - bblock->cil_code;
5851 if (start_new_bblock == 2) {
5852 g_assert (ip == tblock->cil_code);
5854 GET_BBLOCK (cfg, tblock, ip);
5856 bblock->next_bb = tblock;
5859 start_new_bblock = 0;
5860 for (i = 0; i < bblock->in_scount; ++i) {
5861 if (cfg->verbose_level > 3)
5862 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5863 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5867 g_slist_free (class_inits);
5870 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5871 link_bblock (cfg, bblock, tblock);
5872 if (sp != stack_start) {
5873 handle_stack_args (cfg, stack_start, sp - stack_start);
5875 CHECK_UNVERIFIABLE (cfg);
5877 bblock->next_bb = tblock;
5880 for (i = 0; i < bblock->in_scount; ++i) {
5881 if (cfg->verbose_level > 3)
5882 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5883 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5886 g_slist_free (class_inits);
5891 if (skip_dead_blocks) {
5892 int ip_offset = ip - header->code;
5894 if (ip_offset == bb->end)
5898 int op_size = mono_opcode_size (ip, end);
5899 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5901 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5903 if (ip_offset + op_size == bb->end) {
5904 MONO_INST_NEW (cfg, ins, OP_NOP);
5905 MONO_ADD_INS (bblock, ins);
5906 start_new_bblock = 1;
5914 * Sequence points are points where the debugger can place a breakpoint.
5915 * Currently, we generate these automatically at points where the IL
5918 if (seq_points && sp == stack_start) {
5919 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5920 MONO_ADD_INS (cfg->cbb, ins);
5923 bblock->real_offset = cfg->real_offset;
5925 if ((cfg->method == method) && cfg->coverage_info) {
5926 guint32 cil_offset = ip - header->code;
5927 cfg->coverage_info->data [cil_offset].cil_code = ip;
5929 /* TODO: Use an increment here */
5930 #if defined(TARGET_X86)
5931 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5932 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5934 MONO_ADD_INS (cfg->cbb, ins);
5936 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5937 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5941 if (cfg->verbose_level > 3)
5942 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5946 if (cfg->keep_cil_nops)
5947 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5949 MONO_INST_NEW (cfg, ins, OP_NOP);
5951 MONO_ADD_INS (bblock, ins);
5954 if (should_insert_brekpoint (cfg->method))
5955 MONO_INST_NEW (cfg, ins, OP_BREAK);
5957 MONO_INST_NEW (cfg, ins, OP_NOP);
5959 MONO_ADD_INS (bblock, ins);
5965 CHECK_STACK_OVF (1);
5966 n = (*ip)-CEE_LDARG_0;
5968 EMIT_NEW_ARGLOAD (cfg, ins, n);
5976 CHECK_STACK_OVF (1);
5977 n = (*ip)-CEE_LDLOC_0;
5979 EMIT_NEW_LOCLOAD (cfg, ins, n);
5988 n = (*ip)-CEE_STLOC_0;
5991 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5993 emit_stloc_ir (cfg, sp, header, n);
6000 CHECK_STACK_OVF (1);
6003 EMIT_NEW_ARGLOAD (cfg, ins, n);
6009 CHECK_STACK_OVF (1);
6012 NEW_ARGLOADA (cfg, ins, n);
6013 MONO_ADD_INS (cfg->cbb, ins);
6023 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6025 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6030 CHECK_STACK_OVF (1);
6033 EMIT_NEW_LOCLOAD (cfg, ins, n);
6037 case CEE_LDLOCA_S: {
6038 unsigned char *tmp_ip;
6040 CHECK_STACK_OVF (1);
6041 CHECK_LOCAL (ip [1]);
6043 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6049 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6058 CHECK_LOCAL (ip [1]);
6059 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6061 emit_stloc_ir (cfg, sp, header, ip [1]);
6066 CHECK_STACK_OVF (1);
6067 EMIT_NEW_PCONST (cfg, ins, NULL);
6068 ins->type = STACK_OBJ;
6073 CHECK_STACK_OVF (1);
6074 EMIT_NEW_ICONST (cfg, ins, -1);
6087 CHECK_STACK_OVF (1);
6088 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6094 CHECK_STACK_OVF (1);
6096 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6102 CHECK_STACK_OVF (1);
6103 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6109 CHECK_STACK_OVF (1);
6110 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6111 ins->type = STACK_I8;
6112 ins->dreg = alloc_dreg (cfg, STACK_I8);
6114 ins->inst_l = (gint64)read64 (ip);
6115 MONO_ADD_INS (bblock, ins);
6121 gboolean use_aotconst = FALSE;
6123 #ifdef TARGET_POWERPC
6124 /* FIXME: Clean this up */
6125 if (cfg->compile_aot)
6126 use_aotconst = TRUE;
6129 /* FIXME: we should really allocate this only late in the compilation process */
6130 f = mono_domain_alloc (cfg->domain, sizeof (float));
6132 CHECK_STACK_OVF (1);
6138 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6140 dreg = alloc_freg (cfg);
6141 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6142 ins->type = STACK_R8;
6144 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6145 ins->type = STACK_R8;
6146 ins->dreg = alloc_dreg (cfg, STACK_R8);
6148 MONO_ADD_INS (bblock, ins);
6158 gboolean use_aotconst = FALSE;
6160 #ifdef TARGET_POWERPC
6161 /* FIXME: Clean this up */
6162 if (cfg->compile_aot)
6163 use_aotconst = TRUE;
6166 /* FIXME: we should really allocate this only late in the compilation process */
6167 d = mono_domain_alloc (cfg->domain, sizeof (double));
6169 CHECK_STACK_OVF (1);
6175 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6177 dreg = alloc_freg (cfg);
6178 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6179 ins->type = STACK_R8;
6181 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6182 ins->type = STACK_R8;
6183 ins->dreg = alloc_dreg (cfg, STACK_R8);
6185 MONO_ADD_INS (bblock, ins);
6194 MonoInst *temp, *store;
6196 CHECK_STACK_OVF (1);
6200 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6201 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6203 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6206 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6219 if (sp [0]->type == STACK_R8)
6220 /* we need to pop the value from the x86 FP stack */
6221 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6230 if (stack_start != sp)
6232 token = read32 (ip + 1);
6233 /* FIXME: check the signature matches */
6234 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6236 if (!cmethod || mono_loader_get_last_error ())
6239 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6240 GENERIC_SHARING_FAILURE (CEE_JMP);
6242 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6243 CHECK_CFG_EXCEPTION;
6245 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6247 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6250 /* Handle tail calls similarly to calls */
6251 n = fsig->param_count + fsig->hasthis;
6253 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6254 call->method = cmethod;
6255 call->tail_call = TRUE;
6256 call->signature = mono_method_signature (cmethod);
6257 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6258 call->inst.inst_p0 = cmethod;
6259 for (i = 0; i < n; ++i)
6260 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6262 mono_arch_emit_call (cfg, call);
6263 MONO_ADD_INS (bblock, (MonoInst*)call);
6266 for (i = 0; i < num_args; ++i)
6267 /* Prevent arguments from being optimized away */
6268 arg_array [i]->flags |= MONO_INST_VOLATILE;
6270 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6271 ins = (MonoInst*)call;
6272 ins->inst_p0 = cmethod;
6273 MONO_ADD_INS (bblock, ins);
6277 start_new_bblock = 1;
6282 case CEE_CALLVIRT: {
6283 MonoInst *addr = NULL;
6284 MonoMethodSignature *fsig = NULL;
6286 int virtual = *ip == CEE_CALLVIRT;
6287 int calli = *ip == CEE_CALLI;
6288 gboolean pass_imt_from_rgctx = FALSE;
6289 MonoInst *imt_arg = NULL;
6290 gboolean pass_vtable = FALSE;
6291 gboolean pass_mrgctx = FALSE;
6292 MonoInst *vtable_arg = NULL;
6293 gboolean check_this = FALSE;
6294 gboolean supported_tail_call = FALSE;
6297 token = read32 (ip + 1);
6304 if (method->wrapper_type != MONO_WRAPPER_NONE)
6305 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6307 fsig = mono_metadata_parse_signature (image, token);
6309 n = fsig->param_count + fsig->hasthis;
6311 if (method->dynamic && fsig->pinvoke) {
6315 * This is a call through a function pointer using a pinvoke
6316 * signature. Have to create a wrapper and call that instead.
6317 * FIXME: This is very slow, need to create a wrapper at JIT time
6318 * instead based on the signature.
6320 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6321 EMIT_NEW_PCONST (cfg, args [1], fsig);
6323 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6326 MonoMethod *cil_method;
6328 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6329 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6330 cil_method = cmethod;
6331 } else if (constrained_call) {
6332 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6334 * This is needed since get_method_constrained can't find
6335 * the method in klass representing a type var.
6336 * The type var is guaranteed to be a reference type in this
6339 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6340 cil_method = cmethod;
6341 g_assert (!cmethod->klass->valuetype);
6343 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6346 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6347 cil_method = cmethod;
6350 if (!cmethod || mono_loader_get_last_error ())
6352 if (!dont_verify && !cfg->skip_visibility) {
6353 MonoMethod *target_method = cil_method;
6354 if (method->is_inflated) {
6355 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6357 if (!mono_method_can_access_method (method_definition, target_method) &&
6358 !mono_method_can_access_method (method, cil_method))
6359 METHOD_ACCESS_FAILURE;
6362 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6363 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6365 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6366 /* MS.NET seems to silently convert this to a callvirt */
6371 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6372 * converts to a callvirt.
6374 * tests/bug-515884.il is an example of this behavior
6376 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6377 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6378 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6382 if (!cmethod->klass->inited)
6383 if (!mono_class_init (cmethod->klass))
6386 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6387 mini_class_is_system_array (cmethod->klass)) {
6388 array_rank = cmethod->klass->rank;
6389 fsig = mono_method_signature (cmethod);
6391 fsig = mono_method_signature (cmethod);
6396 if (fsig->pinvoke) {
6397 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6398 check_for_pending_exc, FALSE);
6399 fsig = mono_method_signature (wrapper);
6400 } else if (constrained_call) {
6401 fsig = mono_method_signature (cmethod);
6403 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6407 mono_save_token_info (cfg, image, token, cil_method);
6409 n = fsig->param_count + fsig->hasthis;
6411 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6412 if (check_linkdemand (cfg, method, cmethod))
6414 CHECK_CFG_EXCEPTION;
6417 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6418 g_assert_not_reached ();
6421 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6424 if (!cfg->generic_sharing_context && cmethod)
6425 g_assert (!mono_method_check_context_used (cmethod));
6429 //g_assert (!virtual || fsig->hasthis);
6433 if (constrained_call) {
6435 * We have the `constrained.' prefix opcode.
6437 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6439 * The type parameter is instantiated as a valuetype,
6440 * but that type doesn't override the method we're
6441 * calling, so we need to box `this'.
6443 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6444 ins->klass = constrained_call;
6445 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6446 CHECK_CFG_EXCEPTION;
6447 } else if (!constrained_call->valuetype) {
6448 int dreg = alloc_preg (cfg);
6451 * The type parameter is instantiated as a reference
6452 * type. We have a managed pointer on the stack, so
6453 * we need to dereference it here.
6455 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6456 ins->type = STACK_OBJ;
6458 } else if (cmethod->klass->valuetype)
6460 constrained_call = NULL;
6463 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6467 * If the callee is a shared method, then its static cctor
6468 * might not get called after the call was patched.
6470 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6471 emit_generic_class_init (cfg, cmethod->klass);
6472 CHECK_TYPELOAD (cmethod->klass);
6475 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6476 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6477 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6478 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6479 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6482 * Pass vtable iff target method might
6483 * be shared, which means that sharing
6484 * is enabled for its class and its
6485 * context is sharable (and it's not a
6488 if (sharing_enabled && context_sharable &&
6489 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6493 if (cmethod && mini_method_get_context (cmethod) &&
6494 mini_method_get_context (cmethod)->method_inst) {
6495 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6496 MonoGenericContext *context = mini_method_get_context (cmethod);
6497 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6499 g_assert (!pass_vtable);
6501 if (sharing_enabled && context_sharable)
6505 if (cfg->generic_sharing_context && cmethod) {
6506 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6508 context_used = mono_method_check_context_used (cmethod);
6510 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6511 /* Generic method interface
6512 calls are resolved via a
6513 helper function and don't
6515 if (!cmethod_context || !cmethod_context->method_inst)
6516 pass_imt_from_rgctx = TRUE;
6520 * If a shared method calls another
6521 * shared method then the caller must
6522 * have a generic sharing context
6523 * because the magic trampoline
6524 * requires it. FIXME: We shouldn't
6525 * have to force the vtable/mrgctx
6526 * variable here. Instead there
6527 * should be a flag in the cfg to
6528 * request a generic sharing context.
6531 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6532 mono_get_vtable_var (cfg);
6537 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6539 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6541 CHECK_TYPELOAD (cmethod->klass);
6542 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6547 g_assert (!vtable_arg);
6549 if (!cfg->compile_aot) {
6551 * emit_get_rgctx_method () calls mono_class_vtable () so check
6552 * for type load errors before.
6554 mono_class_setup_vtable (cmethod->klass);
6555 CHECK_TYPELOAD (cmethod->klass);
6558 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6560 /* !marshalbyref is needed to properly handle generic methods + remoting */
6561 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6562 MONO_METHOD_IS_FINAL (cmethod)) &&
6563 !cmethod->klass->marshalbyref) {
6570 if (pass_imt_from_rgctx) {
6571 g_assert (!pass_vtable);
6574 imt_arg = emit_get_rgctx_method (cfg, context_used,
6575 cmethod, MONO_RGCTX_INFO_METHOD);
6579 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6581 /* Calling virtual generic methods */
6582 if (cmethod && virtual &&
6583 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6584 !(MONO_METHOD_IS_FINAL (cmethod) &&
6585 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6586 mono_method_signature (cmethod)->generic_param_count) {
6587 MonoInst *this_temp, *this_arg_temp, *store;
6588 MonoInst *iargs [4];
6590 g_assert (mono_method_signature (cmethod)->is_inflated);
6592 /* Prevent inlining of methods that contain indirect calls */
6595 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6596 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6597 g_assert (!imt_arg);
6599 g_assert (cmethod->is_inflated);
6600 imt_arg = emit_get_rgctx_method (cfg, context_used,
6601 cmethod, MONO_RGCTX_INFO_METHOD);
6602 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6606 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6607 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6608 MONO_ADD_INS (bblock, store);
6610 /* FIXME: This should be a managed pointer */
6611 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6613 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6614 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6615 cmethod, MONO_RGCTX_INFO_METHOD);
6616 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6617 addr = mono_emit_jit_icall (cfg,
6618 mono_helper_compile_generic_method, iargs);
6620 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6622 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6625 if (!MONO_TYPE_IS_VOID (fsig->ret))
6626 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6628 CHECK_CFG_EXCEPTION;
6635 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6636 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6638 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6642 /* FIXME: runtime generic context pointer for jumps? */
6643 /* FIXME: handle this for generic sharing eventually */
6644 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6647 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6650 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6651 /* Handle tail calls similarly to calls */
6652 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6654 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6655 call->tail_call = TRUE;
6656 call->method = cmethod;
6657 call->signature = mono_method_signature (cmethod);
6660 * We implement tail calls by storing the actual arguments into the
6661 * argument variables, then emitting a CEE_JMP.
6663 for (i = 0; i < n; ++i) {
6664 /* Prevent argument from being register allocated */
6665 arg_array [i]->flags |= MONO_INST_VOLATILE;
6666 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6670 ins = (MonoInst*)call;
6671 ins->inst_p0 = cmethod;
6672 ins->inst_p1 = arg_array [0];
6673 MONO_ADD_INS (bblock, ins);
6674 link_bblock (cfg, bblock, end_bblock);
6675 start_new_bblock = 1;
6677 CHECK_CFG_EXCEPTION;
6679 /* skip CEE_RET as well */
6685 /* Conversion to a JIT intrinsic */
6686 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6688 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6689 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6694 CHECK_CFG_EXCEPTION;
6702 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6703 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6704 mono_method_check_inlining (cfg, cmethod) &&
6705 !g_list_find (dont_inline, cmethod)) {
6707 gboolean allways = FALSE;
6709 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6710 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6711 /* Prevent inlining of methods that call wrappers */
6713 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6717 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6719 cfg->real_offset += 5;
6722 if (!MONO_TYPE_IS_VOID (fsig->ret))
6723 /* *sp is already set by inline_method */
6726 inline_costs += costs;
6732 inline_costs += 10 * num_calls++;
6734 /* Tail recursion elimination */
6735 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6736 gboolean has_vtargs = FALSE;
6739 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6742 /* keep it simple */
6743 for (i = fsig->param_count - 1; i >= 0; i--) {
6744 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6749 for (i = 0; i < n; ++i)
6750 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6751 MONO_INST_NEW (cfg, ins, OP_BR);
6752 MONO_ADD_INS (bblock, ins);
6753 tblock = start_bblock->out_bb [0];
6754 link_bblock (cfg, bblock, tblock);
6755 ins->inst_target_bb = tblock;
6756 start_new_bblock = 1;
6758 /* skip the CEE_RET, too */
6759 if (ip_in_bb (cfg, bblock, ip + 5))
6769 /* Generic sharing */
6770 /* FIXME: only do this for generic methods if
6771 they are not shared! */
6772 if (context_used && !imt_arg && !array_rank &&
6773 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6774 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6775 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6776 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6779 g_assert (cfg->generic_sharing_context && cmethod);
6783 * We are compiling a call to a
6784 * generic method from shared code,
6785 * which means that we have to look up
6786 * the method in the rgctx and do an
6789 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6792 /* Indirect calls */
6794 g_assert (!imt_arg);
6796 if (*ip == CEE_CALL)
6797 g_assert (context_used);
6798 else if (*ip == CEE_CALLI)
6799 g_assert (!vtable_arg);
6801 /* FIXME: what the hell is this??? */
6802 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6803 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6805 /* Prevent inlining of methods with indirect calls */
6810 int rgctx_reg = mono_alloc_preg (cfg);
6812 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6813 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6814 call = (MonoCallInst*)ins;
6815 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6817 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6819 * Instead of emitting an indirect call, emit a direct call
6820 * with the contents of the aotconst as the patch info.
6822 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6824 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6825 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6828 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6831 if (!MONO_TYPE_IS_VOID (fsig->ret))
6832 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6834 CHECK_CFG_EXCEPTION;
6845 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6846 if (sp [fsig->param_count]->type == STACK_OBJ) {
6847 MonoInst *iargs [2];
6850 iargs [1] = sp [fsig->param_count];
6852 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6855 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6856 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6857 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6858 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6860 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6863 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6864 if (!cmethod->klass->element_class->valuetype && !readonly)
6865 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6866 CHECK_TYPELOAD (cmethod->klass);
6869 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6872 g_assert_not_reached ();
6875 CHECK_CFG_EXCEPTION;
6882 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6884 if (!MONO_TYPE_IS_VOID (fsig->ret))
6885 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6887 CHECK_CFG_EXCEPTION;
6897 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6899 } else if (imt_arg) {
6900 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6902 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6905 if (!MONO_TYPE_IS_VOID (fsig->ret))
6906 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6908 CHECK_CFG_EXCEPTION;
6915 if (cfg->method != method) {
6916 /* return from inlined method */
6918 * If in_count == 0, that means the ret is unreachable due to
6919 * being preceded by a throw. In that case, inline_method () will
6920 * handle setting the return value
6921 * (test case: test_0_inline_throw ()).
6923 if (return_var && cfg->cbb->in_count) {
6927 //g_assert (returnvar != -1);
6928 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6929 cfg->ret_var_set = TRUE;
6933 MonoType *ret_type = mono_method_signature (method)->ret;
6937 * Place a seq point here too even though the IL stack is not
6938 * empty, so a step over on
6941 * will work correctly.
6943 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6944 MONO_ADD_INS (cfg->cbb, ins);
6947 g_assert (!return_var);
6950 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6953 if (!cfg->vret_addr) {
6956 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6958 EMIT_NEW_RETLOADA (cfg, ret_addr);
6960 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6961 ins->klass = mono_class_from_mono_type (ret_type);
6964 #ifdef MONO_ARCH_SOFT_FLOAT
6965 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6966 MonoInst *iargs [1];
6970 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6971 mono_arch_emit_setret (cfg, method, conv);
6973 mono_arch_emit_setret (cfg, method, *sp);
6976 mono_arch_emit_setret (cfg, method, *sp);
6981 if (sp != stack_start)
6983 MONO_INST_NEW (cfg, ins, OP_BR);
6985 ins->inst_target_bb = end_bblock;
6986 MONO_ADD_INS (bblock, ins);
6987 link_bblock (cfg, bblock, end_bblock);
6988 start_new_bblock = 1;
6992 MONO_INST_NEW (cfg, ins, OP_BR);
6994 target = ip + 1 + (signed char)(*ip);
6996 GET_BBLOCK (cfg, tblock, target);
6997 link_bblock (cfg, bblock, tblock);
6998 ins->inst_target_bb = tblock;
6999 if (sp != stack_start) {
7000 handle_stack_args (cfg, stack_start, sp - stack_start);
7002 CHECK_UNVERIFIABLE (cfg);
7004 MONO_ADD_INS (bblock, ins);
7005 start_new_bblock = 1;
7006 inline_costs += BRANCH_COST;
7020 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7022 target = ip + 1 + *(signed char*)ip;
7028 inline_costs += BRANCH_COST;
7032 MONO_INST_NEW (cfg, ins, OP_BR);
7035 target = ip + 4 + (gint32)read32(ip);
7037 GET_BBLOCK (cfg, tblock, target);
7038 link_bblock (cfg, bblock, tblock);
7039 ins->inst_target_bb = tblock;
7040 if (sp != stack_start) {
7041 handle_stack_args (cfg, stack_start, sp - stack_start);
7043 CHECK_UNVERIFIABLE (cfg);
7046 MONO_ADD_INS (bblock, ins);
7048 start_new_bblock = 1;
7049 inline_costs += BRANCH_COST;
7056 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7057 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7058 guint32 opsize = is_short ? 1 : 4;
7060 CHECK_OPSIZE (opsize);
7062 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7065 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7070 GET_BBLOCK (cfg, tblock, target);
7071 link_bblock (cfg, bblock, tblock);
7072 GET_BBLOCK (cfg, tblock, ip);
7073 link_bblock (cfg, bblock, tblock);
7075 if (sp != stack_start) {
7076 handle_stack_args (cfg, stack_start, sp - stack_start);
7077 CHECK_UNVERIFIABLE (cfg);
7080 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7081 cmp->sreg1 = sp [0]->dreg;
7082 type_from_op (cmp, sp [0], NULL);
7085 #if SIZEOF_REGISTER == 4
7086 if (cmp->opcode == OP_LCOMPARE_IMM) {
7087 /* Convert it to OP_LCOMPARE */
7088 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7089 ins->type = STACK_I8;
7090 ins->dreg = alloc_dreg (cfg, STACK_I8);
7092 MONO_ADD_INS (bblock, ins);
7093 cmp->opcode = OP_LCOMPARE;
7094 cmp->sreg2 = ins->dreg;
7097 MONO_ADD_INS (bblock, cmp);
7099 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7100 type_from_op (ins, sp [0], NULL);
7101 MONO_ADD_INS (bblock, ins);
7102 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7103 GET_BBLOCK (cfg, tblock, target);
7104 ins->inst_true_bb = tblock;
7105 GET_BBLOCK (cfg, tblock, ip);
7106 ins->inst_false_bb = tblock;
7107 start_new_bblock = 2;
7110 inline_costs += BRANCH_COST;
7125 MONO_INST_NEW (cfg, ins, *ip);
7127 target = ip + 4 + (gint32)read32(ip);
7133 inline_costs += BRANCH_COST;
7137 MonoBasicBlock **targets;
7138 MonoBasicBlock *default_bblock;
7139 MonoJumpInfoBBTable *table;
7140 int offset_reg = alloc_preg (cfg);
7141 int target_reg = alloc_preg (cfg);
7142 int table_reg = alloc_preg (cfg);
7143 int sum_reg = alloc_preg (cfg);
7144 gboolean use_op_switch;
7148 n = read32 (ip + 1);
7151 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7155 CHECK_OPSIZE (n * sizeof (guint32));
7156 target = ip + n * sizeof (guint32);
7158 GET_BBLOCK (cfg, default_bblock, target);
7159 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7161 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7162 for (i = 0; i < n; ++i) {
7163 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7164 targets [i] = tblock;
7165 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7169 if (sp != stack_start) {
7171 * Link the current bb with the targets as well, so handle_stack_args
7172 * will set their in_stack correctly.
7174 link_bblock (cfg, bblock, default_bblock);
7175 for (i = 0; i < n; ++i)
7176 link_bblock (cfg, bblock, targets [i]);
7178 handle_stack_args (cfg, stack_start, sp - stack_start);
7180 CHECK_UNVERIFIABLE (cfg);
7183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7187 for (i = 0; i < n; ++i)
7188 link_bblock (cfg, bblock, targets [i]);
7190 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7191 table->table = targets;
7192 table->table_size = n;
7194 use_op_switch = FALSE;
7196 /* ARM implements SWITCH statements differently */
7197 /* FIXME: Make it use the generic implementation */
7198 if (!cfg->compile_aot)
7199 use_op_switch = TRUE;
7202 if (COMPILE_LLVM (cfg))
7203 use_op_switch = TRUE;
7205 cfg->cbb->has_jump_table = 1;
7207 if (use_op_switch) {
7208 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7209 ins->sreg1 = src1->dreg;
7210 ins->inst_p0 = table;
7211 ins->inst_many_bb = targets;
7212 ins->klass = GUINT_TO_POINTER (n);
7213 MONO_ADD_INS (cfg->cbb, ins);
7215 if (sizeof (gpointer) == 8)
7216 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7220 #if SIZEOF_REGISTER == 8
7221 /* The upper word might not be zero, and we add it to a 64 bit address later */
7222 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7225 if (cfg->compile_aot) {
7226 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7228 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7229 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7230 ins->inst_p0 = table;
7231 ins->dreg = table_reg;
7232 MONO_ADD_INS (cfg->cbb, ins);
7235 /* FIXME: Use load_memindex */
7236 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7238 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7240 start_new_bblock = 1;
7241 inline_costs += (BRANCH_COST * 2);
7261 dreg = alloc_freg (cfg);
7264 dreg = alloc_lreg (cfg);
7267 dreg = alloc_preg (cfg);
7270 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7271 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7272 ins->flags |= ins_flag;
7274 MONO_ADD_INS (bblock, ins);
7289 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7290 ins->flags |= ins_flag;
7292 MONO_ADD_INS (bblock, ins);
7294 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7295 emit_write_barrier (cfg, sp [0], sp [1], -1);
7304 MONO_INST_NEW (cfg, ins, (*ip));
7306 ins->sreg1 = sp [0]->dreg;
7307 ins->sreg2 = sp [1]->dreg;
7308 type_from_op (ins, sp [0], sp [1]);
7310 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7312 /* Use the immediate opcodes if possible */
7313 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7314 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7315 if (imm_opcode != -1) {
7316 ins->opcode = imm_opcode;
7317 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7320 sp [1]->opcode = OP_NOP;
7324 MONO_ADD_INS ((cfg)->cbb, (ins));
7326 *sp++ = mono_decompose_opcode (cfg, ins);
7343 MONO_INST_NEW (cfg, ins, (*ip));
7345 ins->sreg1 = sp [0]->dreg;
7346 ins->sreg2 = sp [1]->dreg;
7347 type_from_op (ins, sp [0], sp [1]);
7349 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7350 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7352 /* FIXME: Pass opcode to is_inst_imm */
7354 /* Use the immediate opcodes if possible */
7355 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7358 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7359 if (imm_opcode != -1) {
7360 ins->opcode = imm_opcode;
7361 if (sp [1]->opcode == OP_I8CONST) {
7362 #if SIZEOF_REGISTER == 8
7363 ins->inst_imm = sp [1]->inst_l;
7365 ins->inst_ls_word = sp [1]->inst_ls_word;
7366 ins->inst_ms_word = sp [1]->inst_ms_word;
7370 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7373 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7374 if (sp [1]->next == NULL)
7375 sp [1]->opcode = OP_NOP;
7378 MONO_ADD_INS ((cfg)->cbb, (ins));
7380 *sp++ = mono_decompose_opcode (cfg, ins);
7393 case CEE_CONV_OVF_I8:
7394 case CEE_CONV_OVF_U8:
7398 /* Special case this earlier so we have long constants in the IR */
7399 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7400 int data = sp [-1]->inst_c0;
7401 sp [-1]->opcode = OP_I8CONST;
7402 sp [-1]->type = STACK_I8;
7403 #if SIZEOF_REGISTER == 8
7404 if ((*ip) == CEE_CONV_U8)
7405 sp [-1]->inst_c0 = (guint32)data;
7407 sp [-1]->inst_c0 = data;
7409 sp [-1]->inst_ls_word = data;
7410 if ((*ip) == CEE_CONV_U8)
7411 sp [-1]->inst_ms_word = 0;
7413 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7415 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7422 case CEE_CONV_OVF_I4:
7423 case CEE_CONV_OVF_I1:
7424 case CEE_CONV_OVF_I2:
7425 case CEE_CONV_OVF_I:
7426 case CEE_CONV_OVF_U:
7429 if (sp [-1]->type == STACK_R8) {
7430 ADD_UNOP (CEE_CONV_OVF_I8);
7437 case CEE_CONV_OVF_U1:
7438 case CEE_CONV_OVF_U2:
7439 case CEE_CONV_OVF_U4:
7442 if (sp [-1]->type == STACK_R8) {
7443 ADD_UNOP (CEE_CONV_OVF_U8);
7450 case CEE_CONV_OVF_I1_UN:
7451 case CEE_CONV_OVF_I2_UN:
7452 case CEE_CONV_OVF_I4_UN:
7453 case CEE_CONV_OVF_I8_UN:
7454 case CEE_CONV_OVF_U1_UN:
7455 case CEE_CONV_OVF_U2_UN:
7456 case CEE_CONV_OVF_U4_UN:
7457 case CEE_CONV_OVF_U8_UN:
7458 case CEE_CONV_OVF_I_UN:
7459 case CEE_CONV_OVF_U_UN:
7466 CHECK_CFG_EXCEPTION;
7470 case CEE_ADD_OVF_UN:
7472 case CEE_MUL_OVF_UN:
7474 case CEE_SUB_OVF_UN:
7482 token = read32 (ip + 1);
7483 klass = mini_get_class (method, token, generic_context);
7484 CHECK_TYPELOAD (klass);
7486 if (generic_class_is_reference_type (cfg, klass)) {
7487 MonoInst *store, *load;
7488 int dreg = alloc_preg (cfg);
7490 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7491 load->flags |= ins_flag;
7492 MONO_ADD_INS (cfg->cbb, load);
7494 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7495 store->flags |= ins_flag;
7496 MONO_ADD_INS (cfg->cbb, store);
7498 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7499 emit_write_barrier (cfg, sp [0], sp [1], -1);
7501 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7513 token = read32 (ip + 1);
7514 klass = mini_get_class (method, token, generic_context);
7515 CHECK_TYPELOAD (klass);
7517 /* Optimize the common ldobj+stloc combination */
7527 loc_index = ip [5] - CEE_STLOC_0;
7534 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7535 CHECK_LOCAL (loc_index);
7537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7538 ins->dreg = cfg->locals [loc_index]->dreg;
7544 /* Optimize the ldobj+stobj combination */
7545 /* The reference case ends up being a load+store anyway */
7546 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7551 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7558 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7567 CHECK_STACK_OVF (1);
7569 n = read32 (ip + 1);
7571 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7572 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7573 ins->type = STACK_OBJ;
7576 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7577 MonoInst *iargs [1];
7579 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7580 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7582 if (cfg->opt & MONO_OPT_SHARED) {
7583 MonoInst *iargs [3];
7585 if (cfg->compile_aot) {
7586 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7588 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7589 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7590 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7591 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7592 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7594 if (bblock->out_of_line) {
7595 MonoInst *iargs [2];
7597 if (image == mono_defaults.corlib) {
7599 * Avoid relocations in AOT and save some space by using a
7600 * version of helper_ldstr specialized to mscorlib.
7602 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7603 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7605 /* Avoid creating the string object */
7606 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7607 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7608 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7612 if (cfg->compile_aot) {
7613 NEW_LDSTRCONST (cfg, ins, image, n);
7615 MONO_ADD_INS (bblock, ins);
7618 NEW_PCONST (cfg, ins, NULL);
7619 ins->type = STACK_OBJ;
7620 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7622 MONO_ADD_INS (bblock, ins);
7631 MonoInst *iargs [2];
7632 MonoMethodSignature *fsig;
7635 MonoInst *vtable_arg = NULL;
7638 token = read32 (ip + 1);
7639 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7640 if (!cmethod || mono_loader_get_last_error ())
7642 fsig = mono_method_get_signature (cmethod, image, token);
7646 mono_save_token_info (cfg, image, token, cmethod);
7648 if (!mono_class_init (cmethod->klass))
7651 if (cfg->generic_sharing_context)
7652 context_used = mono_method_check_context_used (cmethod);
7654 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7655 if (check_linkdemand (cfg, method, cmethod))
7657 CHECK_CFG_EXCEPTION;
7658 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7659 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7662 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7663 emit_generic_class_init (cfg, cmethod->klass);
7664 CHECK_TYPELOAD (cmethod->klass);
7667 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7668 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7669 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7670 mono_class_vtable (cfg->domain, cmethod->klass);
7671 CHECK_TYPELOAD (cmethod->klass);
7673 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7674 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7677 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7678 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7680 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7682 CHECK_TYPELOAD (cmethod->klass);
7683 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7688 n = fsig->param_count;
7692 * Generate smaller code for the common newobj <exception> instruction in
7693 * argument checking code.
7695 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7696 is_exception_class (cmethod->klass) && n <= 2 &&
7697 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7698 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7699 MonoInst *iargs [3];
7701 g_assert (!vtable_arg);
7705 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7708 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7712 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7717 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7720 g_assert_not_reached ();
7728 /* move the args to allow room for 'this' in the first position */
7734 /* check_call_signature () requires sp[0] to be set */
7735 this_ins.type = STACK_OBJ;
7737 if (check_call_signature (cfg, fsig, sp))
7742 if (mini_class_is_system_array (cmethod->klass)) {
7743 g_assert (!vtable_arg);
7745 *sp = emit_get_rgctx_method (cfg, context_used,
7746 cmethod, MONO_RGCTX_INFO_METHOD);
7748 /* Avoid varargs in the common case */
7749 if (fsig->param_count == 1)
7750 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7751 else if (fsig->param_count == 2)
7752 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7753 else if (fsig->param_count == 3)
7754 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7756 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7757 } else if (cmethod->string_ctor) {
7758 g_assert (!context_used);
7759 g_assert (!vtable_arg);
7760 /* we simply pass a null pointer */
7761 EMIT_NEW_PCONST (cfg, *sp, NULL);
7762 /* now call the string ctor */
7763 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7765 MonoInst* callvirt_this_arg = NULL;
7767 if (cmethod->klass->valuetype) {
7768 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7769 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7770 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7775 * The code generated by mini_emit_virtual_call () expects
7776 * iargs [0] to be a boxed instance, but luckily the vcall
7777 * will be transformed into a normal call there.
7779 } else if (context_used) {
7780 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7783 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7785 CHECK_TYPELOAD (cmethod->klass);
7788 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7789 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7790 * As a workaround, we call class cctors before allocating objects.
7792 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7793 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7794 if (cfg->verbose_level > 2)
7795 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7796 class_inits = g_slist_prepend (class_inits, vtable);
7799 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7802 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7805 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7807 /* Now call the actual ctor */
7808 /* Avoid virtual calls to ctors if possible */
7809 if (cmethod->klass->marshalbyref)
7810 callvirt_this_arg = sp [0];
7813 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7814 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7815 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7820 CHECK_CFG_EXCEPTION;
7825 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7826 mono_method_check_inlining (cfg, cmethod) &&
7827 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7828 !g_list_find (dont_inline, cmethod)) {
7831 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7832 cfg->real_offset += 5;
7835 inline_costs += costs - 5;
7838 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7840 } else if (context_used &&
7841 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7842 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7843 MonoInst *cmethod_addr;
7845 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7846 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7848 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7851 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7852 callvirt_this_arg, NULL, vtable_arg);
7856 if (alloc == NULL) {
7858 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7859 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7873 token = read32 (ip + 1);
7874 klass = mini_get_class (method, token, generic_context);
7875 CHECK_TYPELOAD (klass);
7876 if (sp [0]->type != STACK_OBJ)
7879 if (cfg->generic_sharing_context)
7880 context_used = mono_class_check_context_used (klass);
7882 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
7889 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7892 /*FIXME AOT support*/
7893 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
7896 ins = mono_emit_jit_icall (cfg, mono_object_castclass_with_cache, args);
7900 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7901 MonoMethod *mono_castclass;
7902 MonoInst *iargs [1];
7905 mono_castclass = mono_marshal_get_castclass (klass);
7908 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7909 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7910 g_assert (costs > 0);
7913 cfg->real_offset += 5;
7918 inline_costs += costs;
7921 ins = handle_castclass (cfg, klass, *sp, context_used);
7922 CHECK_CFG_EXCEPTION;
7932 token = read32 (ip + 1);
7933 klass = mini_get_class (method, token, generic_context);
7934 CHECK_TYPELOAD (klass);
7935 if (sp [0]->type != STACK_OBJ)
7938 if (cfg->generic_sharing_context)
7939 context_used = mono_class_check_context_used (klass);
7941 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
7948 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7951 /*FIXME AOT support*/
7952 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
7954 *sp = mono_emit_jit_icall (cfg, mono_object_isinst_with_cache, args);
7958 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7959 MonoMethod *mono_isinst;
7960 MonoInst *iargs [1];
7963 mono_isinst = mono_marshal_get_isinst (klass);
7966 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7967 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7968 g_assert (costs > 0);
7971 cfg->real_offset += 5;
7976 inline_costs += costs;
7979 ins = handle_isinst (cfg, klass, *sp, context_used);
7980 CHECK_CFG_EXCEPTION;
7987 case CEE_UNBOX_ANY: {
7991 token = read32 (ip + 1);
7992 klass = mini_get_class (method, token, generic_context);
7993 CHECK_TYPELOAD (klass);
7995 mono_save_token_info (cfg, image, token, klass);
7997 if (cfg->generic_sharing_context)
7998 context_used = mono_class_check_context_used (klass);
8000 if (generic_class_is_reference_type (cfg, klass)) {
8001 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8002 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8009 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8012 /*FIXME AOT support*/
8013 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8016 ins = mono_emit_jit_icall (cfg, mono_object_castclass_with_cache, args);
8020 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8021 MonoMethod *mono_castclass;
8022 MonoInst *iargs [1];
8025 mono_castclass = mono_marshal_get_castclass (klass);
8028 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8029 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8031 g_assert (costs > 0);
8034 cfg->real_offset += 5;
8038 inline_costs += costs;
8040 ins = handle_castclass (cfg, klass, *sp, context_used);
8041 CHECK_CFG_EXCEPTION;
8049 if (mono_class_is_nullable (klass)) {
8050 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8057 ins = handle_unbox (cfg, klass, sp, context_used);
8063 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8076 token = read32 (ip + 1);
8077 klass = mini_get_class (method, token, generic_context);
8078 CHECK_TYPELOAD (klass);
8080 mono_save_token_info (cfg, image, token, klass);
8082 if (cfg->generic_sharing_context)
8083 context_used = mono_class_check_context_used (klass);
8085 if (generic_class_is_reference_type (cfg, klass)) {
8091 if (klass == mono_defaults.void_class)
8093 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8095 /* frequent check in generic code: box (struct), brtrue */
8097 // FIXME: LLVM can't handle the inconsistent bb linking
8098 if (!mono_class_is_nullable (klass) &&
8099 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8100 (ip [5] == CEE_BRTRUE ||
8101 ip [5] == CEE_BRTRUE_S ||
8102 ip [5] == CEE_BRFALSE ||
8103 ip [5] == CEE_BRFALSE_S)) {
8104 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8106 MonoBasicBlock *true_bb, *false_bb;
8110 if (cfg->verbose_level > 3) {
8111 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8112 printf ("<box+brtrue opt>\n");
8120 target = ip + 1 + (signed char)(*ip);
8127 target = ip + 4 + (gint)(read32 (ip));
8131 g_assert_not_reached ();
8135 * We need to link both bblocks, since it is needed for handling stack
8136 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8137 * Branching to only one of them would lead to inconsistencies, so
8138 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8140 GET_BBLOCK (cfg, true_bb, target);
8141 GET_BBLOCK (cfg, false_bb, ip);
8143 mono_link_bblock (cfg, cfg->cbb, true_bb);
8144 mono_link_bblock (cfg, cfg->cbb, false_bb);
8146 if (sp != stack_start) {
8147 handle_stack_args (cfg, stack_start, sp - stack_start);
8149 CHECK_UNVERIFIABLE (cfg);
8152 if (COMPILE_LLVM (cfg)) {
8153 dreg = alloc_ireg (cfg);
8154 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8157 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8159 /* The JIT can't eliminate the iconst+compare */
8160 MONO_INST_NEW (cfg, ins, OP_BR);
8161 ins->inst_target_bb = is_true ? true_bb : false_bb;
8162 MONO_ADD_INS (cfg->cbb, ins);
8165 start_new_bblock = 1;
8169 *sp++ = handle_box (cfg, val, klass, context_used);
8171 CHECK_CFG_EXCEPTION;
8180 token = read32 (ip + 1);
8181 klass = mini_get_class (method, token, generic_context);
8182 CHECK_TYPELOAD (klass);
8184 mono_save_token_info (cfg, image, token, klass);
8186 if (cfg->generic_sharing_context)
8187 context_used = mono_class_check_context_used (klass);
8189 if (mono_class_is_nullable (klass)) {
8192 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8193 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8197 ins = handle_unbox (cfg, klass, sp, context_used);
8207 MonoClassField *field;
8211 if (*ip == CEE_STFLD) {
/*
 * Instance field access: CEE_LDFLD / CEE_LDFLDA / CEE_STFLD.
 * NOTE(review): partial view of one case of the CIL opcode switch in
 * mono_method_to_ir; intervening source lines are elided here.
 */
/* The receiver must be an object/managed pointer/vtype; plain numeric types are invalid. */
8218 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8220 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8223 token = read32 (ip + 1);
/* In wrappers the token is an index into the wrapper's own data, not a metadata token. */
8224 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8225 field = mono_method_get_wrapper_data (method, token);
8226 klass = field->parent;
8229 field = mono_field_from_token (image, token, &klass, generic_context);
8233 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8234 FIELD_ACCESS_FAILURE;
8235 mono_class_init (klass);
8237 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8238 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8239 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8240 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
/* For valuetypes the receiver is a raw pointer to the data, so skip the MonoObject header. */
8243 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8244 if (*ip == CEE_STFLD) {
8245 if (target_type_is_incompatible (cfg, field->type, sp [1]))
/* MarshalByRef/ContextBound receivers may be transparent proxies: route the store
 * through a remoting-aware wrapper instead of a direct memory store. */
8247 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8248 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8249 MonoInst *iargs [5];
8252 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8253 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8254 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8258 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8259 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8260 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8261 g_assert (costs > 0);
8263 cfg->real_offset += 5;
8266 inline_costs += costs;
8268 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8273 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8275 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
/* A store through a real object reference can fault (NRE); a store through a
 * local's address (OP_LDADDR) cannot. */
8276 if (sp [0]->opcode != OP_LDADDR)
8277 store->flags |= MONO_INST_FAULT;
/* Storing a non-null reference into the heap needs a GC write barrier. */
8279 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8280 /* insert call to write barrier */
8284 dreg = alloc_preg (cfg);
8285 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8286 emit_write_barrier (cfg, ptr, sp [1], -1);
8289 store->flags |= ins_flag;
/* LDFLD/LDFLDA path: same remoting-aware detour for MarshalByRef receivers. */
8296 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8297 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8298 MonoInst *iargs [4];
8301 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8302 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8303 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8304 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8305 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8306 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8308 g_assert (costs > 0);
8310 cfg->real_offset += 5;
8314 inline_costs += costs;
8316 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* LDFLD on a value type on the stack: materialize the vtype in a variable so
 * we have an address to load the field from. */
8320 if (sp [0]->type == STACK_VTYPE) {
8323 /* Have to compute the address of the variable */
8325 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8327 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8329 g_assert (var->klass == klass);
8331 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8335 if (*ip == CEE_LDFLDA) {
/* Explicit null check: LDFLDA produces an address, so there is no later
 * faulting load to rely on for the NullReferenceException. */
8336 if (sp [0]->type == STACK_OBJ) {
8337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8338 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8341 dreg = alloc_preg (cfg);
8343 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8344 ins->klass = mono_class_from_mono_type (field->type);
8345 ins->type = STACK_MP;
8350 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8352 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8353 load->flags |= ins_flag;
8354 if (sp [0]->opcode != OP_LDADDR)
8355 load->flags |= MONO_INST_FAULT;
/*
 * Static field access: CEE_LDSFLD / CEE_LDSFLDA / CEE_STSFLD.
 * First computes the field's address (several strategies depending on
 * TLS/special-static, AOT, domain sharing and generic sharing), then emits
 * the actual load/store. NOTE(review): partial view; lines elided.
 */
8366 MonoClassField *field;
8367 gpointer addr = NULL;
8368 gboolean is_special_static;
8371 token = read32 (ip + 1);
8373 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8374 field = mono_method_get_wrapper_data (method, token);
8375 klass = field->parent;
8378 field = mono_field_from_token (image, token, &klass, generic_context);
8381 mono_class_init (klass);
8382 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8383 FIELD_ACCESS_FAILURE;
8385 /* if the class is Critical then transparent code cannot access its fields */
8386 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8387 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8390 * We can only support shared generic static
8391 * field access on architectures where the
8392 * trampoline code has been extended to handle
8393 * the generic class init.
8395 #ifndef MONO_ARCH_VTABLE_REG
8396 GENERIC_SHARING_FAILURE (*ip);
8399 if (cfg->generic_sharing_context)
8400 context_used = mono_class_check_context_used (klass);
/* Literal (const) fields are folded elsewhere; they never reach this path. */
8402 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8404 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8405 * to be called here.
8407 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8408 mono_class_vtable (cfg->domain, klass);
8409 CHECK_TYPELOAD (klass);
8411 mono_domain_lock (cfg->domain);
8412 if (cfg->domain->special_static_fields)
8413 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8414 mono_domain_unlock (cfg->domain);
8416 is_special_static = mono_class_field_is_special_static (field);
8418 /* Generate IR to compute the field address */
/* Fast path: [ThreadStatic]-style fields whose encoded offset fits the TLS
 * scheme (high bit clear) — open-code the thread->static_data lookup. */
8419 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8421 * Fast access to TLS data
8422 * Inline version of get_thread_static_data () in
8426 int idx, static_data_reg, array_reg, dreg;
8427 MonoInst *thread_ins;
8429 // offset &= 0x7fffffff;
8430 // idx = (offset >> 24) - 1;
8431 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8433 thread_ins = mono_get_thread_intrinsic (cfg);
8434 MONO_ADD_INS (cfg->cbb, thread_ins);
8435 static_data_reg = alloc_ireg (cfg);
8436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* Under AOT the encoded offset is only known at runtime, so the
 * idx/offset decomposition above must be emitted as IR too. */
8438 if (cfg->compile_aot) {
8439 int offset_reg, offset2_reg, idx_reg;
8441 /* For TLS variables, this will return the TLS offset */
8442 EMIT_NEW_SFLDACONST (cfg, ins, field);
8443 offset_reg = ins->dreg;
8444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8445 idx_reg = alloc_ireg (cfg);
8446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8449 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8450 array_reg = alloc_ireg (cfg);
8451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8452 offset2_reg = alloc_ireg (cfg);
8453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8454 dreg = alloc_ireg (cfg);
8455 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT case: decode idx/offset at compile time and emit constant offsets. */
8457 offset = (gsize)addr & 0x7fffffff;
8458 idx = (offset >> 24) - 1;
8460 array_reg = alloc_ireg (cfg);
8461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8462 dreg = alloc_ireg (cfg);
8463 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
/* Shared-domain / AOT+special-static / shared-generic+special-static:
 * resolve the address through a runtime icall. */
8465 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8466 (cfg->compile_aot && is_special_static) ||
8467 (context_used && is_special_static)) {
8468 MonoInst *iargs [2];
8470 g_assert (field->parent);
8471 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8473 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8474 field, MONO_RGCTX_INFO_CLASS_FIELD);
8476 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8478 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Generic-sharing case: fetch the class static data pointer from the rgctx. */
8479 } else if (context_used) {
8480 MonoInst *static_data;
8483 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8484 method->klass->name_space, method->klass->name, method->name,
8485 depth, field->offset);
8488 if (mono_class_needs_cctor_run (klass, method))
8489 emit_generic_class_init (cfg, klass);
8492 * The pointer we're computing here is
8494 * super_info.static_data + field->offset
8496 static_data = emit_get_rgctx_klass (cfg, context_used,
8497 klass, MONO_RGCTX_INFO_STATIC_DATA);
8499 if (field->offset == 0) {
8502 int addr_reg = mono_alloc_preg (cfg);
8503 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8505 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8506 MonoInst *iargs [2];
8508 g_assert (field->parent);
8509 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8510 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8511 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Common JIT case: compute the address directly from the vtable,
 * emitting a class-init trampoline call (or running the cctor now)
 * when the class still needs initialization. */
8513 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8515 CHECK_TYPELOAD (klass);
8517 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8518 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8519 if (cfg->verbose_level > 2)
8520 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
/* Remember this vtable so we only emit one init call per class per method. */
8521 class_inits = g_slist_prepend (class_inits, vtable);
8523 if (cfg->run_cctors) {
8525 /* This makes so that inline cannot trigger */
8526 /* .cctors: too many apps depend on them */
8527 /* running with a specific order... */
8528 if (! vtable->initialized)
8530 ex = mono_runtime_class_init_full (vtable, FALSE);
8532 set_exception_object (cfg, ex);
8533 goto exception_exit;
8537 addr = (char*)vtable->data + field->offset;
8539 if (cfg->compile_aot)
8540 EMIT_NEW_SFLDACONST (cfg, ins, field);
8542 EMIT_NEW_PCONST (cfg, ins, addr);
/* Remaining special-static case: resolve through the runtime helper. */
8544 MonoInst *iargs [1];
8545 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8546 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8550 /* Generate IR to do the actual load/store operation */
8552 if (*ip == CEE_LDSFLDA) {
8553 ins->klass = mono_class_from_mono_type (field->type);
8554 ins->type = STACK_PTR;
8556 } else if (*ip == CEE_STSFLD) {
8561 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8562 store->flags |= ins_flag;
8564 gboolean is_const = FALSE;
8565 MonoVTable *vtable = NULL;
8567 if (!context_used) {
8568 vtable = mono_class_vtable (cfg->domain, klass);
8569 CHECK_TYPELOAD (klass);
/* LDSFLD of an already-initialized readonly (initonly) field: fold the
 * current value into a constant instead of emitting a load. */
8571 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8572 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8573 gpointer addr = (char*)vtable->data + field->offset;
8574 int ro_type = field->type->type;
8575 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8576 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8578 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8581 case MONO_TYPE_BOOLEAN:
8583 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8587 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8590 case MONO_TYPE_CHAR:
8592 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8596 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8601 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8605 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8611 case MONO_TYPE_FNPTR:
8612 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8613 type_to_eval_stack_type ((cfg), field->type, *sp);
8616 case MONO_TYPE_STRING:
8617 case MONO_TYPE_OBJECT:
8618 case MONO_TYPE_CLASS:
8619 case MONO_TYPE_SZARRAY:
8620 case MONO_TYPE_ARRAY:
/* With a moving GC the object's address is not stable, so reference
 * fields cannot be embedded as raw pointer constants. */
8621 if (!mono_gc_is_moving ()) {
8622 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8623 type_to_eval_stack_type ((cfg), field->type, *sp);
8631 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8636 case MONO_TYPE_VALUETYPE:
8646 CHECK_STACK_OVF (1);
8648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8649 load->flags |= ins_flag;
/* NOTE(review): looks like the CEE_STOBJ case — store a value of type
 * `klass` through the address in sp[0]; elided lines hide the case label. */
8662 token = read32 (ip + 1);
8663 klass = mini_get_class (method, token, generic_context);
8664 CHECK_TYPELOAD (klass);
8665 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8666 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
/* Reference-typed stores into the heap need a GC write barrier, except
 * inside the write-barrier wrapper itself (would recurse). */
8667 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8668 generic_class_is_reference_type (cfg, klass)) {
8669 /* insert call to write barrier */
8670 emit_write_barrier (cfg, sp [0], sp [1], -1);
/*
 * CEE_NEWARR: allocate a one-dimensional array of `klass` with the length
 * on the stack, then try to fold an immediately-following
 * InitializeArray-style data copy. NOTE(review): partial view; lines elided.
 */
8682 const char *data_ptr;
8684 guint32 field_token;
8690 token = read32 (ip + 1);
8692 klass = mini_get_class (method, token, generic_context);
8693 CHECK_TYPELOAD (klass);
8695 if (cfg->generic_sharing_context)
8696 context_used = mono_class_check_context_used (klass);
/* The length operand may be a native int/int64 on 64-bit; narrow it to I4. */
8698 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8699 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8700 ins->sreg1 = sp [0]->dreg;
8701 ins->type = STACK_I4;
8702 ins->dreg = alloc_ireg (cfg);
8703 MONO_ADD_INS (cfg->cbb, ins);
8704 *sp = mono_decompose_opcode (cfg, ins);
8709 MonoClass *array_class = mono_array_class_get (klass, 1);
8710 /* FIXME: we cannot get a managed
8711 allocator because we can't get the
8712 open generic class's vtable. We
8713 have the same problem in
8714 handle_alloc(). This
8715 needs to be solved so that we can
8716 have managed allocs of shared
8719 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8720 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8722 MonoMethod *managed_alloc = NULL;
8724 /* FIXME: Decompose later to help abcrem */
/* Generic sharing: the vtable must come from the runtime generic context. */
8727 args [0] = emit_get_rgctx_klass (cfg, context_used,
8728 array_class, MONO_RGCTX_INFO_VTABLE);
8733 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8735 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8737 if (cfg->opt & MONO_OPT_SHARED) {
8738 /* Decompose now to avoid problems with references to the domainvar */
8739 MonoInst *iargs [3];
8741 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8742 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8745 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8747 /* Decompose later since it is needed by abcrem */
8748 MonoClass *array_type = mono_array_class_get (klass, 1);
8749 mono_class_vtable (cfg->domain, array_type);
8750 CHECK_TYPELOAD (array_type);
8752 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8753 ins->dreg = alloc_preg (cfg);
8754 ins->sreg1 = sp [0]->dreg;
8755 ins->inst_newa_class = klass;
8756 ins->type = STACK_OBJ;
8758 MONO_ADD_INS (cfg->cbb, ins);
8759 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8760 cfg->cbb->has_array_access = TRUE;
8762 /* Needed so mono_emit_load_get_addr () gets called */
8763 mono_get_got_var (cfg);
8773 * we inline/optimize the initialization sequence if possible.
8774 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8775 * for small sizes open code the memcpy
8776 * ensure the rva field is big enough
/* Fold the `newarr; ldtoken; call InitializeArray` idiom into a memcpy from
 * the RVA data when the array length is a compile-time constant. */
8778 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8779 MonoMethod *memcpy_method = get_memcpy_method ();
8780 MonoInst *iargs [3];
8781 int add_reg = alloc_preg (cfg);
8783 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8784 if (cfg->compile_aot) {
8785 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8787 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8789 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8790 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN: push the length of the array in sp[0] as an I4.
 * The load is decomposed later; MONO_INST_FAULT marks the implicit null check. */
8799 if (sp [0]->type != STACK_OBJ)
8802 dreg = alloc_preg (cfg);
8803 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8804 ins->dreg = alloc_preg (cfg);
8805 ins->sreg1 = sp [0]->dreg;
8806 ins->type = STACK_I4;
8807 /* This flag will be inherited by the decomposition */
8808 ins->flags |= MONO_INST_FAULT;
8809 MONO_ADD_INS (cfg->cbb, ins);
8810 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8811 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA: push the address of array element [sp[1]] of array sp[0].
 * For reference element types (outside wrappers, and unless prefixed with
 * `readonly.`) an exact array-type check is required by the spec. */
8819 if (sp [0]->type != STACK_OBJ)
8822 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8824 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8825 CHECK_TYPELOAD (klass);
8826 /* we need to make sure that this array is exactly the type it needs
8827 * to be for correctness. the wrappers are lax with their usage
8828 * so we need to ignore them here
8830 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8831 MonoClass *array_class = mono_array_class_get (klass, 1);
8832 mini_emit_check_array_type (cfg, sp [0], array_class);
8833 CHECK_TYPELOAD (array_class);
8837 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/* CEE_LDELEM and the typed CEE_LDELEM_* variants: load array element.
 * A constant index gets a bounds check plus a direct membase load;
 * otherwise the element address is computed via mini_emit_ldelema_1_ins. */
8852 case CEE_LDELEM_REF: {
8858 if (*ip == CEE_LDELEM) {
8860 token = read32 (ip + 1);
8861 klass = mini_get_class (method, token, generic_context);
8862 CHECK_TYPELOAD (klass);
8863 mono_class_init (klass);
8866 klass = array_access_to_klass (*ip);
8868 if (sp [0]->type != STACK_OBJ)
8871 cfg->flags |= MONO_CFG_HAS_LDELEMA;
/* Constant-index fast path: fold the element offset at compile time. */
8873 if (sp [1]->opcode == OP_ICONST) {
8874 int array_reg = sp [0]->dreg;
8875 int index_reg = sp [1]->dreg;
8876 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8878 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8879 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8881 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8882 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8885 if (*ip == CEE_LDELEM)
/* CEE_STELEM and the typed CEE_STELEM_* variants: store array element.
 * Reference-typed stores of a possibly-non-null value must go through the
 * stelemref helper, which performs the array covariance check. */
8898 case CEE_STELEM_REF:
8905 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8907 if (*ip == CEE_STELEM) {
8909 token = read32 (ip + 1);
8910 klass = mini_get_class (method, token, generic_context);
8911 CHECK_TYPELOAD (klass);
8912 mono_class_init (klass);
8915 klass = array_access_to_klass (*ip);
8917 if (sp [0]->type != STACK_OBJ)
8920 /* storing a NULL doesn't need any of the complex checks in stelemref */
8921 if (generic_class_is_reference_type (cfg, klass) &&
8922 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8923 MonoMethod* helper = mono_marshal_get_stelemref ();
8924 MonoInst *iargs [3];
8926 if (sp [0]->type != STACK_OBJ)
8928 if (sp [2]->type != STACK_OBJ)
8935 mono_emit_method_call (cfg, helper, iargs, NULL);
/* Non-reference (or null-store) path, mirroring the LDELEM lowering. */
8937 if (sp [1]->opcode == OP_ICONST) {
8938 int array_reg = sp [0]->dreg;
8939 int index_reg = sp [1]->dreg;
8940 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8942 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8945 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8946 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8950 if (*ip == CEE_STELEM)
/* CEE_CKFINITE: throw ArithmeticException if the R8 on the stack is NaN
 * or infinite; otherwise push it back unchanged (handled in decompose). */
8957 case CEE_CKFINITE: {
8961 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8962 ins->sreg1 = sp [0]->dreg;
8963 ins->dreg = alloc_freg (cfg);
8964 ins->type = STACK_R8;
8965 MONO_ADD_INS (bblock, ins);
8967 *sp++ = mono_decompose_opcode (cfg, ins);
/* CEE_REFANYVAL: extract the managed pointer from a TypedReference,
 * throwing InvalidCastException if its klass does not match the token. */
8972 case CEE_REFANYVAL: {
8973 MonoInst *src_var, *src;
8975 int klass_reg = alloc_preg (cfg);
8976 int dreg = alloc_preg (cfg);
8979 MONO_INST_NEW (cfg, ins, *ip);
8982 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8983 CHECK_TYPELOAD (klass);
8984 mono_class_init (klass);
8986 if (cfg->generic_sharing_context)
8987 context_used = mono_class_check_context_used (klass);
/* Take the address of the TypedReference vtype so its fields can be read. */
8990 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8992 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8993 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
/* Shared generic code compares against the rgctx-resolved class at runtime. */
8997 MonoInst *klass_ins;
8999 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9000 klass, MONO_RGCTX_INFO_KLASS);
9003 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9004 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9006 mini_emit_class_check (cfg, klass_reg, klass);
9008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9009 ins->type = STACK_MP;
/* CEE_MKREFANY: build a TypedReference { klass, type, value } in a temp
 * local from the managed pointer on the stack and the type token. */
9014 case CEE_MKREFANY: {
9015 MonoInst *loc, *addr;
9018 MONO_INST_NEW (cfg, ins, *ip);
9021 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9022 CHECK_TYPELOAD (klass);
9023 mono_class_init (klass);
9025 if (cfg->generic_sharing_context)
9026 context_used = mono_class_check_context_used (klass);
9028 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9029 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
/* Three ways to obtain the klass/type pointers: rgctx (shared generics),
 * patchable constants (AOT), or immediate pointers (plain JIT). */
9032 MonoInst *const_ins;
9033 int type_reg = alloc_preg (cfg);
9035 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9036 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9038 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9039 } else if (cfg->compile_aot) {
9040 int const_reg = alloc_preg (cfg);
9041 int type_reg = alloc_preg (cfg);
9043 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9046 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9048 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9049 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9053 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9054 ins->type = STACK_VTYPE;
9055 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a Runtime{Type,Method,Field}Handle for the token.
 * Also recognizes the `ldtoken; call Type.GetTypeFromHandle` idiom and
 * folds it into a direct reflection-type constant/rgctx fetch.
 * NOTE(review): partial view of this case; lines elided.
 */
9062 MonoClass *handle_class;
9064 CHECK_STACK_OVF (1);
9067 n = read32 (ip + 1);
/* Wrapper methods carry the handle directly in their wrapper data. */
9069 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9070 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9071 handle = mono_method_get_wrapper_data (method, n);
9072 handle_class = mono_method_get_wrapper_data (method, n + 1);
9073 if (handle_class == mono_defaults.typehandle_class)
9074 handle = &((MonoClass*)handle)->byval_arg;
9077 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9081 mono_class_init (handle_class);
/* Under generic sharing, work out whether the handle depends on the
 * instantiation (and therefore must be fetched from the rgctx). */
9082 if (cfg->generic_sharing_context) {
9083 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9084 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9085 /* This case handles ldtoken
9086 of an open type, like for
9089 } else if (handle_class == mono_defaults.typehandle_class) {
9090 /* If we get a MONO_TYPE_CLASS
9091 then we need to provide the
9093 instantiation of it. */
9094 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9097 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9098 } else if (handle_class == mono_defaults.fieldhandle_class)
9099 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9100 else if (handle_class == mono_defaults.methodhandle_class)
9101 context_used = mono_method_check_context_used (handle);
9103 g_assert_not_reached ();
/* Shared-domain path: resolve the token at runtime via an icall into a
 * vtype temp, so the code is domain-independent. */
9106 if ((cfg->opt & MONO_OPT_SHARED) &&
9107 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9108 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9109 MonoInst *addr, *vtvar, *iargs [3];
9110 int method_context_used;
9112 if (cfg->generic_sharing_context)
9113 method_context_used = mono_method_check_context_used (method);
9115 method_context_used = 0;
9117 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9119 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9120 EMIT_NEW_ICONST (cfg, iargs [1], n);
9121 if (method_context_used) {
9122 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9123 method, MONO_RGCTX_INFO_METHOD);
9124 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9126 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9127 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9129 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9131 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9133 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* Peephole: `ldtoken <type>; call Type.GetTypeFromHandle` in the same
 * basic block — push the System.Type object directly. */
9135 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9136 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9137 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9138 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9139 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9140 MonoClass *tclass = mono_class_from_mono_type (handle);
9142 mono_class_init (tclass);
9144 ins = emit_get_rgctx_klass (cfg, context_used,
9145 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9146 } else if (cfg->compile_aot) {
9147 if (method->wrapper_type) {
9148 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9149 /* Special case for static synchronized wrappers */
9150 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9152 /* FIXME: n is not a normal token */
9153 cfg->disable_aot = TRUE;
9154 EMIT_NEW_PCONST (cfg, ins, NULL);
9157 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9160 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9162 ins->type = STACK_OBJ;
9163 ins->klass = cmethod->klass;
/* Plain ldtoken: store the handle into a vtype temp and push the temp. */
9166 MonoInst *addr, *vtvar;
9168 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9171 if (handle_class == mono_defaults.typehandle_class) {
9172 ins = emit_get_rgctx_klass (cfg, context_used,
9173 mono_class_from_mono_type (handle),
9174 MONO_RGCTX_INFO_TYPE);
9175 } else if (handle_class == mono_defaults.methodhandle_class) {
9176 ins = emit_get_rgctx_method (cfg, context_used,
9177 handle, MONO_RGCTX_INFO_METHOD);
9178 } else if (handle_class == mono_defaults.fieldhandle_class) {
9179 ins = emit_get_rgctx_field (cfg, context_used,
9180 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9182 g_assert_not_reached ();
9184 } else if (cfg->compile_aot) {
9185 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9187 EMIT_NEW_PCONST (cfg, ins, handle);
9189 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9190 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9191 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/*
 * CEE_THROW / CEE_ENDFINALLY / CEE_LEAVE(_S): non-local control flow.
 * All three terminate the current basic block. NOTE(review): partial view;
 * lines elided.
 */
9201 MONO_INST_NEW (cfg, ins, OP_THROW);
9203 ins->sreg1 = sp [0]->dreg;
/* Code after a throw is cold; mark it so it can be laid out out of line. */
9205 bblock->out_of_line = TRUE;
9206 MONO_ADD_INS (bblock, ins);
9207 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9208 MONO_ADD_INS (bblock, ins);
9211 link_bblock (cfg, bblock, end_bblock);
9212 start_new_bblock = 1;
9214 case CEE_ENDFINALLY:
9215 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9216 MONO_ADD_INS (bblock, ins);
9218 start_new_bblock = 1;
9221 * Control will leave the method so empty the stack, otherwise
9222 * the next basic block will start with a nonempty stack.
9224 while (sp != stack_start) {
/* CEE_LEAVE uses a 32-bit target, CEE_LEAVE_S an 8-bit one. */
9232 if (*ip == CEE_LEAVE) {
9234 target = ip + 5 + (gint32)read32(ip + 1);
9237 target = ip + 2 + (signed char)(ip [1]);
9240 /* empty the stack */
9241 while (sp != stack_start) {
9246 * If this leave statement is in a catch block, check for a
9247 * pending exception, and rethrow it if necessary.
9248 * We avoid doing this in runtime invoke wrappers, since those are called
9249 * by native code which expects the wrapper to catch all exceptions.
9251 for (i = 0; i < header->num_clauses; ++i) {
9252 MonoExceptionClause *clause = &header->clauses [i];
9255 * Use <= in the final comparison to handle clauses with multiple
9256 * leave statements, like in bug #78024.
9257 * The ordering of the exception clauses guarantees that we find the
9260 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9262 MonoBasicBlock *dont_throw;
9267 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9270 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9272 NEW_BBLOCK (cfg, dont_throw);
9275 * Currently, we always rethrow the abort exception, despite the
9276 * fact that this is not correct. See thread6.cs for an example.
9277 * But propagating the abort exception is more important than
9278 * getting the semantics right.
9280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9282 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9284 MONO_START_BB (cfg, dont_throw);
/* A leave out of a protected region must run every intervening finally
 * handler; emit an OP_CALL_HANDLER per clause on the way out. */
9289 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9291 MonoExceptionClause *clause;
9293 for (tmp = handlers; tmp; tmp = tmp->next) {
9295 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9297 link_bblock (cfg, bblock, tblock);
9298 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9299 ins->inst_target_bb = tblock;
9300 ins->inst_eh_block = clause;
9301 MONO_ADD_INS (bblock, ins);
9302 bblock->has_call_handler = 1;
9303 if (COMPILE_LLVM (cfg)) {
9304 MonoBasicBlock *target_bb;
9307 * Link the finally bblock with the target, since it will
9308 * conceptually branch there.
9309 * FIXME: Have to link the bblock containing the endfinally.
9311 GET_BBLOCK (cfg, target_bb, target);
9312 link_bblock (cfg, tblock, target_bb);
9315 g_list_free (handlers);
/* Finally branch to the leave target itself. */
9318 MONO_INST_NEW (cfg, ins, OP_BR);
9319 MONO_ADD_INS (bblock, ins);
9320 GET_BBLOCK (cfg, tblock, target);
9321 link_bblock (cfg, bblock, tblock);
9322 ins->inst_target_bb = tblock;
9323 start_new_bblock = 1;
9325 if (*ip == CEE_LEAVE)
9334 * Mono specific opcodes
/* MONO_CUSTOM_PREFIX opcodes are only ever emitted into runtime-generated
 * wrapper methods, hence the wrapper_type assertion below.
 * NOTE(review): partial view; lines elided between the sub-cases. */
9336 case MONO_CUSTOM_PREFIX: {
9338 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
/* CEE_MONO_ICALL: direct call to a registered JIT icall; the wrapper data
 * holds the native function pointer, looked up to recover the signature. */
9342 case CEE_MONO_ICALL: {
9344 MonoJitICallInfo *info;
9346 token = read32 (ip + 2);
9347 func = mono_method_get_wrapper_data (method, token);
9348 info = mono_find_jit_icall_by_addr (func);
9351 CHECK_STACK (info->sig->param_count);
9352 sp -= info->sig->param_count;
9354 ins = mono_emit_jit_icall (cfg, info->func, sp);
9355 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9359 inline_costs += 10 * num_calls++;
/* CEE_MONO_LDPTR: push a raw pointer from the wrapper data. Known icall
 * addresses and the interruption flag get AOT-friendly representations;
 * anything else disables AOT for this method. */
9363 case CEE_MONO_LDPTR: {
9366 CHECK_STACK_OVF (1);
9368 token = read32 (ip + 2);
9370 ptr = mono_method_get_wrapper_data (method, token);
9371 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9372 MonoJitICallInfo *callinfo;
9373 const char *icall_name;
9375 icall_name = method->name + strlen ("__icall_wrapper_");
9376 g_assert (icall_name);
9377 callinfo = mono_find_jit_icall_by_name (icall_name);
9378 g_assert (callinfo);
9380 if (ptr == callinfo->func) {
9381 /* Will be transformed into an AOTCONST later */
9382 EMIT_NEW_PCONST (cfg, ins, ptr);
9388 /* FIXME: Generalize this */
9389 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9390 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9395 EMIT_NEW_PCONST (cfg, ins, ptr);
9398 inline_costs += 10 * num_calls++;
9399 /* Can't embed random pointers into AOT code */
9400 cfg->disable_aot = 1;
/* CEE_MONO_ICALL_ADDR: push the address of an internal call target. */
9403 case CEE_MONO_ICALL_ADDR: {
9404 MonoMethod *cmethod;
9407 CHECK_STACK_OVF (1);
9409 token = read32 (ip + 2);
9411 cmethod = mono_method_get_wrapper_data (method, token);
9413 if (cfg->compile_aot) {
9414 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9416 ptr = mono_lookup_internal_call (cmethod);
9418 EMIT_NEW_PCONST (cfg, ins, ptr);
/* CEE_MONO_VTADDR: push the address of the vtype on top of the stack. */
9424 case CEE_MONO_VTADDR: {
9425 MonoInst *src_var, *src;
9431 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9432 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
/* CEE_MONO_NEWOBJ: allocate an object of the given class via icall. */
9437 case CEE_MONO_NEWOBJ: {
9438 MonoInst *iargs [2];
9440 CHECK_STACK_OVF (1);
9442 token = read32 (ip + 2);
9443 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9444 mono_class_init (klass);
9445 NEW_DOMAINCONST (cfg, iargs [0]);
9446 MONO_ADD_INS (cfg->cbb, iargs [0]);
9447 NEW_CLASSCONST (cfg, iargs [1], klass);
9448 MONO_ADD_INS (cfg->cbb, iargs [1]);
9449 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9451 inline_costs += 10 * num_calls++;
/* CEE_MONO_OBJADDR: reinterpret the object reference as a managed pointer. */
9454 case CEE_MONO_OBJADDR:
9457 MONO_INST_NEW (cfg, ins, OP_MOVE);
9458 ins->dreg = alloc_preg (cfg);
9459 ins->sreg1 = sp [0]->dreg;
9460 ins->type = STACK_MP;
9461 MONO_ADD_INS (cfg->cbb, ins);
9465 case CEE_MONO_LDNATIVEOBJ:
9467 * Similar to LDOBJ, but instead load the unmanaged
9468 * representation of the vtype to the stack.
9473 token = read32 (ip + 2);
9474 klass = mono_method_get_wrapper_data (method, token);
9475 g_assert (klass->valuetype);
9476 mono_class_init (klass);
9479 MonoInst *src, *dest, *temp;
/* is_pinvoke makes the temp use the unmanaged (marshalled) layout. */
9482 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9483 temp->backend.is_pinvoke = 1;
9484 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9485 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9487 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9488 dest->type = STACK_VTYPE;
9489 dest->klass = klass;
9495 case CEE_MONO_RETOBJ: {
9497 * Same as RET, but return the native representation of a vtype
9500 g_assert (cfg->ret);
9501 g_assert (mono_method_signature (method)->pinvoke);
9506 token = read32 (ip + 2);
9507 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9509 if (!cfg->vret_addr) {
9510 g_assert (cfg->ret_var_is_local);
9512 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9514 EMIT_NEW_RETLOADA (cfg, ins);
9516 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9518 if (sp != stack_start)
9521 MONO_INST_NEW (cfg, ins, OP_BR);
9522 ins->inst_target_bb = end_bblock;
9523 MONO_ADD_INS (bblock, ins);
9524 link_bblock (cfg, bblock, end_bblock);
9525 start_new_bblock = 1;
/* CEE_MONO_CISINST / CEE_MONO_CCASTCLASS: isinst/castclass variants used
 * by wrappers, lowered via dedicated helpers. */
9529 case CEE_MONO_CISINST:
9530 case CEE_MONO_CCASTCLASS: {
9535 token = read32 (ip + 2);
9536 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9537 if (ip [1] == CEE_MONO_CISINST)
9538 ins = handle_cisinst (cfg, klass, sp [0]);
9540 ins = handle_ccastclass (cfg, klass, sp [0]);
/* CEE_MONO_SAVE_LMF / RESTORE_LMF: only lowered inline on architectures
 * with LMF (last-managed-frame) opcodes. */
9546 case CEE_MONO_SAVE_LMF:
9547 case CEE_MONO_RESTORE_LMF:
9548 #ifdef MONO_ARCH_HAVE_LMF_OPS
9549 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9550 MONO_ADD_INS (bblock, ins);
9551 cfg->need_lmf_area = TRUE;
9555 case CEE_MONO_CLASSCONST:
9556 CHECK_STACK_OVF (1);
9558 token = read32 (ip + 2);
9559 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9562 inline_costs += 10 * num_calls++;
9564 case CEE_MONO_NOT_TAKEN:
9565 bblock->out_of_line = TRUE;
9569 CHECK_STACK_OVF (1);
9571 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9572 ins->dreg = alloc_preg (cfg);
9573 ins->inst_offset = (gint32)read32 (ip + 2);
9574 ins->type = STACK_PTR;
9575 MONO_ADD_INS (bblock, ins);
9579 case CEE_MONO_DYN_CALL: {
9582 /* It would be easier to call a trampoline, but that would put an
9583 * extra frame on the stack, confusing exception handling. So
9584 * implement it inline using an opcode for now.
9587 if (!cfg->dyn_call_var) {
9588 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9589 /* prevent it from being register allocated */
9590 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9593 /* Has to use a call inst since it local regalloc expects it */
9594 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9595 ins = (MonoInst*)call;
9597 ins->sreg1 = sp [0]->dreg;
9598 ins->sreg2 = sp [1]->dreg;
9599 MONO_ADD_INS (bblock, ins);
9601 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9602 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9606 inline_costs += 10 * num_calls++;
9611 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9621 /* somewhat similar to LDTOKEN */
9622 MonoInst *addr, *vtvar;
9623 CHECK_STACK_OVF (1);
9624 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9626 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9627 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9629 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9630 ins->type = STACK_VTYPE;
9631 ins->klass = mono_defaults.argumenthandle_class;
9644 * The following transforms:
9645 * CEE_CEQ into OP_CEQ
9646 * CEE_CGT into OP_CGT
9647 * CEE_CGT_UN into OP_CGT_UN
9648 * CEE_CLT into OP_CLT
9649 * CEE_CLT_UN into OP_CLT_UN
9651 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9653 MONO_INST_NEW (cfg, ins, cmp->opcode);
9655 cmp->sreg1 = sp [0]->dreg;
9656 cmp->sreg2 = sp [1]->dreg;
9657 type_from_op (cmp, sp [0], sp [1]);
9659 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9660 cmp->opcode = OP_LCOMPARE;
9661 else if (sp [0]->type == STACK_R8)
9662 cmp->opcode = OP_FCOMPARE;
9664 cmp->opcode = OP_ICOMPARE;
9665 MONO_ADD_INS (bblock, cmp);
9666 ins->type = STACK_I4;
9667 ins->dreg = alloc_dreg (cfg, ins->type);
9668 type_from_op (ins, sp [0], sp [1]);
9670 if (cmp->opcode == OP_FCOMPARE) {
9672 * The backends expect the fceq opcodes to do the
9675 cmp->opcode = OP_NOP;
9676 ins->sreg1 = cmp->sreg1;
9677 ins->sreg2 = cmp->sreg2;
9679 MONO_ADD_INS (bblock, ins);
9686 MonoMethod *cil_method;
9687 gboolean needs_static_rgctx_invoke;
9689 CHECK_STACK_OVF (1);
9691 n = read32 (ip + 2);
9692 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9693 if (!cmethod || mono_loader_get_last_error ())
9695 mono_class_init (cmethod->klass);
9697 mono_save_token_info (cfg, image, n, cmethod);
9699 if (cfg->generic_sharing_context)
9700 context_used = mono_method_check_context_used (cmethod);
9702 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9704 cil_method = cmethod;
9705 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9706 METHOD_ACCESS_FAILURE;
9708 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9709 if (check_linkdemand (cfg, method, cmethod))
9711 CHECK_CFG_EXCEPTION;
9712 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9713 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9717 * Optimize the common case of ldftn+delegate creation
9719 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9720 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9721 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9722 MonoInst *target_ins;
9724 int invoke_context_used = 0;
9726 invoke = mono_get_delegate_invoke (ctor_method->klass);
9727 if (!invoke || !mono_method_signature (invoke))
9730 if (cfg->generic_sharing_context)
9731 invoke_context_used = mono_method_check_context_used (invoke);
9733 target_ins = sp [-1];
9735 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9736 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9737 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9739 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9743 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9744 /* FIXME: SGEN support */
9745 if (invoke_context_used == 0) {
9747 if (cfg->verbose_level > 3)
9748 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9750 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9751 CHECK_CFG_EXCEPTION;
9760 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9761 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9765 inline_costs += 10 * num_calls++;
9768 case CEE_LDVIRTFTN: {
9773 n = read32 (ip + 2);
9774 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9775 if (!cmethod || mono_loader_get_last_error ())
9777 mono_class_init (cmethod->klass);
9779 if (cfg->generic_sharing_context)
9780 context_used = mono_method_check_context_used (cmethod);
9782 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9783 if (check_linkdemand (cfg, method, cmethod))
9785 CHECK_CFG_EXCEPTION;
9786 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9787 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9793 args [1] = emit_get_rgctx_method (cfg, context_used,
9794 cmethod, MONO_RGCTX_INFO_METHOD);
9797 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9799 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9802 inline_costs += 10 * num_calls++;
9806 CHECK_STACK_OVF (1);
9808 n = read16 (ip + 2);
9810 EMIT_NEW_ARGLOAD (cfg, ins, n);
9815 CHECK_STACK_OVF (1);
9817 n = read16 (ip + 2);
9819 NEW_ARGLOADA (cfg, ins, n);
9820 MONO_ADD_INS (cfg->cbb, ins);
9828 n = read16 (ip + 2);
9830 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9832 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9836 CHECK_STACK_OVF (1);
9838 n = read16 (ip + 2);
9840 EMIT_NEW_LOCLOAD (cfg, ins, n);
9845 unsigned char *tmp_ip;
9846 CHECK_STACK_OVF (1);
9848 n = read16 (ip + 2);
9851 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9857 EMIT_NEW_LOCLOADA (cfg, ins, n);
9866 n = read16 (ip + 2);
9868 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9870 emit_stloc_ir (cfg, sp, header, n);
9877 if (sp != stack_start)
9879 if (cfg->method != method)
9881 * Inlining this into a loop in a parent could lead to
9882 * stack overflows which is different behavior than the
9883 * non-inlined case, thus disable inlining in this case.
9885 goto inline_failure;
9887 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9888 ins->dreg = alloc_preg (cfg);
9889 ins->sreg1 = sp [0]->dreg;
9890 ins->type = STACK_PTR;
9891 MONO_ADD_INS (cfg->cbb, ins);
9893 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9895 ins->flags |= MONO_INST_INIT;
9900 case CEE_ENDFILTER: {
9901 MonoExceptionClause *clause, *nearest;
9902 int cc, nearest_num;
9906 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9908 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9909 ins->sreg1 = (*sp)->dreg;
9910 MONO_ADD_INS (bblock, ins);
9911 start_new_bblock = 1;
9916 for (cc = 0; cc < header->num_clauses; ++cc) {
9917 clause = &header->clauses [cc];
9918 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9919 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9920 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9926 if ((ip - header->code) != nearest->handler_offset)
9931 case CEE_UNALIGNED_:
9932 ins_flag |= MONO_INST_UNALIGNED;
9933 /* FIXME: record alignment? we can assume 1 for now */
9938 ins_flag |= MONO_INST_VOLATILE;
9942 ins_flag |= MONO_INST_TAILCALL;
9943 cfg->flags |= MONO_CFG_HAS_TAIL;
9944 /* Can't inline tail calls at this time */
9945 inline_costs += 100000;
9952 token = read32 (ip + 2);
9953 klass = mini_get_class (method, token, generic_context);
9954 CHECK_TYPELOAD (klass);
9955 if (generic_class_is_reference_type (cfg, klass))
9956 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9958 mini_emit_initobj (cfg, *sp, NULL, klass);
9962 case CEE_CONSTRAINED_:
9964 token = read32 (ip + 2);
9965 if (method->wrapper_type != MONO_WRAPPER_NONE)
9966 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9968 constrained_call = mono_class_get_full (image, token, generic_context);
9969 CHECK_TYPELOAD (constrained_call);
9974 MonoInst *iargs [3];
9978 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9979 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9980 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9981 /* emit_memset only works when val == 0 */
9982 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9987 if (ip [1] == CEE_CPBLK) {
9988 MonoMethod *memcpy_method = get_memcpy_method ();
9989 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9991 MonoMethod *memset_method = get_memset_method ();
9992 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10002 ins_flag |= MONO_INST_NOTYPECHECK;
10004 ins_flag |= MONO_INST_NORANGECHECK;
10005 /* we ignore the no-nullcheck for now since we
10006 * really do it explicitly only when doing callvirt->call
10010 case CEE_RETHROW: {
10012 int handler_offset = -1;
10014 for (i = 0; i < header->num_clauses; ++i) {
10015 MonoExceptionClause *clause = &header->clauses [i];
10016 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10017 handler_offset = clause->handler_offset;
10022 bblock->flags |= BB_EXCEPTION_UNSAFE;
10024 g_assert (handler_offset != -1);
10026 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10027 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10028 ins->sreg1 = load->dreg;
10029 MONO_ADD_INS (bblock, ins);
10031 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10032 MONO_ADD_INS (bblock, ins);
10035 link_bblock (cfg, bblock, end_bblock);
10036 start_new_bblock = 1;
10044 CHECK_STACK_OVF (1);
10046 token = read32 (ip + 2);
10047 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10048 MonoType *type = mono_type_create_from_typespec (image, token);
10049 token = mono_type_size (type, &ialign);
10051 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10052 CHECK_TYPELOAD (klass);
10053 mono_class_init (klass);
10054 token = mono_class_value_size (klass, &align);
10056 EMIT_NEW_ICONST (cfg, ins, token);
10061 case CEE_REFANYTYPE: {
10062 MonoInst *src_var, *src;
10068 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10070 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10071 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10072 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10077 case CEE_READONLY_:
10090 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10100 g_warning ("opcode 0x%02x not handled", *ip);
10104 if (start_new_bblock != 1)
10107 bblock->cil_length = ip - bblock->cil_code;
10108 bblock->next_bb = end_bblock;
10110 if (cfg->method == method && cfg->domainvar) {
10112 MonoInst *get_domain;
10114 cfg->cbb = init_localsbb;
10116 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10117 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10120 get_domain->dreg = alloc_preg (cfg);
10121 MONO_ADD_INS (cfg->cbb, get_domain);
10123 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10124 MONO_ADD_INS (cfg->cbb, store);
10127 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10128 if (cfg->compile_aot)
10129 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10130 mono_get_got_var (cfg);
10133 if (cfg->method == method && cfg->got_var)
10134 mono_emit_load_got_addr (cfg);
10139 cfg->cbb = init_localsbb;
10141 for (i = 0; i < header->num_locals; ++i) {
10142 MonoType *ptype = header->locals [i];
10143 int t = ptype->type;
10144 dreg = cfg->locals [i]->dreg;
10146 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10147 t = mono_class_enum_basetype (ptype->data.klass)->type;
10148 if (ptype->byref) {
10149 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10150 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10151 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10152 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10153 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10154 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10155 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10156 ins->type = STACK_R8;
10157 ins->inst_p0 = (void*)&r8_0;
10158 ins->dreg = alloc_dreg (cfg, STACK_R8);
10159 MONO_ADD_INS (init_localsbb, ins);
10160 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10161 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10162 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10163 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10165 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10170 if (cfg->init_ref_vars && cfg->method == method) {
10171 /* Emit initialization for ref vars */
10172 // FIXME: Avoid duplication initialization for IL locals.
10173 for (i = 0; i < cfg->num_varinfo; ++i) {
10174 MonoInst *ins = cfg->varinfo [i];
10176 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10177 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10181 /* Add a sequence point for method entry/exit events */
10183 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10184 MONO_ADD_INS (init_localsbb, ins);
10185 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10186 MONO_ADD_INS (cfg->bb_exit, ins);
10191 if (cfg->method == method) {
10192 MonoBasicBlock *bb;
10193 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10194 bb->region = mono_find_block_region (cfg, bb->real_offset);
10196 mono_create_spvar_for_region (cfg, bb->region);
10197 if (cfg->verbose_level > 2)
10198 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10202 g_slist_free (class_inits);
10203 dont_inline = g_list_remove (dont_inline, method);
10205 if (inline_costs < 0) {
10208 /* Method is too large */
10209 mname = mono_method_full_name (method, TRUE);
10210 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10211 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10213 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10214 mono_basic_block_free (original_bb);
10218 if ((cfg->verbose_level > 2) && (cfg->method == method))
10219 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10221 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10222 mono_basic_block_free (original_bb);
10223 return inline_costs;
10226 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10233 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10237 set_exception_type_from_invalid_il (cfg, method, ip);
10241 g_slist_free (class_inits);
10242 mono_basic_block_free (original_bb);
10243 dont_inline = g_list_remove (dont_inline, method);
10244 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 * Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM counterpart, used when
 * the value being stored is known to be an immediate.  Asserts on opcodes
 * which have no immediate form.
 */
10249 store_membase_reg_to_store_membase_imm (int opcode)
10252 case OP_STORE_MEMBASE_REG:
10253 return OP_STORE_MEMBASE_IMM;
10254 case OP_STOREI1_MEMBASE_REG:
10255 return OP_STOREI1_MEMBASE_IMM;
10256 case OP_STOREI2_MEMBASE_REG:
10257 return OP_STOREI2_MEMBASE_IMM;
10258 case OP_STOREI4_MEMBASE_REG:
10259 return OP_STOREI4_MEMBASE_IMM;
10260 case OP_STOREI8_MEMBASE_REG:
10261 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for any other store opcode */
10263 g_assert_not_reached ();
10269 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 * Map a register-register opcode to its reg-immediate variant, so a constant
 * operand can be folded directly into the instruction.  Covers the integer and
 * long ALU/shift ops, compares, MEMBASE stores, and (on x86/amd64) a few
 * arch-specific opcodes.
 * NOTE(review): the *_MEMBASE_REG -> *_MEMBASE_IMM cases below duplicate the
 * mapping in store_membase_reg_to_store_membase_imm () above — keep the two in
 * sync if either is extended.
 */
10272 mono_op_to_op_imm (int opcode)
10276 return OP_IADD_IMM;
10278 return OP_ISUB_IMM;
10280 return OP_IDIV_IMM;
10282 return OP_IDIV_UN_IMM;
10284 return OP_IREM_IMM;
10286 return OP_IREM_UN_IMM;
10288 return OP_IMUL_IMM;
10290 return OP_IAND_IMM;
10294 return OP_IXOR_IMM;
10296 return OP_ISHL_IMM;
10298 return OP_ISHR_IMM;
10300 return OP_ISHR_UN_IMM;
10303 return OP_LADD_IMM;
10305 return OP_LSUB_IMM;
10307 return OP_LAND_IMM;
10311 return OP_LXOR_IMM;
10313 return OP_LSHL_IMM;
10315 return OP_LSHR_IMM;
10317 return OP_LSHR_UN_IMM;
10320 return OP_COMPARE_IMM;
10322 return OP_ICOMPARE_IMM;
10324 return OP_LCOMPARE_IMM;
10326 case OP_STORE_MEMBASE_REG:
10327 return OP_STORE_MEMBASE_IMM;
10328 case OP_STOREI1_MEMBASE_REG:
10329 return OP_STOREI1_MEMBASE_IMM;
10330 case OP_STOREI2_MEMBASE_REG:
10331 return OP_STOREI2_MEMBASE_IMM;
10332 case OP_STOREI4_MEMBASE_REG:
10333 return OP_STOREI4_MEMBASE_IMM;
/* Arch-specific immediate forms */
10335 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10337 return OP_X86_PUSH_IMM;
10338 case OP_X86_COMPARE_MEMBASE_REG:
10339 return OP_X86_COMPARE_MEMBASE_IMM;
10341 #if defined(TARGET_AMD64)
10342 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10343 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a constant address becomes a direct call */
10345 case OP_VOIDCALL_REG:
10346 return OP_VOIDCALL;
10354 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 * Map a CIL LDIND_* opcode to the corresponding IR LOAD*_MEMBASE opcode.
 * LDIND_I and LDIND_REF both map to the pointer-sized OP_LOAD_MEMBASE.
 * Asserts on opcodes outside the LDIND family.
 */
10361 ldind_to_load_membase (int opcode)
10365 return OP_LOADI1_MEMBASE;
10367 return OP_LOADU1_MEMBASE;
10369 return OP_LOADI2_MEMBASE;
10371 return OP_LOADU2_MEMBASE;
10373 return OP_LOADI4_MEMBASE;
10375 return OP_LOADU4_MEMBASE;
10377 return OP_LOAD_MEMBASE;
10378 case CEE_LDIND_REF:
10379 return OP_LOAD_MEMBASE;
10381 return OP_LOADI8_MEMBASE;
10383 return OP_LOADR4_MEMBASE;
10385 return OP_LOADR8_MEMBASE;
10387 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 * Map a CIL STIND_* opcode to the corresponding IR STORE*_MEMBASE_REG opcode.
 * STIND_REF maps to the pointer-sized OP_STORE_MEMBASE_REG.  Asserts on
 * opcodes outside the STIND family.
 */
10394 stind_to_store_membase (int opcode)
10398 return OP_STOREI1_MEMBASE_REG;
10400 return OP_STOREI2_MEMBASE_REG;
10402 return OP_STOREI4_MEMBASE_REG;
10404 case CEE_STIND_REF:
10405 return OP_STORE_MEMBASE_REG;
10407 return OP_STOREI8_MEMBASE_REG;
10409 return OP_STORER4_MEMBASE_REG;
10411 return OP_STORER8_MEMBASE_REG;
10413 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 * Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM variant, for
 * architectures which can load directly from a constant address.  Currently
 * only enabled on x86/amd64; OP_LOADI8_MEM only exists on 64-bit targets.
 */
10420 mono_load_membase_to_load_mem (int opcode)
10422 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10423 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10425 case OP_LOAD_MEMBASE:
10426 return OP_LOAD_MEM;
10427 case OP_LOADU1_MEMBASE:
10428 return OP_LOADU1_MEM;
10429 case OP_LOADU2_MEMBASE:
10430 return OP_LOADU2_MEM;
10431 case OP_LOADI4_MEMBASE:
10432 return OP_LOADI4_MEM;
10433 case OP_LOADU4_MEMBASE:
10434 return OP_LOADU4_MEM;
10435 #if SIZEOF_REGISTER == 8
10436 case OP_LOADI8_MEMBASE:
10437 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 * Given an ALU opcode whose result is immediately stored by STORE_OPCODE,
 * return an arch-specific read-modify-write opcode which operates directly on
 * the memory destination (e.g. x86 "add [basereg+offset], reg"), or fall
 * through when the store/ALU combination cannot be fused.
 * Only x86 and amd64 provide such opcodes; on amd64 the 32-bit forms reuse the
 * OP_X86_* names and 64-bit forms use OP_AMD64_*.
 */
10446 op_to_op_dest_membase (int store_opcode, int opcode)
10448 #if defined(TARGET_X86)
/* Only pointer-sized/int32 stores can be fused on x86 */
10449 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10454 return OP_X86_ADD_MEMBASE_REG;
10456 return OP_X86_SUB_MEMBASE_REG;
10458 return OP_X86_AND_MEMBASE_REG;
10460 return OP_X86_OR_MEMBASE_REG;
10462 return OP_X86_XOR_MEMBASE_REG;
10465 return OP_X86_ADD_MEMBASE_IMM;
10468 return OP_X86_SUB_MEMBASE_IMM;
10471 return OP_X86_AND_MEMBASE_IMM;
10474 return OP_X86_OR_MEMBASE_IMM;
10477 return OP_X86_XOR_MEMBASE_IMM;
10483 #if defined(TARGET_AMD64)
/* amd64 can also fuse 64-bit stores */
10484 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10489 return OP_X86_ADD_MEMBASE_REG;
10491 return OP_X86_SUB_MEMBASE_REG;
10493 return OP_X86_AND_MEMBASE_REG;
10495 return OP_X86_OR_MEMBASE_REG;
10497 return OP_X86_XOR_MEMBASE_REG;
10499 return OP_X86_ADD_MEMBASE_IMM;
10501 return OP_X86_SUB_MEMBASE_IMM;
10503 return OP_X86_AND_MEMBASE_IMM;
10505 return OP_X86_OR_MEMBASE_IMM;
10507 return OP_X86_XOR_MEMBASE_IMM;
10509 return OP_AMD64_ADD_MEMBASE_REG;
10511 return OP_AMD64_SUB_MEMBASE_REG;
10513 return OP_AMD64_AND_MEMBASE_REG;
10515 return OP_AMD64_OR_MEMBASE_REG;
10517 return OP_AMD64_XOR_MEMBASE_REG;
10520 return OP_AMD64_ADD_MEMBASE_IMM;
10523 return OP_AMD64_SUB_MEMBASE_IMM;
10526 return OP_AMD64_AND_MEMBASE_IMM;
10529 return OP_AMD64_OR_MEMBASE_IMM;
10532 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 * When a compare-result opcode is immediately stored through a one-byte store,
 * return an arch opcode which writes the flag directly to memory
 * (x86 SETcc m8).  Only x86/amd64 provide this fusion.
 */
10542 op_to_op_store_membase (int store_opcode, int opcode)
10544 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only an I1 store can be fused */
10547 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10548 return OP_X86_SETEQ_MEMBASE;
10550 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10551 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 * When the first source of OPCODE was just produced by LOAD_OPCODE, return an
 * arch opcode which reads that operand directly from memory, folding the load
 * away.  Falls through when the load size doesn't match what the fused opcode
 * would read.
 */
10559 op_to_op_src1_membase (int load_opcode, int opcode)
10562 /* FIXME: This has sign extension issues */
10564 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10565 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only full-width (ptr/int32) loads can be folded on x86 */
10568 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10573 return OP_X86_PUSH_MEMBASE;
10574 case OP_COMPARE_IMM:
10575 case OP_ICOMPARE_IMM:
10576 return OP_X86_COMPARE_MEMBASE_IMM;
10579 return OP_X86_COMPARE_MEMBASE_REG;
10583 #ifdef TARGET_AMD64
10584 /* FIXME: This has sign extension issues */
10586 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10587 return OP_X86_COMPARE_MEMBASE8_IMM;
10592 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10593 return OP_X86_PUSH_MEMBASE;
/* The COMPARE_IMM cases below are intentionally disabled (32-bit-immediate
 * limitation on amd64) — kept as a comment in the original. */
10595 /* FIXME: This only works for 32 bit immediates
10596 case OP_COMPARE_IMM:
10597 case OP_LCOMPARE_IMM:
10598 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10599 return OP_AMD64_COMPARE_MEMBASE_IMM;
10601 case OP_ICOMPARE_IMM:
10602 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10603 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10607 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10608 return OP_AMD64_COMPARE_MEMBASE_REG;
10611 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10612 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 * Same idea as op_to_op_src1_membase (), but for the second source operand:
 * fold a preceding LOAD_OPCODE into OPCODE as a reg-memory instruction
 * (e.g. x86 "add reg, [basereg+offset]").  x86/amd64 only; on amd64 the
 * 32-bit forms reuse the OP_X86_* names.
 */
10621 op_to_op_src2_membase (int load_opcode, int opcode)
10624 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10630 return OP_X86_COMPARE_REG_MEMBASE;
10632 return OP_X86_ADD_REG_MEMBASE;
10634 return OP_X86_SUB_REG_MEMBASE;
10636 return OP_X86_AND_REG_MEMBASE;
10638 return OP_X86_OR_REG_MEMBASE;
10640 return OP_X86_XOR_REG_MEMBASE;
10644 #ifdef TARGET_AMD64
/* 32-bit operand width */
10645 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10648 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10650 return OP_X86_ADD_REG_MEMBASE;
10652 return OP_X86_SUB_REG_MEMBASE;
10654 return OP_X86_AND_REG_MEMBASE;
10656 return OP_X86_OR_REG_MEMBASE;
10658 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operand width */
10660 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10664 return OP_AMD64_COMPARE_REG_MEMBASE;
10666 return OP_AMD64_ADD_REG_MEMBASE;
10668 return OP_AMD64_SUB_REG_MEMBASE;
10670 return OP_AMD64_AND_REG_MEMBASE;
10672 return OP_AMD64_OR_REG_MEMBASE;
10674 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 * Like mono_op_to_op_imm (), but refuses the conversion for opcodes which are
 * software-emulated on this architecture (long shifts on 32-bit targets
 * without native support, mul/div where MONO_ARCH_EMULATE_* is defined),
 * since the emulation paths need the reg-reg form.
 */
10683 mono_op_to_op_imm_noemul (int opcode)
10686 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10692 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10700 return mono_op_to_op_imm (opcode);
10704 #ifndef DISABLE_JIT
10707 * mono_handle_global_vregs:
10709 * Make vregs used in more than one bblock 'global', i.e. allocate a variable for them.
/*
 * mono_handle_global_vregs:
 *
 * Three passes over the IR:
 *   1) find vregs referenced from more than one bblock and promote them to
 *      "global" variables (a MonoInst var is created for them);
 *   2) demote variables which turn out to be used in only one bblock back to
 *      local vregs (marked MONO_INST_IS_DEAD), unless volatile/indirect/arg;
 *   3) compress the varinfo/vars tables so dead entries are removed and
 *      indices stay dense.
 */
10713 mono_handle_global_vregs (MonoCompile *cfg)
10715 gint32 *vreg_to_bb;
10716 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, N+1 = seen only in block N, -1 = seen in
 * multiple blocks.
 * NOTE(review): sizeof (gint32*) looks like it should be sizeof (gint32),
 * and the "+ 1" binds to the byte count rather than the element count
 * ((next_vreg + 1) elements was presumably intended).  Harmless
 * over-allocation on LP64, but worth confirming/fixing upstream. */
10719 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10721 #ifdef MONO_ARCH_SIMD_INTRINSICS
10722 if (cfg->uses_simd_intrinsics)
10723 mono_simd_simplify_indirection (cfg);
10726 /* Find local vregs used in more than one bb */
10727 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10728 MonoInst *ins = bb->code;
10729 int block_num = bb->block_num;
10731 if (cfg->verbose_level > 2)
10732 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10735 for (; ins; ins = ins->next) {
10736 const char *spec = INS_INFO (ins->opcode);
10737 int regtype = 0, regindex;
10740 if (G_UNLIKELY (cfg->verbose_level > 2))
10741 mono_print_ins (ins);
/* By this point the IR must contain only machine opcodes, no CIL ones */
10743 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0) and sreg1..sreg3 (indices 1-3) of this ins */
10745 for (regindex = 0; regindex < 4; regindex ++) {
10748 if (regindex == 0) {
10749 regtype = spec [MONO_INST_DEST];
10750 if (regtype == ' ')
10753 } else if (regindex == 1) {
10754 regtype = spec [MONO_INST_SRC1];
10755 if (regtype == ' ')
10758 } else if (regindex == 2) {
10759 regtype = spec [MONO_INST_SRC2];
10760 if (regtype == ' ')
10763 } else if (regindex == 3) {
10764 regtype = spec [MONO_INST_SRC3];
10765 if (regtype == ' ')
10770 #if SIZEOF_REGISTER == 4
10771 /* In the LLVM case, the long opcodes are not decomposed */
10772 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10774 * Since some instructions reference the original long vreg,
10775 * and some reference the two component vregs, it is quite hard
10776 * to determine when it needs to be global. So be conservative.
10778 if (!get_vreg_to_inst (cfg, vreg)) {
10779 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10781 if (cfg->verbose_level > 2)
10782 printf ("LONG VREG R%d made global.\n", vreg);
10786 * Make the component vregs volatile since the optimizations can
10787 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high halves of the decomposed long */
10789 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10790 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10794 g_assert (vreg != -1);
10796 prev_bb = vreg_to_bb [vreg];
10797 if (prev_bb == 0) {
10798 /* 0 is a valid block num */
10799 vreg_to_bb [vreg] = block_num + 1;
10800 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never promoted to variables */
10801 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10804 if (!get_vreg_to_inst (cfg, vreg)) {
10805 if (G_UNLIKELY (cfg->verbose_level > 2))
10806 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the spec regtype */
10810 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10813 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10816 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10819 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10822 g_assert_not_reached ();
10826 /* Flag as having been used in more than one bb */
10827 vreg_to_bb [vreg] = -1;
10833 /* If a variable is used in only one bblock, convert it into a local vreg */
10834 for (i = 0; i < cfg->num_varinfo; i++) {
10835 MonoInst *var = cfg->varinfo [i];
10836 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10838 switch (var->type) {
10844 #if SIZEOF_REGISTER == 8
10847 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10848 /* Enabling this screws up the fp stack on x86 */
10851 /* Arguments are implicitly global */
10852 /* Putting R4 vars into registers doesn't work currently */
10853 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10855 * Make that the variable's liveness interval doesn't contain a call, since
10856 * that would cause the lvreg to be spilled, making the whole optimization
10859 /* This is too slow for JIT compilation */
10861 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10863 int def_index, call_index, ins_index;
10864 gboolean spilled = FALSE;
10869 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10870 const char *spec = INS_INFO (ins->opcode);
10872 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10873 def_index = ins_index;
/* NOTE(review): both clauses of this condition test MONO_INST_SRC1/sreg1 —
 * the second was presumably meant to test MONO_INST_SRC2/sreg2, so uses of
 * the var through sreg2 are currently missed by this spill check. Verify
 * and fix upstream. */
10875 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10876 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10877 if (call_index > def_index) {
10883 if (MONO_IS_CALL (ins))
10884 call_index = ins_index;
10894 if (G_UNLIKELY (cfg->verbose_level > 2))
10895 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the var dead and drop the vreg->var mapping */
10896 var->flags |= MONO_INST_IS_DEAD;
10897 cfg->vreg_to_inst [var->dreg] = NULL;
10904 * Compress the varinfo and vars tables so the liveness computation is faster and
10905 * takes up less space.
10908 for (i = 0; i < cfg->num_varinfo; ++i) {
10909 MonoInst *var = cfg->varinfo [i];
10910 if (pos < i && cfg->locals_start == i)
10911 cfg->locals_start = pos;
10912 if (!(var->flags & MONO_INST_IS_DEAD)) {
10914 cfg->varinfo [pos] = cfg->varinfo [i];
10915 cfg->varinfo [pos]->inst_c0 = pos;
10916 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10917 cfg->vars [pos].idx = pos;
10918 #if SIZEOF_REGISTER == 4
10919 if (cfg->varinfo [pos]->type == STACK_I8) {
10920 /* Modify the two component vars too */
10923 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10924 var1->inst_c0 = pos;
10925 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10926 var1->inst_c0 = pos;
10933 cfg->num_varinfo = pos;
10934 if (cfg->locals_start > cfg->num_varinfo)
10935 cfg->locals_start = cfg->num_varinfo;
10939 * mono_spill_global_vars:
10941 * Generate spill code for variables which are not allocated to registers,
10942 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10943 * code is generated which could be optimized by the local optimization passes.
10946 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10948 MonoBasicBlock *bb;
10950 int orig_next_vreg;
10951 guint32 *vreg_to_lvreg;
10953 guint32 i, lvregs_len;
10954 gboolean dest_has_lvreg = FALSE;
10955 guint32 stacktypes [128];
10956 MonoInst **live_range_start, **live_range_end;
10957 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10959 *need_local_opts = FALSE;
10961 memset (spec2, 0, sizeof (spec2));
10963 /* FIXME: Move this function to mini.c */
/* Map the ins-spec register-type characters to the stack types used by alloc_dreg (). */
10964 stacktypes ['i'] = STACK_PTR;
10965 stacktypes ['l'] = STACK_I8;
10966 stacktypes ['f'] = STACK_R8;
10967 #ifdef MONO_ARCH_SIMD_INTRINSICS
10968 stacktypes ['x'] = STACK_VTYPE;
10971 #if SIZEOF_REGISTER == 4
10972 /* Create MonoInsts for longs */
10973 for (i = 0; i < cfg->num_varinfo; i++) {
10974 MonoInst *ins = cfg->varinfo [i];
10976 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10977 switch (ins->type) {
10982 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10985 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32 bit component vregs (dreg+1 = low word, dreg+2 = high word) REGOFFSET slots inside the long's stack slot. */
10987 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10989 tree->opcode = OP_REGOFFSET;
10990 tree->inst_basereg = ins->inst_basereg;
10991 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10993 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10995 tree->opcode = OP_REGOFFSET;
10996 tree->inst_basereg = ins->inst_basereg;
10997 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11007 /* FIXME: widening and truncation */
11010 * As an optimization, when a variable allocated to the stack is first loaded into
11011 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11012 * the variable again.
11014 orig_next_vreg = cfg->next_vreg;
11015 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11016 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11020 * These arrays contain the first and last instructions accessing a given
11022 * Since we emit bblocks in the same order we process them here, and we
11023 * don't split live ranges, these will precisely describe the live range of
11024 * the variable, i.e. the instruction range where a valid value can be found
11025 * in the variables location.
11026 * The live range is computed using the liveness info computed by the liveness pass.
11027 * We can't use vmv->range, since that is an abstract live range, and we need
11028 * one which is instruction precise.
11029 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11031 /* FIXME: Only do this if debugging info is requested */
11032 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11033 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11034 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11035 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11037 /* Add spill loads/stores */
11038 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11041 if (cfg->verbose_level > 2)
11042 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11044 /* Clear vreg_to_lvreg array */
11045 for (i = 0; i < lvregs_len; i++)
11046 vreg_to_lvreg [lvregs [i]] = 0;
/*
 * Main per-instruction pass: store the dreg of each instruction back to the
 * variable's stack slot (or fuse the store into the instruction), and load
 * each sreg from its stack slot (or reuse a cached lvreg / fuse the load).
 */
11050 MONO_BB_FOR_EACH_INS (bb, ins) {
11051 const char *spec = INS_INFO (ins->opcode);
11052 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11053 gboolean store, no_lvreg;
11054 int sregs [MONO_MAX_SRC_REGS];
11056 if (G_UNLIKELY (cfg->verbose_level > 2))
11057 mono_print_ins (ins);
11059 if (ins->opcode == OP_NOP)
11063 * We handle LDADDR here as well, since it can only be decomposed
11064 * when variable addresses are known.
11066 if (ins->opcode == OP_LDADDR) {
11067 MonoInst *var = ins->inst_p0;
11069 if (var->opcode == OP_VTARG_ADDR) {
11070 /* Happens on SPARC/S390 where vtypes are passed by reference */
11071 MonoInst *vtaddr = var->inst_left;
11072 if (vtaddr->opcode == OP_REGVAR) {
11073 ins->opcode = OP_MOVE;
11074 ins->sreg1 = vtaddr->dreg;
11076 else if (var->inst_left->opcode == OP_REGOFFSET) {
11077 ins->opcode = OP_LOAD_MEMBASE;
11078 ins->inst_basereg = vtaddr->inst_basereg;
11079 ins->inst_offset = vtaddr->inst_offset;
11083 g_assert (var->opcode == OP_REGOFFSET);
/* The address of a stack slot is basereg + offset. */
11085 ins->opcode = OP_ADD_IMM;
11086 ins->sreg1 = var->inst_basereg;
11087 ins->inst_imm = var->inst_offset;
11090 *need_local_opts = TRUE;
11091 spec = INS_INFO (ins->opcode);
11094 if (ins->opcode < MONO_CEE_LAST) {
11095 mono_print_ins (ins);
11096 g_assert_not_reached ();
11100 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11104 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg and sreg2 so the base register is processed as a source; undone near the end of the loop body. */
11105 tmp_reg = ins->dreg;
11106 ins->dreg = ins->sreg2;
11107 ins->sreg2 = tmp_reg;
11110 spec2 [MONO_INST_DEST] = ' ';
11111 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11112 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11113 spec2 [MONO_INST_SRC3] = ' ';
11115 } else if (MONO_IS_STORE_MEMINDEX (ins))
11116 g_assert_not_reached ();
11121 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11122 printf ("\t %.3s %d", spec, ins->dreg);
11123 num_sregs = mono_inst_get_src_registers (ins, sregs);
11124 for (srcindex = 0; srcindex < 3; ++srcindex)
11125 printf (" %d", sregs [srcindex]);
/*
 * DREG: if the destination is a global variable, either rename it to the
 * variable's hreg (OP_REGVAR), or allocate a fresh lvreg and emit/fuse a
 * store back into the variable's stack slot after the instruction.
 */
11132 regtype = spec [MONO_INST_DEST];
11133 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11136 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11137 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11138 MonoInst *store_ins;
11140 MonoInst *def_ins = ins;
11141 int dreg = ins->dreg; /* The original vreg */
11143 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11145 if (var->opcode == OP_REGVAR) {
11146 ins->dreg = var->dreg;
11147 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11149 * Instead of emitting a load+store, use a _membase opcode.
11151 g_assert (var->opcode == OP_REGOFFSET);
11152 if (ins->opcode == OP_MOVE) {
11156 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11157 ins->inst_basereg = var->inst_basereg;
11158 ins->inst_offset = var->inst_offset;
11161 spec = INS_INFO (ins->opcode);
11165 g_assert (var->opcode == OP_REGOFFSET);
11167 prev_dreg = ins->dreg;
11169 /* Invalidate any previous lvreg for this vreg */
11170 vreg_to_lvreg [ins->dreg] = 0;
11174 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11176 store_opcode = OP_STOREI8_MEMBASE_REG;
11179 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11181 if (regtype == 'l') {
/* Long values on 32 bit: store the two component vregs to the two halves of the slot. */
11182 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11183 mono_bblock_insert_after_ins (bb, ins, store_ins);
11184 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11185 mono_bblock_insert_after_ins (bb, ins, store_ins);
11186 def_ins = store_ins;
11189 g_assert (store_opcode != OP_STOREV_MEMBASE);
11191 /* Try to fuse the store into the instruction itself */
11192 /* FIXME: Add more instructions */
11193 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11194 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11195 ins->inst_imm = ins->inst_c0;
11196 ins->inst_destbasereg = var->inst_basereg;
11197 ins->inst_offset = var->inst_offset;
11198 spec = INS_INFO (ins->opcode);
11199 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11200 ins->opcode = store_opcode;
11201 ins->inst_destbasereg = var->inst_basereg;
11202 ins->inst_offset = var->inst_offset;
11206 tmp_reg = ins->dreg;
11207 ins->dreg = ins->sreg2;
11208 ins->sreg2 = tmp_reg;
11211 spec2 [MONO_INST_DEST] = ' ';
11212 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11213 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11214 spec2 [MONO_INST_SRC3] = ' ';
11216 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11217 // FIXME: The backends expect the base reg to be in inst_basereg
11218 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11220 ins->inst_basereg = var->inst_basereg;
11221 ins->inst_offset = var->inst_offset;
11222 spec = INS_INFO (ins->opcode);
11224 /* printf ("INS: "); mono_print_ins (ins); */
11225 /* Create a store instruction */
11226 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11228 /* Insert it after the instruction */
11229 mono_bblock_insert_after_ins (bb, ins, store_ins);
11231 def_ins = store_ins;
11234 * We can't assign ins->dreg to var->dreg here, since the
11235 * sregs could use it. So set a flag, and do it after
11238 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11239 dest_has_lvreg = TRUE;
11244 if (def_ins && !live_range_start [dreg]) {
11245 live_range_start [dreg] = def_ins;
11246 live_range_start_bb [dreg] = bb;
/*
 * SREGS: replace each source vreg with the variable's hreg, a cached lvreg,
 * or a freshly loaded lvreg (emitting the load before the instruction or
 * fusing it into the instruction when possible).
 */
11253 num_sregs = mono_inst_get_src_registers (ins, sregs);
11254 for (srcindex = 0; srcindex < 3; ++srcindex) {
11255 regtype = spec [MONO_INST_SRC1 + srcindex];
11256 sreg = sregs [srcindex];
11258 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11259 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11260 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11261 MonoInst *use_ins = ins;
11262 MonoInst *load_ins;
11263 guint32 load_opcode;
11265 if (var->opcode == OP_REGVAR) {
11266 sregs [srcindex] = var->dreg;
11267 //mono_inst_set_src_registers (ins, sregs);
11268 live_range_end [sreg] = use_ins;
11269 live_range_end_bb [sreg] = bb;
11273 g_assert (var->opcode == OP_REGOFFSET);
11275 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11277 g_assert (load_opcode != OP_LOADV_MEMBASE);
11279 if (vreg_to_lvreg [sreg]) {
11280 g_assert (vreg_to_lvreg [sreg] != -1);
11282 /* The variable is already loaded to an lvreg */
11283 if (G_UNLIKELY (cfg->verbose_level > 2))
11284 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11285 sregs [srcindex] = vreg_to_lvreg [sreg];
11286 //mono_inst_set_src_registers (ins, sregs);
11290 /* Try to fuse the load into the instruction */
11291 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11292 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11293 sregs [0] = var->inst_basereg;
11294 //mono_inst_set_src_registers (ins, sregs);
11295 ins->inst_offset = var->inst_offset;
11296 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11297 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11298 sregs [1] = var->inst_basereg;
11299 //mono_inst_set_src_registers (ins, sregs);
11300 ins->inst_offset = var->inst_offset;
11302 if (MONO_IS_REAL_MOVE (ins)) {
11303 ins->opcode = OP_NOP;
11306 //printf ("%d ", srcindex); mono_print_ins (ins);
11308 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11310 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11311 if (var->dreg == prev_dreg) {
11313 * sreg refers to the value loaded by the load
11314 * emitted below, but we need to use ins->dreg
11315 * since it refers to the store emitted earlier.
11319 g_assert (sreg != -1);
11320 vreg_to_lvreg [var->dreg] = sreg;
11321 g_assert (lvregs_len < 1024);
11322 lvregs [lvregs_len ++] = var->dreg;
11326 sregs [srcindex] = sreg;
11327 //mono_inst_set_src_registers (ins, sregs);
11329 if (regtype == 'l') {
/* Long values on 32 bit: load the two halves into the two component vregs. */
11330 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11331 mono_bblock_insert_before_ins (bb, ins, load_ins);
11332 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11333 mono_bblock_insert_before_ins (bb, ins, load_ins);
11334 use_ins = load_ins;
11337 #if SIZEOF_REGISTER == 4
11338 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11340 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11341 mono_bblock_insert_before_ins (bb, ins, load_ins);
11342 use_ins = load_ins;
11346 if (var->dreg < orig_next_vreg) {
11347 live_range_end [var->dreg] = use_ins;
11348 live_range_end_bb [var->dreg] = bb;
11352 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above: now that the sregs are processed it is safe to cache ins->dreg as the lvreg of prev_dreg. */
11354 if (dest_has_lvreg) {
11355 g_assert (ins->dreg != -1);
11356 vreg_to_lvreg [prev_dreg] = ins->dreg;
11357 g_assert (lvregs_len < 1024);
11358 lvregs [lvregs_len ++] = prev_dreg;
11359 dest_has_lvreg = FALSE;
/* Undo the dreg<->sreg2 swap performed for store opcodes at the top of the loop body. */
11363 tmp_reg = ins->dreg;
11364 ins->dreg = ins->sreg2;
11365 ins->sreg2 = tmp_reg;
11368 if (MONO_IS_CALL (ins)) {
11369 /* Clear vreg_to_lvreg array */
11370 for (i = 0; i < lvregs_len; i++)
11371 vreg_to_lvreg [lvregs [i]] = 0;
11373 } else if (ins->opcode == OP_NOP) {
11375 MONO_INST_NULLIFY_SREGS (ins);
11378 if (cfg->verbose_level > 2)
11379 mono_print_ins_index (1, ins);
11382 /* Extend the live range based on the liveness info */
11383 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11384 for (i = 0; i < cfg->num_varinfo; i ++) {
11385 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11387 if (vreg_is_volatile (cfg, vi->vreg))
11388 /* The liveness info is incomplete */
11391 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11392 /* Live from at least the first ins of this bb */
11393 live_range_start [vi->vreg] = bb->code;
11394 live_range_start_bb [vi->vreg] = bb;
11397 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11398 /* Live at least until the last ins of this bb */
11399 live_range_end [vi->vreg] = bb->last_ins;
11400 live_range_end_bb [vi->vreg] = bb;
11406 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11408 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11409 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11411 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11412 for (i = 0; i < cfg->num_varinfo; ++i) {
11413 int vreg = MONO_VARINFO (cfg, i)->vreg;
11416 if (live_range_start [vreg]) {
11417 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11419 ins->inst_c1 = vreg;
11420 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11422 if (live_range_end [vreg]) {
11423 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11425 ins->inst_c1 = vreg;
11426 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11427 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11429 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays are g_new-allocated (not mempool), so free them here. */
11435 g_free (live_range_start);
11436 g_free (live_range_end);
11437 g_free (live_range_start_bb);
11438 g_free (live_range_end_bb);
11443 * - use 'iadd' instead of 'int_add'
11444 * - handling ovf opcodes: decompose in method_to_ir.
11445 * - unify iregs/fregs
11446 * -> partly done, the missing parts are:
11447 * - a more complete unification would involve unifying the hregs as well, so
11448 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11449 * would no longer map to the machine hregs, so the code generators would need to
11450 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11451 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11452 * fp/non-fp branches speeds it up by about 15%.
11453 * - use sext/zext opcodes instead of shifts
11455 * - get rid of TEMPLOADs if possible and use vregs instead
11456 * - clean up usage of OP_P/OP_ opcodes
11457 * - cleanup usage of DUMMY_USE
11458 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11460 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11461 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11462 * - make sure handle_stack_args () is called before the branch is emitted
11463 * - when the new IR is done, get rid of all unused stuff
11464 * - COMPARE/BEQ as separate instructions or unify them ?
11465 * - keeping them separate allows specialized compare instructions like
11466 * compare_imm, compare_membase
11467 * - most back ends unify fp compare+branch, fp compare+ceq
11468 * - integrate mono_save_args into inline_method
11469 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11470 * - handle long shift opts on 32 bit platforms somehow: they require
11471 * 3 sregs (2 for arg1 and 1 for arg2)
11472 * - make byref a 'normal' type.
11473 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11474 * variable if needed.
11475 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11476 * like inline_method.
11477 * - remove inlining restrictions
11478 * - fix LNEG and enable cfold of INEG
11479 * - generalize x86 optimizations like ldelema as a peephole optimization
11480 * - add store_mem_imm for amd64
11481 * - optimize the loading of the interruption flag in the managed->native wrappers
11482 * - avoid special handling of OP_NOP in passes
11483 * - move code inserting instructions into one function/macro.
11484 * - try a coalescing phase after liveness analysis
11485 * - add float -> vreg conversion + local optimizations on !x86
11486 * - figure out how to handle decomposed branches during optimizations, ie.
11487 * compare+branch, op_jump_table+op_br etc.
11488 * - promote RuntimeXHandles to vregs
11489 * - vtype cleanups:
11490 * - add a NEW_VARLOADA_VREG macro
11491 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11492 * accessing vtype fields.
11493 * - get rid of I8CONST on 64 bit platforms
11494 * - dealing with the increase in code size due to branches created during opcode
11496 * - use extended basic blocks
11497 * - all parts of the JIT
11498 * - handle_global_vregs () && local regalloc
11499 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11500 * - sources of increase in code size:
11503 * - isinst and castclass
11504 * - lvregs not allocated to global registers even if used multiple times
11505 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11507 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11508 * - add all micro optimizations from the old JIT
11509 * - put tree optimizations into the deadce pass
11510 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11511 * specific function.
11512 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11513 * fcompare + branchCC.
11514 * - create a helper function for allocating a stack slot, taking into account
11515 * MONO_CFG_HAS_SPILLUP.
11517 * - merge the ia64 switch changes.
11518 * - optimize mono_regstate2_alloc_int/float.
11519 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11520 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11521 * parts of the tree could be separated by other instructions, killing the tree
11522 * arguments, or stores killing loads etc. Also, should we fold loads into other
11523 * instructions if the result of the load is used multiple times ?
11524 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11525 * - LAST MERGE: 108395.
11526 * - when returning vtypes in registers, generate IR and append it to the end of the
11527 * last bb instead of doing it in the epilog.
11528 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11536 - When to decompose opcodes:
11537 - earlier: this makes some optimizations hard to implement, since the low level IR
11538 no longer contains the necessary information. But it is easier to do.
11539 - later: harder to implement, enables more optimizations.
11540 - Branches inside bblocks:
11541 - created when decomposing complex opcodes.
11542 - branches to another bblock: harmless, but not tracked by the branch
11543 optimizations, so need to branch to a label at the start of the bblock.
11544 - branches to inside the same bblock: very problematic, trips up the local
11545 reg allocator. Can be fixed by splitting the current bblock, but that is a
11546 complex operation, since some local vregs can become global vregs etc.
11547 - Local/global vregs:
11548 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11549 local register allocator.
11550 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11551 structure, created by mono_create_var (). Assigned to hregs or the stack by
11552 the global register allocator.
11553 - When to do optimizations like alu->alu_imm:
11554 - earlier -> saves work later on since the IR will be smaller/simpler
11555 - later -> can work on more instructions
11556 - Handling of valuetypes:
11557 - When a vtype is pushed on the stack, a new temporary is created, an
11558 instruction computing its address (LDADDR) is emitted and pushed on
11559 the stack. Need to optimize cases when the vtype is used immediately as in
11560 argument passing, stloc etc.
11561 - Instead of the to_end stuff in the old JIT, simply call the function handling
11562 the values on the stack before emitting the last instruction of the bb.
11565 #endif /* DISABLE_JIT */