2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
/* NOTE(review): this region is an elided excerpt — the do{...}while(0)
 * closers of the macros below are not visible. Code lines kept verbatim;
 * only comments added. TODO confirm bodies against the full source. */
/* Presumably records an inlining failure when compiling a non-wrapper
 * inlined method — body truncated here. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation if the MonoCompile already holds a pending
 * exception (body truncated in this excerpt). */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a method-access violation: format a human-readable message from
 * the caller/callee full names, set the exception type on the cfg and jump
 * to the exception_exit label. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Same shape as METHOD_ACCESS_FAILURE, but for an inaccessible field. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Abort compilation of a generic-shared method when 'opcode' cannot be
 * compiled under generic sharing: optionally trace, set
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness bookkeeping; 0xffff appears to be
 * the "not yet used" sentinel for the first-use basic-block id (macro body
 * truncated in this excerpt — TODO confirm). */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers from 'regs' into an instruction. */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Thin public wrappers over the vreg allocators: integer, float, pointer
 * and stack-typed destination registers respectively. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/* Map a MonoType to the move opcode used when copying it between registers.
 * NOTE(review): many case labels and return statements are elided in this
 * excerpt; the visible structure mirrors type_to_eval_stack_type below. */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
/* Enums move like their underlying base type: re-dispatch on it. */
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances re-dispatch on the container class's byval type. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's incoming and outgoing edges
 * (block number plus depth-first number) followed by its instructions. */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump every instruction in the block's code list. */
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* Signal unverifiable IL: break into the debugger if requested, otherwise
 * jump to the 'unverified' label in the enclosing function. */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Fetch (or lazily create and register) the basic block for IL offset 'ip'.
 * NOTE(review): macro bodies in this region are elided; lines verbatim. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm. */
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen an I4 second operand to pointer width with a
 * sign-extension before a pointer-sized binop; no-op on 32-bit. */
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, type the binop via type_from_op, allocate a dreg,
 * emit and push the (possibly decomposed) result. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary variant of ADD_BINOP: one operand, same type/emit/push pipeline. */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch: link the current bblock to both the
 * taken target and the fall-through block (next_block, or the block at the
 * following ip), flushing any remaining stack values first. */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
/* Values still on the eval stack must be spilled before the branch. */
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
389 * Both edge lists are grown by copying into a fresh mempool array;
390 * duplicate edges are detected and skipped (loops below — excerpt elided).
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Optional tracing of the edge being added (entry/exit are the blocks
 * without cil_code). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip if 'to' is already a successor of 'from'. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot (mempool arrays are append-by-copy). */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow for to->in_bb. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
464 * Token layout visible below: (clause_index + 1) << 8 | region_kind | flags.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Filter region: between the filter start and the handler start. */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Handler body: classify by clause kind (finally/fault/catch). */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise, inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the exception clauses of kind 'type' that a branch
 * from 'ip' to 'target' leaves — i.e. clauses containing ip but not target.
 * Used to know which finally/fault handlers must run on the way out. */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer variable associated with an EH region;
 * cached per-region in cfg->spvars. */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler
 * at the given IL offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable (typed object) for a handler
 * at the given IL offset; cached in cfg->exvars. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
556 * Sets both inst->type (STACK_*) and inst->klass as a side effect.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments are managed pointers on the eval stack (branch above
 * is elided in this excerpt). */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
/* Enums evaluate as their underlying base type; other valuetypes are
 * STACK_VTYPE. */
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
/* Generic instances re-dispatch on the container class's byval type. */
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * It also specializes the generic opcode into the I/L/P/F variant by adding
743 * the per-stack-type offset from the *_op_map tables above.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
746 * NOTE(review): many case labels/breaks are elided in this excerpt.
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type depends on the value operand only. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-cc opcodes. */
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops: neg keeps the (possibly invalid) table type, not only
 * ints like the following branch. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int/long to double. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions, signed-source family (ovf3 map). */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
/* Overflow-checked conversions, unsigned-source family (ovf2 map). */
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width result; variant depends on source and word size. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8/U8. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow arithmetic: R8 results are invalid IL here. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: stack type follows the load width. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object as their pointee class. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack-typed values in 'args' are compatible with the
 * parameter types of 'sig' (used for verification-style checks).
 * NOTE(review): most return statements and case labels are elided here. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
/* Managed pointers are only acceptable for byref parameters. */
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* Floating point stack values must map to R4/R8 parameters. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1034 * Lazily created and cached on the cfg.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1045 * compiling; only meaningful on targets that define MONO_ARCH_NEED_GOT_VAR.
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; only valid
 * under generic sharing. Kept stack-allocated via MONO_INST_INDIRECT. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map a STACK_* eval-stack type back
 * to a representative MonoType (using ins->klass for MP/vtype cases). */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP (case label elided): byref to the instruction's class. */
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* eval-stack category, after stripping enum
 * wrappers with mono_type_get_underlying_type.
 * NOTE(review): the switch header and most return statements are elided. */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: valuetype instantiations are STACK_VTYPE. */
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/* Map a CIL ldelem.*/stelem.* opcode to the element MonoClass it accesses
 * (most case labels are elided in this excerpt; returns pair up with the
 * usual I1/U1/I2/U2/I4/U4/I8/R4/R8/ref ordering). */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1182 * Returns the spill variable for eval-stack slot 'slot' holding a value of
1183 * ins->type, reusing a cached one from cfg->intvars when possible.
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to a fresh variable. */
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Remember the (image, token) pair that produced 'key' so the AOT compiler
 * can later re-resolve it; stored in cfg->token_info_hash. */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1240 * NOTE(review): several closing braces/continue statements are elided below.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: choose the spill variables (out_stack). */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer adopting an existing in_stack from a successor, if any. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* Otherwise allocate fresh spill variables for each stack slot. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet; a
 * successor with a mismatching in_scount makes the IL unverifiable. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
/* Spill the current stack values into the chosen variables and replace
 * the stack entries with loads of those variables. */
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map the return TYPE of a callee to the IR call opcode family
 * (plain / VOIDCALL / LCALL for 64 bit ints / FCALL for floats / VCALL for
 * valuetypes), further selected by CALLI (indirect) and VIRT (virtual).
 * NOTE(review): this listing is elided — several case labels and the
 * enum-handling label between the numbered lines are missing from view.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref values are passed as native pointers -> plain call opcodes */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* resolve type/method var types under generic sharing before switching */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
/* small integer types: fit in a native register -> plain call */
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* native pointer sized types -> plain call */
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* reference types -> plain call */
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64 bit integers -> LCALL family */
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
/* floating point -> FCALL family */
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* enums are classified by their underlying integral type */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
/* generic instances are classified by their generic type definition */
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): listing is elided — some case labels, returns and braces
 * between the numbered lines are missing from view. */
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* strip enum wrappers etc. before classifying the target */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
/* 32 bit integer targets accept I4 and native pointers */
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* valuetype targets additionally require the exact same klass */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
/* type/method variables only occur under generic sharing */
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): listing is elided — some case labels, returns and braces
 * between the numbered lines are missing from view. */
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* the implicit 'this' argument must be an object, managed ptr or native ptr */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
/* resolve type/method var types under generic sharing before switching */
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* enums are classified by their underlying integral type */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
/* generic instances are classified by their generic type definition */
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Materialize the hidden IMT/method argument for CALL: either the
 * supplied IMT_ARG, or (AOT) a METHODCONST patch, or (JIT) the method
 * pointer itself, and attach it to the call as an out-argument register.
 * NOTE(review): listing is elided — the LLVM/non-LLVM and
 * MONO_ARCH_IMT_REG #if structure between the numbered lines is missing
 * from view.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
/* LLVM path: the argument is passed explicitly, keep it in a vreg */
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: pin the argument into the architecture's IMT register */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* fallback when no dedicated IMT register exists on this architecture */
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * selecting the opcode from the return type (or OP_TAILCALL for TAIL),
 * setting up valuetype-return handling and letting the backend lower the
 * argument passing. The instruction is NOT yet added to a bblock.
 * NOTE(review): listing is elided — some declarations, braces and #ifdef
 * structure between the numbered lines are missing from view.
 */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* vtype returns: either reuse the caller-provided vret_addr ... */
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
/* ... or allocate a local to receive the value */
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* let the backend (or LLVM) lower argument passing for this call */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* track the largest outgoing parameter area needed by any call */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 *
 *   Attach the runtime-generic-context argument in RGCTX_REG to CALL by
 * pinning it into the architecture's RGCTX register.
 * NOTE(review): listing is elided — the #ifdef tail (and any non-RGCTX_REG
 * fallback) between the numbered lines is missing from view.
 */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
/* record that the rgctx register is live so it is not clobbered */
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG and
 * arguments ARGS. THIS being non-NULL selects virtual dispatch; IMT_ARG is
 * the precomputed interface method table argument, if any. Handles string
 * ctors, transparent-proxy remoting wrappers, delegate Invoke fast paths,
 * devirtualization of final/non-virtual methods, and vtable/IMT dispatch.
 * Returns the call instruction.
 * NOTE(review): listing is elided — declarations, braces and some #ifdef
 * structure between the numbered lines are missing from view.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* the receiver may be a transparent proxy (remoting) */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
/* shared code: fetch the remoting-check wrapper address from the rgctx */
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* fast path: delegate Invoke goes through delegate->invoke_impl */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
/* devirtualize: non-virtual, or final and not a remoting wrapper */
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* true virtual dispatch: call through a vtable or IMT slot */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
/* faulting load doubles as the null check on 'this' */
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* interface call through the IMT, stored before the vtable */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* no IMT: use the per-interface offsets table instead */
2412 if (slot_reg == -1) {
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* plain virtual call through the vtable slot */
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the per-cfg table mapping patch infos used as addresses;
 * pointer identity is sufficient, hence NULL hash/equal functions. */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The patch info itself is used as the (fake) call address; flag the call so
 * the backend knows fptr is really a patch to be resolved later. */
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   For pinvoke (or LLVM) calls that return sub-word integers, emit an
 * explicit widening conversion of the call result INS, since native code may
 * leave the upper bits of the return register uninitialized.  Returns the
 * (possibly replaced) result instruction.
 * NOTE(review): line-sampled listing; `break' statements and the widen_op
 * initialization between the visible lines are not shown.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
/* Emit the widening move and keep the original stack type. */
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Lazily look up and cache the 3-argument "memcpy" helper method on
 * mono_defaults.string_class (corlib's internal String class).  Aborts if a
 * corlib without the helper is found.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set a bit in *WB_BITMAP for every pointer-sized slot of KLASS (starting
 * at byte OFFSET) that holds an object reference, recursing into value-type
 * fields that themselves contain references.  Static fields are skipped.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes the field offsets include the MonoObject header; strip it. */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference slots must be pointer-aligned for the bitmap encoding to work. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit a write-barrier-aware copy of a value of type KLASS of SIZE
 * bytes from iargs [1] to iargs [0].  Returns early (so the caller falls back
 * to a generic path) when ALIGN is below pointer size or SIZE exceeds the
 * bitmap capacity; moderately large sizes are handled by the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of unrolling.
 * NOTE(review): the listing is line-sampled; the `return' statements and the
 * need_wb shift between the visible lines are not shown here.
 */
2594 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2596 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2597 unsigned need_wb = 0;
2602 /*types with references can't have alignment smaller than sizeof(void*) */
2603 if (align < SIZEOF_VOID_P)
2606 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2607 if (size > 32 * SIZEOF_VOID_P)
/* Compute which pointer-sized slots contain references. */
2610 create_write_barrier_bitmap (klass, &need_wb, 0);
2612 /* We don't unroll more than 5 stores to avoid code bloat. */
2613 if (size > 5 * SIZEOF_VOID_P) {
2614 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size. */
2615 size += (SIZEOF_VOID_P - 1);
2616 size &= ~(SIZEOF_VOID_P - 1);
2618 EMIT_NEW_ICONST (cfg, iargs [2], size);
2619 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2620 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2624 destreg = iargs [0]->dreg;
2625 srcreg = iargs [1]->dreg;
2628 dest_ptr_reg = alloc_preg (cfg);
2629 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word at a time below. */
2632 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy; emit a write barrier for reference slots. */
2634 while (size >= SIZEOF_VOID_P) {
2635 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2638 if (need_wb & 0x1) {
2639 MonoInst *dummy_use;
2641 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2642 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg alive across the barrier call for the register allocator. */
2644 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2645 dummy_use->sreg1 = dest_ptr_reg;
2646 MONO_ADD_INS (cfg->cbb, dummy_use);
2650 offset += SIZEOF_VOID_P;
2651 size -= SIZEOF_VOID_P;
2654 /*tmp += sizeof (void*)*/
2655 if (size >= SIZEOF_VOID_P) {
2656 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2657 MONO_ADD_INS (cfg->cbb, iargs [0]);
2661 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with progressively narrower loads/stores. */
2663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2664 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2687 * Emit code to copy a valuetype of type @klass whose address is stored in
2688 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native: use the native (marshalled) size/layout; implies no references. */
2691 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2693 MonoInst *iargs [4];
2696 MonoMethod *memcpy_method;
2700 * This check breaks with spilled vars... need to handle it during verification anyway.
2701 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Pick the size and alignment depending on managed vs. native layout. */
2705 n = mono_class_native_size (klass, &align);
2707 n = mono_class_value_size (klass, &align);
2709 /* if native is true there should be no references in the struct */
2710 if (cfg->gen_write_barriers && klass->has_references && !native) {
2711 /* Avoid barriers when storing to the stack */
2712 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2713 (dest->opcode == OP_LDADDR))) {
2714 int context_used = 0;
2719 if (cfg->generic_sharing_context)
2720 context_used = mono_class_check_context_used (klass);
2722 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2723 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2725 } else if (context_used) {
/* Shared code: load the class from the rgctx instead of embedding it. */
2726 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2728 if (cfg->compile_aot) {
2729 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2731 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy reads the class's GC descriptor; make sure it exists. */
2732 mono_class_compute_gc_descriptor (klass);
2736 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline small copies, otherwise call the memcpy helper. */
2741 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2742 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2743 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2747 EMIT_NEW_ICONST (cfg, iargs [2], n);
2749 memcpy_method = get_memcpy_method ();
2750 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Lazily look up and cache the 3-argument "memset" helper method on
 * mono_defaults.string_class.  Aborts if a corlib without the helper is found.
 */
2755 get_memset_method (void)
2757 static MonoMethod *memset_method = NULL;
2758 if (!memset_method) {
2759 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2761 g_error ("Old corlib found. Install a new one");
2763 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a value of type KLASS at the address in
 * DEST->dreg: an inline memset for small sizes, otherwise a call to the
 * corlib memset helper.  IP is unused in the visible lines.
 */
2767 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2769 MonoInst *iargs [3];
2772 MonoMethod *memset_method;
2774 /* FIXME: Optimize this for the case when dest is an LDADDR */
2776 mono_class_init (klass);
2777 n = mono_class_value_size (klass, &align);
/* Small values: expand the memset inline. */
2779 if (n <= sizeof (gpointer) * 5) {
2780 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2783 memset_method = get_memset_method ();
2785 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2786 EMIT_NEW_ICONST (cfg, iargs [2], n);
2787 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context to use for METHOD:
 * the method rgctx (mrgctx) variable for shared generic methods, the vtable
 * variable for static/valuetype methods, or the vtable loaded from `this'
 * otherwise.  Only valid under generic sharing.
 * NOTE(review): line-sampled listing; the returns between the visible lines
 * are not shown here.
 */
2792 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2794 MonoInst *this = NULL;
2796 g_assert (cfg->generic_sharing_context);
/* Load `this' only for non-static, reference-type methods that don't use a
 * method-level generic context. */
2798 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2799 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2800 !method->klass->valuetype)
2801 EMIT_NEW_ARGLOAD (cfg, this, 0);
2803 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2804 MonoInst *mrgctx_loc, *mrgctx_var;
2807 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The mrgctx was stored into the vtable variable by the prolog. */
2809 mrgctx_loc = mono_get_vtable_var (cfg);
2810 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2813 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2814 MonoInst *vtable_loc, *vtable_var;
2818 vtable_loc = mono_get_vtable_var (cfg);
2819 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2821 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load the vtable out of it. */
2822 MonoInst *mrgctx_var = vtable_var;
2825 vtable_reg = alloc_preg (cfg);
2826 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2827 vtable_var->type = STACK_PTR;
/* Instance methods: load the vtable from `this'. */
2833 int vtable_reg, res_reg;
2835 vtable_reg = alloc_preg (cfg);
2836 res_reg = alloc_preg (cfg);
2837 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing a lazy
 * rgctx fetch of INFO_TYPE for the patch (PATCH_TYPE, PATCH_DATA), resolved
 * relative to METHOD.  IN_MRGCTX selects the method rgctx over the vtable.
 */
2842 static MonoJumpInfoRgctxEntry *
2843 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2845 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2846 res->method = method;
2847 res->in_mrgctx = in_mrgctx;
2848 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2849 res->data->type = patch_type;
2850 res->data->data.target = patch_data;
2851 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline for ENTRY, passing RGCTX
 * as the single argument. */
2856 static inline MonoInst*
2857 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2859 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the rgctx slot of type RGCTX_TYPE for KLASS from the
 * current method's runtime generic context.
 */
2863 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2864 MonoClass *klass, int rgctx_type)
2866 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2867 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2869 return emit_rgctx_fetch (cfg, rgctx, entry);
2873 * emit_get_rgctx_method:
2875 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2876 * normal constants, else emit a load from the rgctx.
2879 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2880 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the value is known at compile time, emit a constant. */
2882 if (!context_used) {
2885 switch (rgctx_type) {
2886 case MONO_RGCTX_INFO_METHOD:
2887 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2889 case MONO_RGCTX_INFO_METHOD_RGCTX:
2890 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2893 g_assert_not_reached ();
/* Shared code: fetch the value lazily from the runtime generic context. */
2896 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2897 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2899 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the rgctx slot of type RGCTX_TYPE for FIELD from the
 * current method's runtime generic context.
 */
2904 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2905 MonoClassField *field, int rgctx_type)
2907 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2908 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2910 return emit_rgctx_fetch (cfg, rgctx, entry);
2914 * On return the caller must check @klass for load errors.
/* Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either loaded from the rgctx (shared code) or as a constant. */
2917 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2919 MonoInst *vtable_arg;
2921 int context_used = 0;
2923 if (cfg->generic_sharing_context)
2924 context_used = mono_class_check_context_used (klass);
2927 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2928 klass, MONO_RGCTX_INFO_VTABLE);
2930 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2934 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature. */
2937 if (COMPILE_LLVM (cfg))
2938 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2940 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2941 #ifdef MONO_ARCH_VTABLE_REG
/* On architectures with a dedicated vtable register, pass it there. */
2942 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2943 cfg->uses_vtable_reg = TRUE;
2950 * On return the caller must check @array_class for load errors
/* Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  The comparison strategy depends on
 * compilation mode (shared/AOT/JIT). */
2953 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2955 int vtable_reg = alloc_preg (cfg);
2956 int context_used = 0;
2958 if (cfg->generic_sharing_context)
2959 context_used = mono_class_check_context_used (array_class);
2961 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare the MonoClass rather than the (domain specific) vtable. */
2963 if (cfg->opt & MONO_OPT_SHARED) {
2964 int class_reg = alloc_preg (cfg);
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2966 if (cfg->compile_aot) {
2967 int klass_reg = alloc_preg (cfg);
2968 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2969 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: load the expected vtable from the rgctx. */
2973 } else if (context_used) {
2974 MonoInst *vtable_ins;
2976 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2977 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2979 if (cfg->compile_aot) {
2983 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2985 vt_reg = alloc_preg (cfg);
2986 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2987 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2990 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2996 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When running with --debug=casts, record the source class (loaded from
 * OBJ_REG's vtable) and the target KLASS in the JIT TLS, so a failing cast
 * can produce a detailed error message.  No-op otherwise.
 */
3000 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3002 if (mini_get_debug_options ()->better_cast_details) {
3003 int to_klass_reg = alloc_preg (cfg);
3004 int vtable_reg = alloc_preg (cfg);
3005 int klass_reg = alloc_preg (cfg);
3006 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is not available on all platforms. */
3009 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3013 MONO_ADD_INS (cfg->cbb, tls_get);
3014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3017 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3018 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3019 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the cast-details TLS state set by save_cast_details (only active
 * with --debug=casts). */
3024 reset_cast_details (MonoCompile *cfg)
3026 /* Reset the variables holding the cast details */
3027 if (mini_get_debug_options ()->better_cast_details) {
3028 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3030 MONO_ADD_INS (cfg->cbb, tls_get);
3031 /* It is enough to reset the from field */
3032 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3037 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3038 * generic code is generated.
/* Emits a call to KLASS's Nullable Unbox(obj) helper; under sharing the
 * helper's address and rgctx are fetched through the rgctx. */
3041 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3043 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3046 MonoInst *rgctx, *addr;
3048 /* FIXME: What if the class is shared? We might not
3049 have to get the address of the method from the
3051 addr = emit_get_rgctx_method (cfg, context_used, method,
3052 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3054 rgctx = emit_get_rgctx (cfg, method, context_used);
3056 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared case: a direct call works. */
3058 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in sp [0] to a value of type KLASS: verify
 * it is a non-array instance whose element class matches, then return an
 * address instruction pointing just past the MonoObject header.
 * NOTE(review): line-sampled listing; some control flow between the visible
 * lines is not shown here.
 */
3063 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3067 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3068 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3069 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3070 int rank_reg = alloc_dreg (cfg ,STACK_I4);
/* Loading the vtable also acts as the null check (fault on null). */
3072 obj_reg = sp [0]->dreg;
3073 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3076 /* FIXME: generics */
3077 g_assert (klass->rank == 0);
/* The object must not be an array. */
3080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3081 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx. */
3087 MonoInst *element_class;
3089 /* This assertion is from the unboxcast insn */
3090 g_assert (klass->rank == 0);
3092 element_class = emit_get_rgctx_klass (cfg, context_used,
3093 klass->element_class, MONO_RGCTX_INFO_KLASS);
3095 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3096 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3098 save_cast_details (cfg, klass->element_class, obj_reg);
3099 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3100 reset_cast_details (cfg);
/* Result: address of the value, i.e. obj + sizeof (MonoObject). */
3103 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3104 MONO_ADD_INS (cfg->cbb, add);
3105 add->type = STACK_MP;
3112 * Returns NULL and set the cfg exception on error.
/* Emit IR to allocate an instance of KLASS (FOR_BOX selects the boxing
 * allocator where available).  Chooses between the shared-generic path,
 * domain-shared mono_object_new, an AOT mscorlib helper, a GC managed
 * allocator and the class's allocation function.
 * NOTE(review): line-sampled listing; some branches/returns between the
 * visible lines are not shown. */
3115 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3117 MonoInst *iargs [2];
/* Shared generic code: the class/vtable must come from the rgctx. */
3123 MonoInst *iargs [2];
3126 FIXME: we cannot get managed_alloc here because we can't get
3127 the class's vtable (because it's not a closed class)
3129 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3130 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3133 if (cfg->opt & MONO_OPT_SHARED)
3134 rgctx_info = MONO_RGCTX_INFO_KLASS;
3136 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3137 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3139 if (cfg->opt & MONO_OPT_SHARED) {
3140 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3142 alloc_ftn = mono_object_new;
3145 alloc_ftn = mono_object_new_specific;
3148 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Domain-shared code: pass the domain and class explicitly. */
3151 if (cfg->opt & MONO_OPT_SHARED) {
3152 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3153 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3155 alloc_ftn = mono_object_new;
3156 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3157 /* This happens often in argument checking code, eg. throw new FooException... */
3158 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3159 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3160 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3162 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3163 MonoMethod *managed_alloc = NULL;
/* vtable creation can fail on broken types; surface it as a TypeLoadException. */
3167 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3168 cfg->exception_ptr = klass;
3172 #ifndef MONO_CROSS_COMPILE
3173 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3176 if (managed_alloc) {
3177 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3178 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3180 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in words as first arg. */
3182 guint32 lw = vtable->klass->instance_size;
3183 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3184 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3185 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3188 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3192 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3196 * Returns NULL and set the cfg exception on error.
/* Emit IR to box VAL into an instance of KLASS.  Nullable types go through
 * the Nullable Box(val) helper; everything else allocates an object and
 * stores the value just past the MonoObject header. */
3199 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3201 MonoInst *alloc, *ins;
3203 if (mono_class_is_nullable (klass)) {
3204 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3207 /* FIXME: What if the class is shared? We might not
3208 have to get the method address from the RGCTX. */
3209 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3210 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3211 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3213 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3215 return mono_emit_method_call (cfg, method, &val, NULL);
3219 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Store the value into the freshly allocated box. */
3223 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Decide whether an isinst/castclass against KLASS needs the slow icall path.
 * The leading `TRUE ||' currently forces the complex path for every class
 * (see the FIXME below about failing class-library tests). */
3228 // FIXME: This doesn't work yet (class libs tests fail?)
3229 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3232 * Returns NULL and set the cfg exception on error.
/* Emit IR implementing the `castclass' opcode: return SRC if it is null or
 * an instance of KLASS, otherwise throw InvalidCastException.  Complex
 * classes go through the mono_object_castclass icall; the inline paths check
 * the vtable/class chain directly. */
3235 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3237 MonoBasicBlock *is_null_bb;
3238 int obj_reg = src->dreg;
3239 int vtable_reg = alloc_preg (cfg);
3240 MonoInst *klass_inst = NULL;
/* Shared generic code: the class is loaded from the rgctx. */
3245 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3246 klass, MONO_RGCTX_INFO_KLASS);
3248 if (is_complex_isinst (klass)) {
3249 /* Complex case, handle by an icall */
3255 args [1] = klass_inst;
3257 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3259 /* Simple case, handled by the code below */
/* null always passes the cast. */
3263 NEW_BBLOCK (cfg, is_null_bb);
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3268 save_cast_details (cfg, klass, obj_reg);
3270 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3272 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3274 int klass_reg = alloc_preg (cfg);
3276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes admit a single direct class comparison. */
3278 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3279 /* the remoting code is broken, access the class for now */
3280 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 cfg->exception_ptr = klass;
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3295 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3299 MONO_START_BB (cfg, is_null_bb);
3301 reset_cast_details (cfg);
3307 * Returns NULL and set the cfg exception on error.
/* Emit IR implementing the `isinst' opcode: produce SRC when it is null or
 * an instance of KLASS, otherwise null.  Complex classes go through the
 * mono_object_isinst icall; the inline paths special-case interfaces,
 * arrays (with per-element-class checks), nullables and sealed classes. */
3310 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3313 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3314 int obj_reg = src->dreg;
3315 int vtable_reg = alloc_preg (cfg);
3316 int res_reg = alloc_preg (cfg);
3317 MonoInst *klass_inst = NULL;
3320 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3322 if (is_complex_isinst (klass)) {
3325 /* Complex case, handle by an icall */
3331 args [1] = klass_inst;
3333 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3335 /* Simple case, the code below can handle it */
3339 NEW_BBLOCK (cfg, is_null_bb);
3340 NEW_BBLOCK (cfg, false_bb);
3341 NEW_BBLOCK (cfg, end_bb);
3343 /* Do the assignment at the beginning, so the other assignment can be if converted */
3344 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3345 ins->type = STACK_OBJ;
/* null input -> result is the (null) input itself. */
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3353 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3354 g_assert (!context_used);
3355 /* the is_null_bb target simply copies the input register to the output */
3356 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3358 int klass_reg = alloc_preg (cfg);
/* Array case: the rank must match, then the element (cast) class is checked. */
3361 int rank_reg = alloc_preg (cfg);
3362 int eclass_reg = alloc_preg (cfg);
3364 g_assert (!context_used);
3365 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3366 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum[]/primitive wrappers; handle the special
 * cast_class combinations individually. */
3370 if (klass->cast_class == mono_defaults.object_class) {
3371 int parent_reg = alloc_preg (cfg);
3372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3373 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3374 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3376 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3377 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3378 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3380 } else if (klass->cast_class == mono_defaults.enum_class) {
3381 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3383 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3384 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3386 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3387 /* Check that the object is a vector too */
3388 int bounds_reg = alloc_preg (cfg);
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3394 /* the is_null_bb target simply copies the input register to the output */
3395 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3397 } else if (mono_class_is_nullable (klass)) {
3398 g_assert (!context_used);
3399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3400 /* the is_null_bb target simply copies the input register to the output */
3401 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes admit a single direct class comparison. */
3403 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3404 g_assert (!context_used);
3405 /* the remoting code is broken, access the class for now */
3406 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3407 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3409 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3410 cfg->exception_ptr = klass;
3413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3422 /* the is_null_bb target simply copies the input register to the output */
3423 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result is null. */
3428 MONO_START_BB (cfg, false_bb);
3430 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3433 MONO_START_BB (cfg, is_null_bb);
3435 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal `cisinst' opcode; see the inline comment below
 * for the 0/1/2 result encoding.  Transparent-proxy objects with custom type
 * info produce result 2 so the runtime can resolve the check remotely.
 */
3441 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3443 /* This opcode takes as input an object reference and a class, and returns:
3444 0) if the object is an instance of the class,
3445 1) if the object is not instance of the class,
3446 2) if the object is a proxy whose type cannot be determined */
3449 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3450 int obj_reg = src->dreg;
3451 int dreg = alloc_ireg (cfg);
3453 int klass_reg = alloc_preg (cfg);
3455 NEW_BBLOCK (cfg, true_bb);
3456 NEW_BBLOCK (cfg, false_bb);
3457 NEW_BBLOCK (cfg, false2_bb);
3458 NEW_BBLOCK (cfg, end_bb);
3459 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3464 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3465 NEW_BBLOCK (cfg, interface_fail_bb);
3467 tmp_reg = alloc_preg (cfg);
3468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3469 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: it may still be a proxy with custom type info. */
3470 MONO_START_BB (cfg, interface_fail_bb);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3473 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3475 tmp_reg = alloc_preg (cfg);
3476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect transparent proxies before the class check. */
3480 tmp_reg = alloc_preg (cfg);
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3484 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote class's proxy_class instead. */
3485 tmp_reg = alloc_preg (cfg);
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3489 tmp_reg = alloc_preg (cfg);
3490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3494 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3497 MONO_START_BB (cfg, no_proxy_bb);
3499 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result materialization: 1 = not an instance, 2 = undecidable proxy, 0 = yes. */
3502 MONO_START_BB (cfg, false_bb);
3504 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3507 MONO_START_BB (cfg, false2_bb);
3509 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3512 MONO_START_BB (cfg, true_bb);
3514 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3516 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an ICONST-typed instruction for the stack. */
3519 MONO_INST_NEW (cfg, ins, OP_ICONST);
3521 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a castclass variant that is aware of transparent proxies.
 * Takes an object reference SRC and a class KLASS and produces an integer
 * result in a fresh register (see the result-value comment below); on
 * failure the emitted code throws InvalidCastException instead of
 * returning a value.
 * NOTE(review): some lines of this function (braces, declarations of
 * `ins`/`vtable`, returns) are not visible in this extract.
 */
3527 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3529 /* This opcode takes as input an object reference and a class, and returns:
3530 0) if the object is an instance of the class,
3531 1) if the object is a proxy whose type cannot be determined
3532 an InvalidCastException exception is thrown otherwise*/
3535 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3536 int obj_reg = src->dreg;
3537 int dreg = alloc_ireg (cfg);
3538 int tmp_reg = alloc_preg (cfg);
3539 int klass_reg = alloc_preg (cfg);
3541 NEW_BBLOCK (cfg, end_bb);
3542 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0) */
3544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a better InvalidCastException message */
3547 save_cast_details (cfg, klass, obj_reg);
/* Interface case: try the interface check first, then fall back to the proxy path */
3549 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3550 NEW_BBLOCK (cfg, interface_fail_bb);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3553 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3554 MONO_START_BB (cfg, interface_fail_bb);
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy objects that failed the interface check throw (inside the helper) */
3557 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3559 tmp_reg = alloc_preg (cfg);
3560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3562 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: type cannot be determined here, result 1 */
3564 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3568 NEW_BBLOCK (cfg, no_proxy_bb);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3572 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the proxied (remote) class instead */
3574 tmp_reg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3578 tmp_reg = alloc_preg (cfg);
3579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3583 NEW_BBLOCK (cfg, fail_1_bb);
3585 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3587 MONO_START_BB (cfg, fail_1_bb);
3589 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a normal castclass check (throws on mismatch) */
3592 MONO_START_BB (cfg, no_proxy_bb);
3594 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3597 MONO_START_BB (cfg, ok_result_bb);
3599 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3601 MONO_START_BB (cfg, end_bb);
3604 MONO_INST_NEW (cfg, ins, OP_ICONST);
3606 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the body of mono_delegate_ctor (): allocate the delegate object,
 * store its `target`, `method`, `method_code` and `invoke_impl` fields, and
 * let the delegate trampoline perform the remaining runtime checks.
 */
3612 * Returns NULL and set the cfg exception on error.
3614 static G_GNUC_UNUSED MonoInst*
3615 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3617 gpointer *trampoline;
3618 MonoInst *obj, *method_ins, *tramp_ins;
3622 obj = handle_alloc (cfg, klass, FALSE, 0);
3626 /* Inline the contents of mono_delegate_ctor */
3628 /* Set target field */
3629 /* Optimize away setting of NULL target */
3630 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3633 /* Set method field */
3634 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3638 * To avoid looking up the compiled code belonging to the target method
3639 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3640 * store it, and we fill it after the method has been compiled.
3642 if (!cfg->compile_aot && !method->dynamic) {
3643 MonoInst *code_slot_ins;
3646 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method_code_hash is lazily created under the domain lock */
3648 domain = mono_domain_get ();
3649 mono_domain_lock (domain);
3650 if (!domain_jit_info (domain)->method_code_hash)
3651 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3652 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3654 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3655 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3657 mono_domain_unlock (domain);
3659 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3664 /* Set invoke_impl field */
3665 if (cfg->compile_aot) {
/* AOT: the trampoline address is only known at load time, emit a patch */
3666 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3668 trampoline = mono_create_delegate_trampoline (klass);
3669 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3673 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall for a
 * multi-dimensional `newarr`/`newobj` on an array type.  SP holds the
 * dimension arguments; IP is the current IL position.
 */
3679 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3681 MonoJitICallInfo *info;
3683 /* Need to register the icall so it gets an icall wrapper */
3684 info = mono_get_array_new_va_icall (rank);
3686 cfg->flags |= MONO_CFG_HAS_VARARGS;
3688 /* mono_array_new_va () needs a vararg calling convention */
3689 cfg->disable_llvm = TRUE;
3691 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3692 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 * basic block, exactly once per method (guarded by got_var_allocated), and
 * keep the variable live until the exit block via a dummy use.
 */
3696 mono_emit_load_got_addr (MonoCompile *cfg)
3698 MonoInst *getaddr, *dummy_use;
3700 if (!cfg->got_var || cfg->got_var_allocated)
3703 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3704 getaddr->dreg = cfg->got_var->dreg;
3706 /* Add it to the start of the first bblock */
3707 if (cfg->bb_entry->code) {
3708 getaddr->next = cfg->bb_entry->code;
3709 cfg->bb_entry->code = getaddr;
3712 MONO_ADD_INS (cfg->bb_entry, getaddr);
3714 cfg->got_var_allocated = TRUE;
3717 * Add a dummy use to keep the got_var alive, since real uses might
3718 * only be generated by the back ends.
3719 * Add it to end_bblock, so the variable's lifetime covers the whole
3721 * It would be better to make the usage of the got var explicit in all
3722 * cases when the backend needs it (i.e. calls, throw etc.), so this
3723 * wouldn't be needed.
3725 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3726 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3729 static int inline_limit;
3730 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing, deep inline nesting, noinline/synchronized/
 * marshalbyref methods, oversized bodies (MONO_INLINELIMIT env override),
 * classes whose cctor would have to run, declarative security, and (on
 * soft-float targets) R4 parameters or return values.
 */
3733 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3735 MonoMethodHeaderSummary header;
3737 #ifdef MONO_ARCH_SOFT_FLOAT
3738 MonoMethodSignature *sig = mono_method_signature (method);
3742 if (cfg->generic_sharing_context)
3745 if (cfg->inline_depth > 10)
3748 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): `signature` below does not match the `sig` declared above
 * (which is only declared under MONO_ARCH_SOFT_FLOAT) — this branch likely
 * only compiles on LMF_OPS architectures; verify against the full source. */
3749 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3750 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3751 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3756 if (!mono_method_get_header_summary (method, &header))
3759 /*runtime, icall and pinvoke are checked by summary call*/
3760 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3761 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3762 (method->klass->marshalbyref) ||
3766 /* also consider num_locals? */
3767 /* Do the size check early to avoid creating vtables */
3768 if (!inline_limit_inited) {
3769 if (getenv ("MONO_INLINELIMIT"))
3770 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3772 inline_limit = INLINE_LENGTH_LIMIT;
3773 inline_limit_inited = TRUE;
3775 if (header.code_size >= inline_limit)
3779 * if we can initialize the class of the method right away, we do,
3780 * otherwise we don't allow inlining if the class needs initialization,
3781 * since it would mean inserting a call to mono_runtime_class_init()
3782 * inside the inlined code
3784 if (!(cfg->opt & MONO_OPT_SHARED)) {
3785 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3786 if (cfg->run_cctors && method->klass->has_cctor) {
3787 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3788 if (!method->klass->runtime_info)
3789 /* No vtable created yet */
3791 vtable = mono_class_vtable (cfg->domain, method->klass);
3794 /* This makes so that inline cannot trigger */
3795 /* .cctors: too many apps depend on them */
3796 /* running with a specific order... */
3797 if (! vtable->initialized)
3799 mono_runtime_class_init (vtable);
3801 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3802 if (!method->klass->runtime_info)
3803 /* No vtable created yet */
3805 vtable = mono_class_vtable (cfg->domain, method->klass);
3808 if (!vtable->initialized)
3813 * If we're compiling for shared code
3814 * the cctor will need to be run at aot method load time, for example,
3815 * or at the end of the compilation of the inlining method.
3817 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3822 * CAS - do not inline methods with declarative security
3823 * Note: this has to be before any possible return TRUE;
3825 if (mono_method_has_declsec (method))
3828 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: R4 values need helper calls, so don't inline methods using them */
3830 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3832 for (i = 0; i < sig->param_count; ++i)
3833 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a class
 * initialization check for VTABLE's class.  Already-initialized classes
 * (outside AOT), beforefieldinit classes, and accesses from within the owning
 * class's own instance methods don't need one.
 */
3841 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3843 if (vtable->initialized && !cfg->compile_aot)
3846 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3849 if (!mono_class_needs_cctor_run (vtable->klass, method))
3852 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3853 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional ldelema:
 * &arr->vector [index * element_size].  Emits a bounds check when BCHECK is
 * set, and uses a single LEA on x86/amd64 for power-of-two element sizes.
 * Returns the instruction producing the element address (STACK_PTR).
 */
3860 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3864 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3866 mono_class_init (klass);
3867 size = mono_class_array_element_size (klass);
3869 mult_reg = alloc_preg (cfg);
3870 array_reg = arr->dreg;
3871 index_reg = index->dreg;
3873 #if SIZEOF_REGISTER == 8
3874 /* The array reg is 64 bits but the index reg is only 32 */
3875 if (COMPILE_LLVM (cfg)) {
3877 index2_reg = index_reg;
3879 index2_reg = alloc_preg (cfg);
3880 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3883 if (index->type == STACK_I8) {
3884 index2_reg = alloc_preg (cfg);
3885 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3887 index2_reg = index_reg;
3892 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3894 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale and offset into one LEA for 1/2/4/8-byte elements */
3895 if (size == 1 || size == 2 || size == 4 || size == 8) {
3896 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3898 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3899 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector) */
3905 add_reg = alloc_preg (cfg);
3907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3908 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3909 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3910 ins->type = STACK_PTR;
3911 MONO_ADD_INS (cfg->cbb, ins);
3916 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for a two-dimensional ldelema, including
 * per-dimension lower-bound adjustment and range checks against the array's
 * MonoArrayBounds.  addr = arr + ((idx1' * len2) + idx2') * size + vector
 * offset, where idxN' = idxN - lower_boundN.  Returns the address
 * instruction (STACK_MP).  Only compiled when the arch has real mul/div.
 */
3918 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3920 int bounds_reg = alloc_preg (cfg);
3921 int add_reg = alloc_preg (cfg);
3922 int mult_reg = alloc_preg (cfg);
3923 int mult2_reg = alloc_preg (cfg);
3924 int low1_reg = alloc_preg (cfg);
3925 int low2_reg = alloc_preg (cfg);
3926 int high1_reg = alloc_preg (cfg);
3927 int high2_reg = alloc_preg (cfg);
3928 int realidx1_reg = alloc_preg (cfg);
3929 int realidx2_reg = alloc_preg (cfg);
3930 int sum_reg = alloc_preg (cfg);
3935 mono_class_init (klass);
3936 size = mono_class_array_element_size (klass);
3938 index1 = index_ins1->dreg;
3939 index2 = index_ins2->dreg;
3941 /* range checking */
3942 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3943 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: subtract the lower bound, then unsigned-compare with length */
3945 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3946 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3947 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3949 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3951 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, reading the second MonoArrayBounds entry */
3953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3954 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3955 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3956 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3957 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3958 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3959 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flattened offset: (idx1' * len2 + idx2') * size, plus the vector offset */
3961 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3962 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3963 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3964 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3965 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3967 ins->type = STACK_MP;
3969 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch an array Address () call (CMETHOD) to the fastest available
 * element-address computation: inline 1-dim, inline 2-dim when OP_LMUL is
 * available and intrinsics are enabled, otherwise a call to the generated
 * array-address marshal wrapper.  IS_SET flags a store (extra value arg).
 */
3976 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3980 MonoMethod *addr_method;
3983 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3986 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3988 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3989 /* emit_ldelema_2 depends on OP_LMUL */
3990 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3991 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/element-size specific address wrapper */
3995 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3996 addr_method = mono_marshal_get_array_address (rank, element_size);
3997 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy) */
4002 static MonoBreakPolicy
4003 always_insert_breakpoint (MonoMethod *method)
4005 return MONO_BREAK_POLICY_ALWAYS;
4008 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4011 * mono_set_break_policy:
4012 * policy_callback: the new callback function
4014 * Allow embedders to decide whether to actually obey breakpoint instructions
4015 * (both break IL instructions and Debugger.Break () method calls), for example
4016 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4017 * untrusted or semi-trusted code.
4019 * @policy_callback will be called every time a break point instruction needs to
4020 * be inserted with the method argument being the method that calls Debugger.Break()
4021 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4022 * if it wants the breakpoint to not be effective in the given method.
4023 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always insert breakpoints). */
4026 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4028 if (policy_callback)
4029 break_policy_func = policy_callback;
4031 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — the typo is part of the public symbol name,
 * keep it since callers use it)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 * effective.  Unknown policy values fall through to the warning below.
 */
4035 should_insert_brekpoint (MonoMethod *method) {
4036 switch (break_policy_func (method)) {
4037 case MONO_BREAK_POLICY_ALWAYS:
4039 case MONO_BREAK_POLICY_NEVER:
4041 case MONO_BREAK_POLICY_ON_DBG:
4042 return mono_debug_using_mono_debugger ();
4044 g_warning ("Incorrect value returned from break policy callback");
4049 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl as a direct element load/store
 * through the element address; IS_SET selects the store direction.
 */
4051 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4053 MonoInst *addr, *store, *load;
4054 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4056 /* the bounds check is already done by the callers */
4057 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: copy *args[2] into the array slot */
4059 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4060 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* GetGenericValueImpl: copy the array slot into *args[2] */
4062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4063 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence; currently only SIMD ctors are handled (when MONO_OPT_SIMD is
 * enabled).  Returns the emitted instruction or NULL for no intrinsic.
 */
4069 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4071 MonoInst *ins = NULL;
4072 #ifdef MONO_ARCH_SIMD_INTRINSICS
4073 if (cfg->opt & MONO_OPT_SIMD) {
4074 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with an inline intrinsic instruction
 * sequence.  Handles well-known methods of String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Environment and
 * Math, falling through to arch-specific intrinsics at the end.  Returns
 * the result instruction, or NULL when no intrinsic applies.
 */
4084 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4086 MonoInst *ins = NULL;
/* Cached lazily; mono_class_from_name is comparatively expensive */
4088 static MonoClass *runtime_helpers_class = NULL;
4089 if (! runtime_helpers_class)
4090 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4091 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4093 if (cmethod->klass == mono_defaults.string_class) {
4094 if (strcmp (cmethod->name, "get_Chars") == 0) {
4095 int dreg = alloc_ireg (cfg);
4096 int index_reg = alloc_preg (cfg);
4097 int mult_reg = alloc_preg (cfg);
4098 int add_reg = alloc_preg (cfg);
4100 #if SIZEOF_REGISTER == 8
4101 /* The array reg is 64 bits but the index reg is only 32 */
4102 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4104 index_reg = args [1]->dreg;
4106 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4108 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4109 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4110 add_reg = ins->dreg;
4111 /* Avoid a warning */
4113 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4117 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4118 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4119 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4121 type_from_op (ins, NULL, NULL);
4123 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4124 int dreg = alloc_ireg (cfg);
4125 /* Decompose later to allow more optimizations */
4126 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4127 ins->type = STACK_I4;
4128 ins->flags |= MONO_INST_FAULT;
4129 cfg->cbb->has_array_access = TRUE;
4130 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4133 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4134 int mult_reg = alloc_preg (cfg);
4135 int add_reg = alloc_preg (cfg);
4137 /* The corlib functions check for oob already. */
4138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4139 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4141 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4144 } else if (cmethod->klass == mono_defaults.object_class) {
4146 if (strcmp (cmethod->name, "GetType") == 0) {
4147 int dreg = alloc_preg (cfg);
4148 int vt_reg = alloc_preg (cfg);
4149 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4150 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4151 type_from_op (ins, NULL, NULL);
4154 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash from the object address; invalid with a moving GC, hence the guard */
4155 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4156 int dreg = alloc_ireg (cfg);
4157 int t1 = alloc_ireg (cfg);
4159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4160 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4161 ins->type = STACK_I4;
4165 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4166 MONO_INST_NEW (cfg, ins, OP_NOP);
4167 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4171 } else if (cmethod->klass == mono_defaults.array_class) {
4172 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4173 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4174 if (cmethod->name [0] != 'g')
4177 if (strcmp (cmethod->name, "get_Rank") == 0) {
4178 int dreg = alloc_ireg (cfg);
4179 int vtable_reg = alloc_preg (cfg);
4180 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4181 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4182 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4183 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4184 type_from_op (ins, NULL, NULL);
4187 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4188 int dreg = alloc_ireg (cfg);
4190 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4191 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4192 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4197 } else if (cmethod->klass == runtime_helpers_class) {
4199 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4200 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4204 } else if (cmethod->klass == mono_defaults.thread_class) {
4205 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4206 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4207 MONO_ADD_INS (cfg->cbb, ins);
4209 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4210 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4211 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4214 } else if (cmethod->klass == mono_defaults.monitor_class) {
4215 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4216 if (strcmp (cmethod->name, "Enter") == 0) {
4219 if (COMPILE_LLVM (cfg)) {
4221 * Pass the argument normally, the LLVM backend will handle the
4222 * calling convention problems.
4224 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4226 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4227 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4228 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4229 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4232 return (MonoInst*)call;
4233 } else if (strcmp (cmethod->name, "Exit") == 0) {
4236 if (COMPILE_LLVM (cfg)) {
4237 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4239 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4240 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4241 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4242 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4245 return (MonoInst*)call;
4247 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4248 MonoMethod *fast_method = NULL;
4250 /* Avoid infinite recursion */
4251 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4252 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4253 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4256 if (strcmp (cmethod->name, "Enter") == 0 ||
4257 strcmp (cmethod->name, "Exit") == 0)
4258 fast_method = mono_monitor_get_fast_path (cmethod);
4262 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4264 } else if (cmethod->klass->image == mono_defaults.corlib &&
4265 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4266 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4269 #if SIZEOF_REGISTER == 8
4270 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4271 /* 64 bit reads are already atomic */
4272 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4273 ins->dreg = mono_alloc_preg (cfg);
4274 ins->inst_basereg = args [0]->dreg;
4275 ins->inst_offset = 0;
4276 MONO_ADD_INS (cfg->cbb, ins);
4280 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic-add of +1/-1 */
4281 if (strcmp (cmethod->name, "Increment") == 0) {
4282 MonoInst *ins_iconst;
4285 if (fsig->params [0]->type == MONO_TYPE_I4)
4286 opcode = OP_ATOMIC_ADD_NEW_I4;
4287 #if SIZEOF_REGISTER == 8
4288 else if (fsig->params [0]->type == MONO_TYPE_I8)
4289 opcode = OP_ATOMIC_ADD_NEW_I8;
4292 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4293 ins_iconst->inst_c0 = 1;
4294 ins_iconst->dreg = mono_alloc_ireg (cfg);
4295 MONO_ADD_INS (cfg->cbb, ins_iconst);
4297 MONO_INST_NEW (cfg, ins, opcode);
4298 ins->dreg = mono_alloc_ireg (cfg);
4299 ins->inst_basereg = args [0]->dreg;
4300 ins->inst_offset = 0;
4301 ins->sreg2 = ins_iconst->dreg;
4302 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4303 MONO_ADD_INS (cfg->cbb, ins);
4305 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4306 MonoInst *ins_iconst;
4309 if (fsig->params [0]->type == MONO_TYPE_I4)
4310 opcode = OP_ATOMIC_ADD_NEW_I4;
4311 #if SIZEOF_REGISTER == 8
4312 else if (fsig->params [0]->type == MONO_TYPE_I8)
4313 opcode = OP_ATOMIC_ADD_NEW_I8;
4316 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4317 ins_iconst->inst_c0 = -1;
4318 ins_iconst->dreg = mono_alloc_ireg (cfg);
4319 MONO_ADD_INS (cfg->cbb, ins_iconst);
4321 MONO_INST_NEW (cfg, ins, opcode);
4322 ins->dreg = mono_alloc_ireg (cfg);
4323 ins->inst_basereg = args [0]->dreg;
4324 ins->inst_offset = 0;
4325 ins->sreg2 = ins_iconst->dreg;
4326 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4327 MONO_ADD_INS (cfg->cbb, ins);
4329 } else if (strcmp (cmethod->name, "Add") == 0) {
4332 if (fsig->params [0]->type == MONO_TYPE_I4)
4333 opcode = OP_ATOMIC_ADD_NEW_I4;
4334 #if SIZEOF_REGISTER == 8
4335 else if (fsig->params [0]->type == MONO_TYPE_I8)
4336 opcode = OP_ATOMIC_ADD_NEW_I8;
4340 MONO_INST_NEW (cfg, ins, opcode);
4341 ins->dreg = mono_alloc_ireg (cfg);
4342 ins->inst_basereg = args [0]->dreg;
4343 ins->inst_offset = 0;
4344 ins->sreg2 = args [1]->dreg;
4345 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4346 MONO_ADD_INS (cfg->cbb, ins);
4349 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4351 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4352 if (strcmp (cmethod->name, "Exchange") == 0) {
4354 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4356 if (fsig->params [0]->type == MONO_TYPE_I4)
4357 opcode = OP_ATOMIC_EXCHANGE_I4;
4358 #if SIZEOF_REGISTER == 8
4359 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4360 (fsig->params [0]->type == MONO_TYPE_I))
4361 opcode = OP_ATOMIC_EXCHANGE_I8;
4363 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4364 opcode = OP_ATOMIC_EXCHANGE_I4;
4369 MONO_INST_NEW (cfg, ins, opcode);
4370 ins->dreg = mono_alloc_ireg (cfg);
4371 ins->inst_basereg = args [0]->dreg;
4372 ins->inst_offset = 0;
4373 ins->sreg2 = args [1]->dreg;
4374 MONO_ADD_INS (cfg->cbb, ins);
4376 switch (fsig->params [0]->type) {
4378 ins->type = STACK_I4;
4382 ins->type = STACK_I8;
4384 case MONO_TYPE_OBJECT:
4385 ins->type = STACK_OBJ;
4388 g_assert_not_reached ();
/* Storing a reference needs a GC write barrier when enabled */
4391 if (cfg->gen_write_barriers && is_ref) {
4392 MonoInst *dummy_use;
4393 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4394 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4395 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4398 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4400 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4401 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4403 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4404 if (fsig->params [1]->type == MONO_TYPE_I4)
4406 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4407 size = sizeof (gpointer);
4408 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4411 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4412 ins->dreg = alloc_ireg (cfg);
4413 ins->sreg1 = args [0]->dreg;
4414 ins->sreg2 = args [1]->dreg;
4415 ins->sreg3 = args [2]->dreg;
4416 ins->type = STACK_I4;
4417 MONO_ADD_INS (cfg->cbb, ins);
4418 } else if (size == 8) {
4419 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4420 ins->dreg = alloc_ireg (cfg);
4421 ins->sreg1 = args [0]->dreg;
4422 ins->sreg2 = args [1]->dreg;
4423 ins->sreg3 = args [2]->dreg;
4424 ins->type = STACK_I8;
4425 MONO_ADD_INS (cfg->cbb, ins);
4427 /* g_assert_not_reached (); */
4429 if (cfg->gen_write_barriers && is_ref) {
4430 MonoInst *dummy_use;
4431 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4432 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4433 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4436 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Debugger.Break / Environment.get_IsRunningOnWindows ---- */
4440 } else if (cmethod->klass->image == mono_defaults.corlib) {
4441 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4442 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4443 if (should_insert_brekpoint (cfg->method))
4444 MONO_INST_NEW (cfg, ins, OP_BREAK);
4446 MONO_INST_NEW (cfg, ins, OP_NOP);
4447 MONO_ADD_INS (cfg->cbb, ins);
4450 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4451 && strcmp (cmethod->klass->name, "Environment") == 0) {
4453 EMIT_NEW_ICONST (cfg, ins, 1);
4455 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math ---- */
4459 } else if (cmethod->klass == mono_defaults.math_class) {
4461 * There is general branches code for Min/Max, but it does not work for
4463 * http://everything2.com/?node_id=1051618
4467 #ifdef MONO_ARCH_SIMD_INTRINSICS
4468 if (cfg->opt & MONO_OPT_SIMD) {
4469 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally, give the architecture backend a chance */
4475 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations; currently
 * only String.InternalAllocateStr is rewritten to the managed GC allocator
 * (skipped when allocation profiling is active, since the profiler hooks the
 * native path).  Returns the redirected call instruction, or NULL to fall
 * through to the normal call path.
 */
4479 * This entry point could be used later for arbitrary method
4482 inline static MonoInst*
4483 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4484 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4486 if (method->klass == mono_defaults.string_class) {
4487 /* managed string allocation support */
4488 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4489 MonoInst *iargs [2];
4490 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4491 MonoMethod *managed_alloc = NULL;
4493 g_assert (vtable); /* Should not fail since it is System.String */
4494 #ifndef MONO_CROSS_COMPILE
4495 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4499 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4500 iargs [1] = args [0];
4501 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Used during inlining: create a local variable for every argument
 * (including the implicit `this`) of the inlined callee and store the
 * caller's stack values SP into them, so the inlined body can reference
 * them as ordinary arguments.
 */
4508 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4510 MonoInst *store, *temp;
4513 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4514 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4517 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4518 * would be different than the MonoInst's used to represent arguments, and
4519 * the ldelema implementation can't deal with that.
4520 * Solution: When ldelema is used on an inline argument, create a var for
4521 * it, emit ldelema on that var, and emit the saving code below in
4522 * inline_method () if needed.
4524 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4525 cfg->args [i] = temp;
4526 /* This uses cfg->args [i] which is set by the preceding line */
4527 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4528 store->cil_code = sp [0]->cil_code;
4533 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4534 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD if its full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The env var is read once and cached in a function-local static;
 * an unset variable (empty 'limit') disables the filter.
 */
check_inline_called_method_name_limit (MonoMethod *called_method)
	static char *limit = NULL;
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
		if (limit_string != NULL)
			limit = limit_string;
			/* env var not set: cache "" so getenv is not retried */
			limit = (char *) "";
	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);
		/* prefix match against the configured limit */
		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid mirroring check_inline_called_method_name_limit, but the
 * prefix filter (MONO_INLINE_CALLER_METHOD_NAME_LIMIT) is applied to the
 * *caller* method's full name instead of the callee's.
 */
check_inline_caller_method_name_limit (MonoMethod *caller_method)
	static char *limit = NULL;
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
			/* env var not set: cache "" so getenv is not retried */
			limit = (char *) "";
	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		/* prefix match against the configured limit */
		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current position.  Saves the parts of CFG
 * that mono_method_to_ir () overwrites (args, locals, cbb state, generic
 * context, ...), recursively translates the callee's IL between a fresh
 * start/end bblock pair, then either links the inlined blocks into place
 * (cost below the threshold, or INLINE_ALLWAYS) or restores cfg->cbb to
 * discard them.  RVAR, when non-NULL, receives the callee's return value.
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;
	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);
	if (cheader == NULL || mono_loader_get_last_error ()) {
		/* header load failed: clean up and bail out of the inline attempt */
		mono_metadata_free_mh (cheader);
		mono_loader_clear_error ();
	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;
	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;
	/* save the cfg state that mono_method_to_ir () will clobber */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;
	/* translate the callee's IL; negative cost means the inline was aborted */
	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
	ret_var_set = cfg->ret_var_set;
	/* restore the saved cfg state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;
	/* NOTE(review): cost threshold 60 here vs INLINE_LENGTH_LIMIT elsewhere — confirm intended */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
		mono_jit_stats.inlined_methods++;
		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);
		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);
		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
		/*
		 * If the inlined method contains only a throw, then the ret var is not
		 * set, so set it to a dummy value.
		 */
			static double r8_0 = 0.0;
			switch (rvar->type) {
				MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
				MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
				MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
				MONO_INST_NEW (cfg, ins, OP_R8CONST);
				ins->type = STACK_R8;
				ins->inst_p0 = (void*)&r8_0;
				ins->dreg = rvar->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
				MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
				g_assert_not_reached ();
		/* push the return value back on the evaluation stack */
		EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		/* header is freed with the rest of the compile, not here */
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();
		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4788 * Some of these comments may well be out-of-date.
4789 * Design decisions: we do a single pass over the IL code (and we do bblock
4790 * splitting/merging in the few cases when it's required: a back jump to an IL
4791 * address that was not already seen as bblock starting point).
4792 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4793 * Complex operations are decomposed in simpler ones right away. We need to let the
4794 * arch-specific code peek and poke inside this process somehow (except when the
4795 * optimizations can take advantage of the full semantic info of coarse opcodes).
4796 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4797 * MonoInst->opcode initially is the IL opcode or some simplification of that
4798 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4799 * opcode with value bigger than OP_LAST.
4800 * At this point the IR can be handed over to an interpreter, a dumb code generator
4801 * or to the optimizing code generator that will translate it to SSA form.
4803 * Profiling directed optimizations.
4804 * We may compile by default with few or no optimizations and instrument the code
4805 * or the user may indicate what methods to optimize the most either in a config file
4806 * or through repeated runs where the compiler applies offline the optimizations to
4807 * each method and then decides if it was worth it.
4810 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4811 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4812 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4813 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4814 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4815 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4816 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4817 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4819 /* offset from br.s -> br like opcodes */
4820 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP does not start a basic block different
 * from BB, i.e. decoding may continue within BB at that offset.
 */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	/* cil_offset_to_bb maps a CIL offset to the bblock starting there, or NULL */
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode and create a
 * basic block (via GET_BBLOCK) at every branch target and at the instruction
 * following a branch/switch, so the translation pass finds all block starts.
 * Also marks the bblock containing a CEE_THROW as out-of-line.  POS reports
 * how far decoding got (error reporting path not fully visible here).
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;
	cli_addr = ip - start;
	i = mono_opcode_value ((const guint8 **)&ip, end);
	opcode = &mono_opcodes [i];
	/* advance ip past the operand according to the opcode's argument kind */
	switch (opcode->argument) {
	case MonoInlineNone:
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoShortInlineR:
	case MonoShortInlineVar:
	case MonoShortInlineI:
	case MonoShortInlineBrTarget:
		/* 8-bit signed branch displacement, relative to the next instruction */
		target = start + cli_addr + 2 + (signed char)ip [1];
		GET_BBLOCK (cfg, bblock, target);
		/* fall-through instruction also starts a block */
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineBrTarget:
		/* 32-bit signed branch displacement */
		target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
		GET_BBLOCK (cfg, bblock, target);
		GET_BBLOCK (cfg, bblock, ip);
	case MonoInlineSwitch: {
		guint32 n = read32 (ip + 1);
		/* switch targets are relative to the end of the whole switch instruction */
		cli_addr += 5 + 4 * n;
		target = start + cli_addr;
		GET_BBLOCK (cfg, bblock, target);
		for (j = 0; j < n; ++j) {
			target = start + cli_addr + (gint32)read32 (ip);
			GET_BBLOCK (cfg, bblock, target);
		g_assert_not_reached ();
	if (i == CEE_THROW) {
		unsigned char *bb_start = ip - 1;
		/* Find the start of the bblock containing the throw */
		while ((bb_start >= start) && !bblock) {
			bblock = cfg->cil_offset_to_bb [(bb_start) - start];
		/* throwing blocks are cold: move them out of line */
		bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data table; otherwise it is a normal
 * metadata token resolved against M's image.  "Open" (uninstantiated generic)
 * methods are allowed through — see mini_get_method () for the checked variant.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);
	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared generic
 * code it additionally checks for methods on open constructed types (handling
 * path not visible here).
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
	/* open constructed types are only acceptable under generic sharing */
	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrappers, normal metadata lookup otherwise, then initialize the class.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
		klass = mono_class_get_full (method->klass->image, token, context);
	mono_class_init (klass);
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand declarative security for a CALLER -> CALLEE call.
 * On an ECMA linkdemand failure, emits a call to
 * SecurityManager.LinkDemandSecurityException before the actual call; other
 * failures are recorded on the cfg as MONO_EXCEPTION_SECURITY_LINKDEMAND.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* when inlining (cfg->method != caller) declsec on the callee blocks the inline */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the SecurityManager.ThrowException (1 arg) method, resolved once
 * and cached in a function-local static.
 * NOTE(review): the lazy initialization of 'method' has no visible locking —
 * confirm this is only reached under the JIT lock.
 */
throw_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the preallocated exception object EX at runtime, by
 * calling SecurityManager.ThrowException with EX as a pointer constant.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
	MonoMethod *thrower = throw_exception ();
	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
get_original_method (MonoMethod *method)
	/* non-wrappers are returned unchanged (return statement elided in this view) */
	if (method->wrapper_type == MONO_WRAPPER_NONE)
	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped to the original method) may
 * not access FIELD, emit code that throws the returned security exception.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
										  MonoBasicBlock *bblock, unsigned char *ip)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped to the original method) may
 * not call CALLEE, emit code that throws the returned security exception.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
										 MonoBasicBlock *bblock, unsigned char *ip)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical array-initialization IL sequence following a
 * newarr (dup; ldtoken <field>; call RuntimeHelpers::InitializeArray) and,
 * when it matches, return a pointer to the field's static data (or its RVA
 * when AOT compiling), with the element size in OUT_SIZE and the field token
 * in OUT_FIELD_TOKEN.  Returns NULL when the pattern does not apply.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/*
	 * newarr[System.Int32]
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] == 0x4 checks the ldtoken operand's token table byte */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		*out_field_token = field_token;
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* only the real corlib RuntimeHelpers.InitializeArray qualifies */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		/* element size is derived from the array element type (cases elided in this view) */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		/* the initializer blob must fit within the field's declared size */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgram compile failure on CFG, formatting a message that
 * includes the method's full name and a disassembly of the offending
 * instruction at IP (or a note when the method body is empty).
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
	char *method_fname = mono_method_full_name (method, TRUE);
	MonoMethodHeader *header = mono_method_get_header (method);
	if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
	/* defer freeing the header until the whole compile is torn down */
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a preallocated EXCEPTION object as the compile failure for CFG.
 * exception_ptr is registered as a GC root before storing the managed object
 * into it, so the exception stays alive until the compile is reported.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables through
 * the generic sharing context first when one is active.
 */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	if (cfg->generic_sharing_context)
		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
		type = &klass->byval_arg;
	return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N.  When the value on top of the stack is
 * the last emitted instruction and is a plain int/long constant, the move is
 * optimized away by retargeting that instruction's dreg to the local's reg;
 * otherwise a regular LOCSTORE is emitted.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for ldloca: when the address of a local is taken only to feed an
 * immediately following initobj, skip the address-taking entirely and zero the
 * local directly (NULL store for reference types, VZERO for value types).
 * Returns the new ip past the consumed instructions when the pattern matched
 * (return paths elided in this view).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	local = read16 (ip + 2);
	/* ldloca must be followed, in the same bblock, by initobj for this to apply */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			/* initobj on a reference type just nulls the local */
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			/* value types are zeroed in place */
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS and return TRUE if System.Exception is
 * found among its ancestors (loop structure partly elided in this view).
 */
is_exception_class (MonoClass *class)
	if (class == mono_defaults.exception_class)
	class = class->parent;
5251 * mono_method_to_ir:
5253 * Translate the .net IL into linear IR.
5256 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5257 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5258 guint inline_offset, gboolean is_virtual_call)
5261 MonoInst *ins, **sp, **stack_start;
5262 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5263 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5264 MonoMethod *cmethod, *method_definition;
5265 MonoInst **arg_array;
5266 MonoMethodHeader *header;
5268 guint32 token, ins_flag;
5270 MonoClass *constrained_call = NULL;
5271 unsigned char *ip, *end, *target, *err_pos;
5272 static double r8_0 = 0.0;
5273 MonoMethodSignature *sig;
5274 MonoGenericContext *generic_context = NULL;
5275 MonoGenericContainer *generic_container = NULL;
5276 MonoType **param_types;
5277 int i, n, start_new_bblock, dreg;
5278 int num_calls = 0, inline_costs = 0;
5279 int breakpoint_id = 0;
5281 MonoBoolean security, pinvoke;
5282 MonoSecurityManager* secman = NULL;
5283 MonoDeclSecurityActions actions;
5284 GSList *class_inits = NULL;
5285 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5287 gboolean init_locals, seq_points, skip_dead_blocks;
5289 /* serialization and xdomain stuff may need access to private fields and methods */
5290 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5291 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5292 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5293 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5294 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5295 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5297 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5299 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5300 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5301 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5302 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5304 image = method->klass->image;
5305 header = mono_method_get_header (method);
5307 MonoLoaderError *error;
5309 if ((error = mono_loader_get_last_error ())) {
5310 cfg->exception_type = error->exception_type;
5312 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5313 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5315 goto exception_exit;
5317 generic_container = mono_method_get_generic_container (method);
5318 sig = mono_method_signature (method);
5319 num_args = sig->hasthis + sig->param_count;
5320 ip = (unsigned char*)header->code;
5321 cfg->cil_start = ip;
5322 end = ip + header->code_size;
5323 mono_jit_stats.cil_code_size += header->code_size;
5324 init_locals = header->init_locals;
5326 seq_points = cfg->gen_seq_points && cfg->method == method;
5329 * Methods without init_locals set could cause asserts in various passes
5334 method_definition = method;
5335 while (method_definition->is_inflated) {
5336 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5337 method_definition = imethod->declaring;
5340 /* SkipVerification is not allowed if core-clr is enabled */
5341 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5343 dont_verify_stloc = TRUE;
5346 if (!dont_verify && mini_method_verify (cfg, method_definition))
5347 goto exception_exit;
5349 if (mono_debug_using_mono_debugger ())
5350 cfg->keep_cil_nops = TRUE;
5352 if (sig->is_inflated)
5353 generic_context = mono_method_get_context (method);
5354 else if (generic_container)
5355 generic_context = &generic_container->context;
5356 cfg->generic_context = generic_context;
5358 if (!cfg->generic_sharing_context)
5359 g_assert (!sig->has_type_parameters);
5361 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5362 g_assert (method->is_inflated);
5363 g_assert (mono_method_get_context (method)->method_inst);
5365 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5366 g_assert (sig->generic_param_count);
5368 if (cfg->method == method) {
5369 cfg->real_offset = 0;
5371 cfg->real_offset = inline_offset;
5374 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5375 cfg->cil_offset_to_bb_len = header->code_size;
5377 cfg->current_method = method;
5379 if (cfg->verbose_level > 2)
5380 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5382 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5384 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5385 for (n = 0; n < sig->param_count; ++n)
5386 param_types [n + sig->hasthis] = sig->params [n];
5387 cfg->arg_types = param_types;
5389 dont_inline = g_list_prepend (dont_inline, method);
5390 if (cfg->method == method) {
5392 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5393 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5396 NEW_BBLOCK (cfg, start_bblock);
5397 cfg->bb_entry = start_bblock;
5398 start_bblock->cil_code = NULL;
5399 start_bblock->cil_length = 0;
5402 NEW_BBLOCK (cfg, end_bblock);
5403 cfg->bb_exit = end_bblock;
5404 end_bblock->cil_code = NULL;
5405 end_bblock->cil_length = 0;
5406 g_assert (cfg->num_bblocks == 2);
5408 arg_array = cfg->args;
5410 if (header->num_clauses) {
5411 cfg->spvars = g_hash_table_new (NULL, NULL);
5412 cfg->exvars = g_hash_table_new (NULL, NULL);
5414 /* handle exception clauses */
5415 for (i = 0; i < header->num_clauses; ++i) {
5416 MonoBasicBlock *try_bb;
5417 MonoExceptionClause *clause = &header->clauses [i];
5418 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5419 try_bb->real_offset = clause->try_offset;
5420 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5421 tblock->real_offset = clause->handler_offset;
5422 tblock->flags |= BB_EXCEPTION_HANDLER;
5424 link_bblock (cfg, try_bb, tblock);
5426 if (*(ip + clause->handler_offset) == CEE_POP)
5427 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5429 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5430 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5431 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5432 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5433 MONO_ADD_INS (tblock, ins);
5435 /* todo: is a fault block unsafe to optimize? */
5436 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5437 tblock->flags |= BB_EXCEPTION_UNSAFE;
5441 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5443 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5445 /* catch and filter blocks get the exception object on the stack */
5446 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5447 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5448 MonoInst *dummy_use;
5450 /* mostly like handle_stack_args (), but just sets the input args */
5451 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5452 tblock->in_scount = 1;
5453 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5454 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5457 * Add a dummy use for the exvar so its liveness info will be
5461 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5463 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5464 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5465 tblock->flags |= BB_EXCEPTION_HANDLER;
5466 tblock->real_offset = clause->data.filter_offset;
5467 tblock->in_scount = 1;
5468 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5469 /* The filter block shares the exvar with the handler block */
5470 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5471 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5472 MONO_ADD_INS (tblock, ins);
5476 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5477 clause->data.catch_class &&
5478 cfg->generic_sharing_context &&
5479 mono_class_check_context_used (clause->data.catch_class)) {
5481 * In shared generic code with catch
5482 * clauses containing type variables
5483 * the exception handling code has to
5484 * be able to get to the rgctx.
5485 * Therefore we have to make sure that
5486 * the vtable/mrgctx argument (for
5487 * static or generic methods) or the
5488 * "this" argument (for non-static
5489 * methods) are live.
5491 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5492 mini_method_get_context (method)->method_inst ||
5493 method->klass->valuetype) {
5494 mono_get_vtable_var (cfg);
5496 MonoInst *dummy_use;
5498 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5503 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5504 cfg->cbb = start_bblock;
5505 cfg->args = arg_array;
5506 mono_save_args (cfg, sig, inline_args);
5509 /* FIRST CODE BLOCK */
5510 NEW_BBLOCK (cfg, bblock);
5511 bblock->cil_code = ip;
5515 ADD_BBLOCK (cfg, bblock);
5517 if (cfg->method == method) {
5518 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5519 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5520 MONO_INST_NEW (cfg, ins, OP_BREAK);
5521 MONO_ADD_INS (bblock, ins);
5525 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5526 secman = mono_security_manager_get_methods ();
5528 security = (secman && mono_method_has_declsec (method));
5529 /* at this point having security doesn't mean we have any code to generate */
5530 if (security && (cfg->method == method)) {
5531 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5532 * And we do not want to enter the next section (with allocation) if we
5533 * have nothing to generate */
5534 security = mono_declsec_get_demands (method, &actions);
5537 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5538 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5540 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5541 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5542 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5544 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5545 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5549 mono_custom_attrs_free (custom);
5552 custom = mono_custom_attrs_from_class (wrapped->klass);
5553 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5557 mono_custom_attrs_free (custom);
5560 /* not a P/Invoke after all */
5565 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5566 /* we use a separate basic block for the initialization code */
5567 NEW_BBLOCK (cfg, init_localsbb);
5568 cfg->bb_init = init_localsbb;
5569 init_localsbb->real_offset = cfg->real_offset;
5570 start_bblock->next_bb = init_localsbb;
5571 init_localsbb->next_bb = bblock;
5572 link_bblock (cfg, start_bblock, init_localsbb);
5573 link_bblock (cfg, init_localsbb, bblock);
5575 cfg->cbb = init_localsbb;
5577 start_bblock->next_bb = bblock;
5578 link_bblock (cfg, start_bblock, bblock);
5581 /* at this point we know, if security is TRUE, that some code needs to be generated */
5582 if (security && (cfg->method == method)) {
5585 mono_jit_stats.cas_demand_generation++;
5587 if (actions.demand.blob) {
5588 /* Add code for SecurityAction.Demand */
5589 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5590 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5591 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5592 mono_emit_method_call (cfg, secman->demand, args, NULL);
5594 if (actions.noncasdemand.blob) {
5595 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5596 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5597 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5598 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5599 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5600 mono_emit_method_call (cfg, secman->demand, args, NULL);
5602 if (actions.demandchoice.blob) {
5603 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5604 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5605 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5606 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5607 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5611 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5613 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5616 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5617 /* check if this is native code, e.g. an icall or a p/invoke */
5618 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5619 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5621 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5622 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5624 /* if this is a native call then it can only be JITted from platform code */
5625 if ((icall || pinvk) && method->klass && method->klass->image) {
5626 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5627 MonoException *ex = icall ? mono_get_exception_security () :
5628 mono_get_exception_method_access ();
5629 emit_throw_exception (cfg, ex);
5636 if (header->code_size == 0)
5639 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5644 if (cfg->method == method)
5645 mono_debug_init_method (cfg, bblock, breakpoint_id);
5647 for (n = 0; n < header->num_locals; ++n) {
5648 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5653 /* We force the vtable variable here for all shared methods
5654 for the possibility that they might show up in a stack
5655 trace where their exact instantiation is needed. */
5656 if (cfg->generic_sharing_context && method == cfg->method) {
5657 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5658 mini_method_get_context (method)->method_inst ||
5659 method->klass->valuetype) {
5660 mono_get_vtable_var (cfg);
5662 /* FIXME: Is there a better way to do this?
5663 We need the variable live for the duration
5664 of the whole method. */
5665 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5669 /* add a check for this != NULL to inlined methods */
5670 if (is_virtual_call) {
5673 NEW_ARGLOAD (cfg, arg_ins, 0);
5674 MONO_ADD_INS (cfg->cbb, arg_ins);
5675 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5678 skip_dead_blocks = !dont_verify;
5679 if (skip_dead_blocks) {
5680 original_bb = bb = mono_basic_block_split (method, &error);
5681 if (!mono_error_ok (&error)) {
5682 mono_error_cleanup (&error);
5688 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5689 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5692 start_new_bblock = 0;
5695 if (cfg->method == method)
5696 cfg->real_offset = ip - header->code;
5698 cfg->real_offset = inline_offset;
5703 if (start_new_bblock) {
5704 bblock->cil_length = ip - bblock->cil_code;
5705 if (start_new_bblock == 2) {
5706 g_assert (ip == tblock->cil_code);
5708 GET_BBLOCK (cfg, tblock, ip);
5710 bblock->next_bb = tblock;
5713 start_new_bblock = 0;
5714 for (i = 0; i < bblock->in_scount; ++i) {
5715 if (cfg->verbose_level > 3)
5716 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5717 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5721 g_slist_free (class_inits);
5724 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5725 link_bblock (cfg, bblock, tblock);
5726 if (sp != stack_start) {
5727 handle_stack_args (cfg, stack_start, sp - stack_start);
5729 CHECK_UNVERIFIABLE (cfg);
5731 bblock->next_bb = tblock;
5734 for (i = 0; i < bblock->in_scount; ++i) {
5735 if (cfg->verbose_level > 3)
5736 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5737 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5740 g_slist_free (class_inits);
5745 if (skip_dead_blocks) {
5746 int ip_offset = ip - header->code;
5748 if (ip_offset == bb->end)
5752 int op_size = mono_opcode_size (ip, end);
5753 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5755 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5757 if (ip_offset + op_size == bb->end) {
5758 MONO_INST_NEW (cfg, ins, OP_NOP);
5759 MONO_ADD_INS (bblock, ins);
5760 start_new_bblock = 1;
5768 * Sequence points are points where the debugger can place a breakpoint.
5769 * Currently, we generate these automatically at points where the IL
5772 if (seq_points && sp == stack_start) {
5773 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5774 MONO_ADD_INS (cfg->cbb, ins);
5777 bblock->real_offset = cfg->real_offset;
5779 if ((cfg->method == method) && cfg->coverage_info) {
5780 guint32 cil_offset = ip - header->code;
5781 cfg->coverage_info->data [cil_offset].cil_code = ip;
5783 /* TODO: Use an increment here */
5784 #if defined(TARGET_X86)
5785 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5786 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5788 MONO_ADD_INS (cfg->cbb, ins);
5790 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5791 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5795 if (cfg->verbose_level > 3)
5796 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5800 if (cfg->keep_cil_nops)
5801 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5803 MONO_INST_NEW (cfg, ins, OP_NOP);
5805 MONO_ADD_INS (bblock, ins);
5808 if (should_insert_brekpoint (cfg->method))
5809 MONO_INST_NEW (cfg, ins, OP_BREAK);
5811 MONO_INST_NEW (cfg, ins, OP_NOP);
5813 MONO_ADD_INS (bblock, ins);
5819 CHECK_STACK_OVF (1);
5820 n = (*ip)-CEE_LDARG_0;
5822 EMIT_NEW_ARGLOAD (cfg, ins, n);
5830 CHECK_STACK_OVF (1);
5831 n = (*ip)-CEE_LDLOC_0;
5833 EMIT_NEW_LOCLOAD (cfg, ins, n);
5842 n = (*ip)-CEE_STLOC_0;
5845 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5847 emit_stloc_ir (cfg, sp, header, n);
5854 CHECK_STACK_OVF (1);
5857 EMIT_NEW_ARGLOAD (cfg, ins, n);
5863 CHECK_STACK_OVF (1);
5866 NEW_ARGLOADA (cfg, ins, n);
5867 MONO_ADD_INS (cfg->cbb, ins);
5877 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5879 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5884 CHECK_STACK_OVF (1);
5887 EMIT_NEW_LOCLOAD (cfg, ins, n);
5891 case CEE_LDLOCA_S: {
5892 unsigned char *tmp_ip;
5894 CHECK_STACK_OVF (1);
5895 CHECK_LOCAL (ip [1]);
5897 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5903 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5912 CHECK_LOCAL (ip [1]);
5913 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5915 emit_stloc_ir (cfg, sp, header, ip [1]);
5920 CHECK_STACK_OVF (1);
5921 EMIT_NEW_PCONST (cfg, ins, NULL);
5922 ins->type = STACK_OBJ;
5927 CHECK_STACK_OVF (1);
5928 EMIT_NEW_ICONST (cfg, ins, -1);
5941 CHECK_STACK_OVF (1);
5942 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5948 CHECK_STACK_OVF (1);
5950 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5956 CHECK_STACK_OVF (1);
5957 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5963 CHECK_STACK_OVF (1);
5964 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5965 ins->type = STACK_I8;
5966 ins->dreg = alloc_dreg (cfg, STACK_I8);
5968 ins->inst_l = (gint64)read64 (ip);
5969 MONO_ADD_INS (bblock, ins);
5975 gboolean use_aotconst = FALSE;
5977 #ifdef TARGET_POWERPC
5978 /* FIXME: Clean this up */
5979 if (cfg->compile_aot)
5980 use_aotconst = TRUE;
5983 /* FIXME: we should really allocate this only late in the compilation process */
5984 f = mono_domain_alloc (cfg->domain, sizeof (float));
5986 CHECK_STACK_OVF (1);
5992 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5994 dreg = alloc_freg (cfg);
5995 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5996 ins->type = STACK_R8;
5998 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5999 ins->type = STACK_R8;
6000 ins->dreg = alloc_dreg (cfg, STACK_R8);
6002 MONO_ADD_INS (bblock, ins);
6012 gboolean use_aotconst = FALSE;
6014 #ifdef TARGET_POWERPC
6015 /* FIXME: Clean this up */
6016 if (cfg->compile_aot)
6017 use_aotconst = TRUE;
6020 /* FIXME: we should really allocate this only late in the compilation process */
6021 d = mono_domain_alloc (cfg->domain, sizeof (double));
6023 CHECK_STACK_OVF (1);
6029 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6031 dreg = alloc_freg (cfg);
6032 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6033 ins->type = STACK_R8;
6035 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6036 ins->type = STACK_R8;
6037 ins->dreg = alloc_dreg (cfg, STACK_R8);
6039 MONO_ADD_INS (bblock, ins);
6048 MonoInst *temp, *store;
6050 CHECK_STACK_OVF (1);
6054 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6055 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6057 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6060 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6073 if (sp [0]->type == STACK_R8)
6074 /* we need to pop the value from the x86 FP stack */
6075 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6084 if (stack_start != sp)
6086 token = read32 (ip + 1);
6087 /* FIXME: check the signature matches */
6088 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6093 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6094 GENERIC_SHARING_FAILURE (CEE_JMP);
6096 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6097 CHECK_CFG_EXCEPTION;
6099 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6101 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6104 /* Handle tail calls similarly to calls */
6105 n = fsig->param_count + fsig->hasthis;
6107 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6108 call->method = cmethod;
6109 call->tail_call = TRUE;
6110 call->signature = mono_method_signature (cmethod);
6111 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6112 call->inst.inst_p0 = cmethod;
6113 for (i = 0; i < n; ++i)
6114 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6116 mono_arch_emit_call (cfg, call);
6117 MONO_ADD_INS (bblock, (MonoInst*)call);
6120 for (i = 0; i < num_args; ++i)
6121 /* Prevent arguments from being optimized away */
6122 arg_array [i]->flags |= MONO_INST_VOLATILE;
6124 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6125 ins = (MonoInst*)call;
6126 ins->inst_p0 = cmethod;
6127 MONO_ADD_INS (bblock, ins);
6131 start_new_bblock = 1;
6136 case CEE_CALLVIRT: {
6137 MonoInst *addr = NULL;
6138 MonoMethodSignature *fsig = NULL;
6140 int virtual = *ip == CEE_CALLVIRT;
6141 int calli = *ip == CEE_CALLI;
6142 gboolean pass_imt_from_rgctx = FALSE;
6143 MonoInst *imt_arg = NULL;
6144 gboolean pass_vtable = FALSE;
6145 gboolean pass_mrgctx = FALSE;
6146 MonoInst *vtable_arg = NULL;
6147 gboolean check_this = FALSE;
6148 gboolean supported_tail_call = FALSE;
6151 token = read32 (ip + 1);
6158 if (method->wrapper_type != MONO_WRAPPER_NONE)
6159 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6161 fsig = mono_metadata_parse_signature (image, token);
6163 n = fsig->param_count + fsig->hasthis;
6165 if (method->dynamic && fsig->pinvoke) {
6169 * This is a call through a function pointer using a pinvoke
6170 * signature. Have to create a wrapper and call that instead.
6171 * FIXME: This is very slow, need to create a wrapper at JIT time
6172 * instead based on the signature.
6174 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6175 EMIT_NEW_PCONST (cfg, args [1], fsig);
6177 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6180 MonoMethod *cil_method;
6182 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6183 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6184 cil_method = cmethod;
6185 } else if (constrained_call) {
6186 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6188 * This is needed since get_method_constrained can't find
6189 * the method in klass representing a type var.
6190 * The type var is guaranteed to be a reference type in this
6193 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6194 cil_method = cmethod;
6195 g_assert (!cmethod->klass->valuetype);
6197 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6200 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6201 cil_method = cmethod;
6206 if (!dont_verify && !cfg->skip_visibility) {
6207 MonoMethod *target_method = cil_method;
6208 if (method->is_inflated) {
6209 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6211 if (!mono_method_can_access_method (method_definition, target_method) &&
6212 !mono_method_can_access_method (method, cil_method))
6213 METHOD_ACCESS_FAILURE;
6216 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6217 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6219 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6220 /* MS.NET seems to silently convert this to a callvirt */
6225 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6226 * converts to a callvirt.
6228 * tests/bug-515884.il is an example of this behavior
6230 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6231 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6232 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6236 if (!cmethod->klass->inited)
6237 if (!mono_class_init (cmethod->klass))
6240 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6241 mini_class_is_system_array (cmethod->klass)) {
6242 array_rank = cmethod->klass->rank;
6243 fsig = mono_method_signature (cmethod);
6245 fsig = mono_method_signature (cmethod);
6250 if (fsig->pinvoke) {
6251 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6252 check_for_pending_exc, FALSE);
6253 fsig = mono_method_signature (wrapper);
6254 } else if (constrained_call) {
6255 fsig = mono_method_signature (cmethod);
6257 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6261 mono_save_token_info (cfg, image, token, cil_method);
6263 n = fsig->param_count + fsig->hasthis;
6265 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6266 if (check_linkdemand (cfg, method, cmethod))
6268 CHECK_CFG_EXCEPTION;
6271 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6272 g_assert_not_reached ();
6275 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6278 if (!cfg->generic_sharing_context && cmethod)
6279 g_assert (!mono_method_check_context_used (cmethod));
6283 //g_assert (!virtual || fsig->hasthis);
6287 if (constrained_call) {
6289 * We have the `constrained.' prefix opcode.
6291 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6293 * The type parameter is instantiated as a valuetype,
6294 * but that type doesn't override the method we're
6295 * calling, so we need to box `this'.
6297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6298 ins->klass = constrained_call;
6299 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6300 CHECK_CFG_EXCEPTION;
6301 } else if (!constrained_call->valuetype) {
6302 int dreg = alloc_preg (cfg);
6305 * The type parameter is instantiated as a reference
6306 * type. We have a managed pointer on the stack, so
6307 * we need to dereference it here.
6309 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6310 ins->type = STACK_OBJ;
6312 } else if (cmethod->klass->valuetype)
6314 constrained_call = NULL;
6317 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6321 * If the callee is a shared method, then its static cctor
6322 * might not get called after the call was patched.
6324 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6325 emit_generic_class_init (cfg, cmethod->klass);
6326 CHECK_TYPELOAD (cmethod->klass);
6329 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6330 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6331 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6332 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6333 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6336 * Pass vtable iff target method might
6337 * be shared, which means that sharing
6338 * is enabled for its class and its
6339 * context is sharable (and it's not a
6342 if (sharing_enabled && context_sharable &&
6343 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6347 if (cmethod && mini_method_get_context (cmethod) &&
6348 mini_method_get_context (cmethod)->method_inst) {
6349 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6350 MonoGenericContext *context = mini_method_get_context (cmethod);
6351 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6353 g_assert (!pass_vtable);
6355 if (sharing_enabled && context_sharable)
6359 if (cfg->generic_sharing_context && cmethod) {
6360 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6362 context_used = mono_method_check_context_used (cmethod);
6364 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6365 /* Generic method interface
6366 calls are resolved via a
6367 helper function and don't
6369 if (!cmethod_context || !cmethod_context->method_inst)
6370 pass_imt_from_rgctx = TRUE;
6374 * If a shared method calls another
6375 * shared method then the caller must
6376 * have a generic sharing context
6377 * because the magic trampoline
6378 * requires it. FIXME: We shouldn't
6379 * have to force the vtable/mrgctx
6380 * variable here. Instead there
6381 * should be a flag in the cfg to
6382 * request a generic sharing context.
6385 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6386 mono_get_vtable_var (cfg);
6391 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6393 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6395 CHECK_TYPELOAD (cmethod->klass);
6396 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6401 g_assert (!vtable_arg);
6403 if (!cfg->compile_aot) {
6405 * emit_get_rgctx_method () calls mono_class_vtable () so check
6406 * for type load errors before.
6408 mono_class_setup_vtable (cmethod->klass);
6409 CHECK_TYPELOAD (cmethod->klass);
6412 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6414 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6415 MONO_METHOD_IS_FINAL (cmethod)) {
6422 if (pass_imt_from_rgctx) {
6423 g_assert (!pass_vtable);
6426 imt_arg = emit_get_rgctx_method (cfg, context_used,
6427 cmethod, MONO_RGCTX_INFO_METHOD);
6431 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6433 /* Calling virtual generic methods */
6434 if (cmethod && virtual &&
6435 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6436 !(MONO_METHOD_IS_FINAL (cmethod) &&
6437 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6438 mono_method_signature (cmethod)->generic_param_count) {
6439 MonoInst *this_temp, *this_arg_temp, *store;
6440 MonoInst *iargs [4];
6442 g_assert (mono_method_signature (cmethod)->is_inflated);
6444 /* Prevent inlining of methods that contain indirect calls */
6447 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6448 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6449 g_assert (!imt_arg);
6451 g_assert (cmethod->is_inflated);
6452 imt_arg = emit_get_rgctx_method (cfg, context_used,
6453 cmethod, MONO_RGCTX_INFO_METHOD);
6454 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6458 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6459 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6460 MONO_ADD_INS (bblock, store);
6462 /* FIXME: This should be a managed pointer */
6463 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6465 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6466 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6467 cmethod, MONO_RGCTX_INFO_METHOD);
6468 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6469 addr = mono_emit_jit_icall (cfg,
6470 mono_helper_compile_generic_method, iargs);
6472 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6474 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6477 if (!MONO_TYPE_IS_VOID (fsig->ret))
6478 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6480 CHECK_CFG_EXCEPTION;
6487 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6488 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6490 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6494 /* FIXME: runtime generic context pointer for jumps? */
6495 /* FIXME: handle this for generic sharing eventually */
6496 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6499 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6502 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6503 /* Handle tail calls similarly to calls */
6504 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6506 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6507 call->tail_call = TRUE;
6508 call->method = cmethod;
6509 call->signature = mono_method_signature (cmethod);
6512 * We implement tail calls by storing the actual arguments into the
6513 * argument variables, then emitting a CEE_JMP.
6515 for (i = 0; i < n; ++i) {
6516 /* Prevent argument from being register allocated */
6517 arg_array [i]->flags |= MONO_INST_VOLATILE;
6518 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6522 ins = (MonoInst*)call;
6523 ins->inst_p0 = cmethod;
6524 ins->inst_p1 = arg_array [0];
6525 MONO_ADD_INS (bblock, ins);
6526 link_bblock (cfg, bblock, end_bblock);
6527 start_new_bblock = 1;
6529 CHECK_CFG_EXCEPTION;
6531 /* skip CEE_RET as well */
6537 /* Conversion to a JIT intrinsic */
6538 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6539 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6540 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6545 CHECK_CFG_EXCEPTION;
6553 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6554 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6555 mono_method_check_inlining (cfg, cmethod) &&
6556 !g_list_find (dont_inline, cmethod)) {
6558 gboolean allways = FALSE;
6560 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6561 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6562 /* Prevent inlining of methods that call wrappers */
6564 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6568 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6570 cfg->real_offset += 5;
6573 if (!MONO_TYPE_IS_VOID (fsig->ret))
6574 /* *sp is already set by inline_method */
6577 inline_costs += costs;
6583 inline_costs += 10 * num_calls++;
6585 /* Tail recursion elimination */
6586 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6587 gboolean has_vtargs = FALSE;
6590 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6593 /* keep it simple */
6594 for (i = fsig->param_count - 1; i >= 0; i--) {
6595 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6600 for (i = 0; i < n; ++i)
6601 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6602 MONO_INST_NEW (cfg, ins, OP_BR);
6603 MONO_ADD_INS (bblock, ins);
6604 tblock = start_bblock->out_bb [0];
6605 link_bblock (cfg, bblock, tblock);
6606 ins->inst_target_bb = tblock;
6607 start_new_bblock = 1;
6609 /* skip the CEE_RET, too */
6610 if (ip_in_bb (cfg, bblock, ip + 5))
6620 /* Generic sharing */
6621 /* FIXME: only do this for generic methods if
6622 they are not shared! */
6623 if (context_used && !imt_arg && !array_rank &&
6624 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6625 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6626 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6627 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6630 g_assert (cfg->generic_sharing_context && cmethod);
6634 * We are compiling a call to a
6635 * generic method from shared code,
6636 * which means that we have to look up
6637 * the method in the rgctx and do an
6640 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6643 /* Indirect calls */
6645 g_assert (!imt_arg);
6647 if (*ip == CEE_CALL)
6648 g_assert (context_used);
6649 else if (*ip == CEE_CALLI)
6650 g_assert (!vtable_arg);
6652 /* FIXME: what the hell is this??? */
6653 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6654 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6656 /* Prevent inlining of methods with indirect calls */
6661 int rgctx_reg = mono_alloc_preg (cfg);
6663 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6664 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6665 call = (MonoCallInst*)ins;
6666 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6668 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6670 * Instead of emitting an indirect call, emit a direct call
6671 * with the contents of the aotconst as the patch info.
6673 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6675 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6676 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6679 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6682 if (!MONO_TYPE_IS_VOID (fsig->ret))
6683 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6685 CHECK_CFG_EXCEPTION;
6696 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6697 if (sp [fsig->param_count]->type == STACK_OBJ) {
6698 MonoInst *iargs [2];
6701 iargs [1] = sp [fsig->param_count];
6703 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6706 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6707 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6708 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6709 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6711 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6714 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6715 if (!cmethod->klass->element_class->valuetype && !readonly)
6716 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6717 CHECK_TYPELOAD (cmethod->klass);
6720 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6723 g_assert_not_reached ();
6726 CHECK_CFG_EXCEPTION;
6733 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6735 if (!MONO_TYPE_IS_VOID (fsig->ret))
6736 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6738 CHECK_CFG_EXCEPTION;
6748 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6750 } else if (imt_arg) {
6751 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6753 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6756 if (!MONO_TYPE_IS_VOID (fsig->ret))
6757 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6759 CHECK_CFG_EXCEPTION;
6766 if (cfg->method != method) {
6767 /* return from inlined method */
6769 * If in_count == 0, that means the ret is unreachable due to
6770 * being preceded by a throw. In that case, inline_method () will
6771 * handle setting the return value
6772 * (test case: test_0_inline_throw ()).
6774 if (return_var && cfg->cbb->in_count) {
6778 //g_assert (returnvar != -1);
6779 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6780 cfg->ret_var_set = TRUE;
6784 MonoType *ret_type = mono_method_signature (method)->ret;
6788 * Place a seq point here too even though the IL stack is not
6789 * empty, so a step over on
6792 * will work correctly.
6794 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6795 MONO_ADD_INS (cfg->cbb, ins);
6798 g_assert (!return_var);
6801 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6804 if (!cfg->vret_addr) {
6807 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6809 EMIT_NEW_RETLOADA (cfg, ret_addr);
6811 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6812 ins->klass = mono_class_from_mono_type (ret_type);
6815 #ifdef MONO_ARCH_SOFT_FLOAT
6816 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6817 MonoInst *iargs [1];
6821 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6822 mono_arch_emit_setret (cfg, method, conv);
6824 mono_arch_emit_setret (cfg, method, *sp);
6827 mono_arch_emit_setret (cfg, method, *sp);
6832 if (sp != stack_start)
6834 MONO_INST_NEW (cfg, ins, OP_BR);
6836 ins->inst_target_bb = end_bblock;
6837 MONO_ADD_INS (bblock, ins);
6838 link_bblock (cfg, bblock, end_bblock);
6839 start_new_bblock = 1;
6843 MONO_INST_NEW (cfg, ins, OP_BR);
6845 target = ip + 1 + (signed char)(*ip);
6847 GET_BBLOCK (cfg, tblock, target);
6848 link_bblock (cfg, bblock, tblock);
6849 ins->inst_target_bb = tblock;
6850 if (sp != stack_start) {
6851 handle_stack_args (cfg, stack_start, sp - stack_start);
6853 CHECK_UNVERIFIABLE (cfg);
6855 MONO_ADD_INS (bblock, ins);
6856 start_new_bblock = 1;
6857 inline_costs += BRANCH_COST;
6871 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6873 target = ip + 1 + *(signed char*)ip;
6879 inline_costs += BRANCH_COST;
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6886 target = ip + 4 + (gint32)read32(ip);
6888 GET_BBLOCK (cfg, tblock, target);
6889 link_bblock (cfg, bblock, tblock);
6890 ins->inst_target_bb = tblock;
6891 if (sp != stack_start) {
6892 handle_stack_args (cfg, stack_start, sp - stack_start);
6894 CHECK_UNVERIFIABLE (cfg);
6897 MONO_ADD_INS (bblock, ins);
6899 start_new_bblock = 1;
6900 inline_costs += BRANCH_COST;
6907 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6908 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6909 guint32 opsize = is_short ? 1 : 4;
6911 CHECK_OPSIZE (opsize);
6913 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6916 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6921 GET_BBLOCK (cfg, tblock, target);
6922 link_bblock (cfg, bblock, tblock);
6923 GET_BBLOCK (cfg, tblock, ip);
6924 link_bblock (cfg, bblock, tblock);
6926 if (sp != stack_start) {
6927 handle_stack_args (cfg, stack_start, sp - stack_start);
6928 CHECK_UNVERIFIABLE (cfg);
6931 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6932 cmp->sreg1 = sp [0]->dreg;
6933 type_from_op (cmp, sp [0], NULL);
6936 #if SIZEOF_REGISTER == 4
6937 if (cmp->opcode == OP_LCOMPARE_IMM) {
6938 /* Convert it to OP_LCOMPARE */
6939 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6940 ins->type = STACK_I8;
6941 ins->dreg = alloc_dreg (cfg, STACK_I8);
6943 MONO_ADD_INS (bblock, ins);
6944 cmp->opcode = OP_LCOMPARE;
6945 cmp->sreg2 = ins->dreg;
6948 MONO_ADD_INS (bblock, cmp);
6950 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6951 type_from_op (ins, sp [0], NULL);
6952 MONO_ADD_INS (bblock, ins);
6953 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6954 GET_BBLOCK (cfg, tblock, target);
6955 ins->inst_true_bb = tblock;
6956 GET_BBLOCK (cfg, tblock, ip);
6957 ins->inst_false_bb = tblock;
6958 start_new_bblock = 2;
6961 inline_costs += BRANCH_COST;
6976 MONO_INST_NEW (cfg, ins, *ip);
6978 target = ip + 4 + (gint32)read32(ip);
6984 inline_costs += BRANCH_COST;
6988 MonoBasicBlock **targets;
6989 MonoBasicBlock *default_bblock;
6990 MonoJumpInfoBBTable *table;
6991 int offset_reg = alloc_preg (cfg);
6992 int target_reg = alloc_preg (cfg);
6993 int table_reg = alloc_preg (cfg);
6994 int sum_reg = alloc_preg (cfg);
6995 gboolean use_op_switch;
6999 n = read32 (ip + 1);
7002 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7006 CHECK_OPSIZE (n * sizeof (guint32));
7007 target = ip + n * sizeof (guint32);
7009 GET_BBLOCK (cfg, default_bblock, target);
7011 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7012 for (i = 0; i < n; ++i) {
7013 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7014 targets [i] = tblock;
7018 if (sp != stack_start) {
7020 * Link the current bb with the targets as well, so handle_stack_args
7021 * will set their in_stack correctly.
7023 link_bblock (cfg, bblock, default_bblock);
7024 for (i = 0; i < n; ++i)
7025 link_bblock (cfg, bblock, targets [i]);
7027 handle_stack_args (cfg, stack_start, sp - stack_start);
7029 CHECK_UNVERIFIABLE (cfg);
7032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7036 for (i = 0; i < n; ++i)
7037 link_bblock (cfg, bblock, targets [i]);
7039 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7040 table->table = targets;
7041 table->table_size = n;
7043 use_op_switch = FALSE;
7045 /* ARM implements SWITCH statements differently */
7046 /* FIXME: Make it use the generic implementation */
7047 if (!cfg->compile_aot)
7048 use_op_switch = TRUE;
7051 if (COMPILE_LLVM (cfg))
7052 use_op_switch = TRUE;
7054 cfg->cbb->has_jump_table = 1;
7056 if (use_op_switch) {
7057 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7058 ins->sreg1 = src1->dreg;
7059 ins->inst_p0 = table;
7060 ins->inst_many_bb = targets;
7061 ins->klass = GUINT_TO_POINTER (n);
7062 MONO_ADD_INS (cfg->cbb, ins);
7064 if (sizeof (gpointer) == 8)
7065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7069 #if SIZEOF_REGISTER == 8
7070 /* The upper word might not be zero, and we add it to a 64 bit address later */
7071 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7074 if (cfg->compile_aot) {
7075 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7077 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7078 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7079 ins->inst_p0 = table;
7080 ins->dreg = table_reg;
7081 MONO_ADD_INS (cfg->cbb, ins);
7084 /* FIXME: Use load_memindex */
7085 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7087 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7089 start_new_bblock = 1;
7090 inline_costs += (BRANCH_COST * 2);
7110 dreg = alloc_freg (cfg);
7113 dreg = alloc_lreg (cfg);
7116 dreg = alloc_preg (cfg);
7119 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7120 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7121 ins->flags |= ins_flag;
7123 MONO_ADD_INS (bblock, ins);
7138 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7139 ins->flags |= ins_flag;
7141 MONO_ADD_INS (bblock, ins);
7143 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7144 MonoInst *dummy_use;
7145 /* insert call to write barrier */
7146 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7147 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7148 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7158 MONO_INST_NEW (cfg, ins, (*ip));
7160 ins->sreg1 = sp [0]->dreg;
7161 ins->sreg2 = sp [1]->dreg;
7162 type_from_op (ins, sp [0], sp [1]);
7164 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7166 /* Use the immediate opcodes if possible */
7167 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7168 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7169 if (imm_opcode != -1) {
7170 ins->opcode = imm_opcode;
7171 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7174 sp [1]->opcode = OP_NOP;
7178 MONO_ADD_INS ((cfg)->cbb, (ins));
7180 *sp++ = mono_decompose_opcode (cfg, ins);
7197 MONO_INST_NEW (cfg, ins, (*ip));
7199 ins->sreg1 = sp [0]->dreg;
7200 ins->sreg2 = sp [1]->dreg;
7201 type_from_op (ins, sp [0], sp [1]);
7203 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7204 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7206 /* FIXME: Pass opcode to is_inst_imm */
7208 /* Use the immediate opcodes if possible */
7209 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7212 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7213 if (imm_opcode != -1) {
7214 ins->opcode = imm_opcode;
7215 if (sp [1]->opcode == OP_I8CONST) {
7216 #if SIZEOF_REGISTER == 8
7217 ins->inst_imm = sp [1]->inst_l;
7219 ins->inst_ls_word = sp [1]->inst_ls_word;
7220 ins->inst_ms_word = sp [1]->inst_ms_word;
7224 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7227 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7228 if (sp [1]->next == NULL)
7229 sp [1]->opcode = OP_NOP;
7232 MONO_ADD_INS ((cfg)->cbb, (ins));
7234 *sp++ = mono_decompose_opcode (cfg, ins);
7247 case CEE_CONV_OVF_I8:
7248 case CEE_CONV_OVF_U8:
7252 /* Special case this earlier so we have long constants in the IR */
7253 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7254 int data = sp [-1]->inst_c0;
7255 sp [-1]->opcode = OP_I8CONST;
7256 sp [-1]->type = STACK_I8;
7257 #if SIZEOF_REGISTER == 8
7258 if ((*ip) == CEE_CONV_U8)
7259 sp [-1]->inst_c0 = (guint32)data;
7261 sp [-1]->inst_c0 = data;
7263 sp [-1]->inst_ls_word = data;
7264 if ((*ip) == CEE_CONV_U8)
7265 sp [-1]->inst_ms_word = 0;
7267 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7269 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7276 case CEE_CONV_OVF_I4:
7277 case CEE_CONV_OVF_I1:
7278 case CEE_CONV_OVF_I2:
7279 case CEE_CONV_OVF_I:
7280 case CEE_CONV_OVF_U:
7283 if (sp [-1]->type == STACK_R8) {
7284 ADD_UNOP (CEE_CONV_OVF_I8);
7291 case CEE_CONV_OVF_U1:
7292 case CEE_CONV_OVF_U2:
7293 case CEE_CONV_OVF_U4:
7296 if (sp [-1]->type == STACK_R8) {
7297 ADD_UNOP (CEE_CONV_OVF_U8);
7304 case CEE_CONV_OVF_I1_UN:
7305 case CEE_CONV_OVF_I2_UN:
7306 case CEE_CONV_OVF_I4_UN:
7307 case CEE_CONV_OVF_I8_UN:
7308 case CEE_CONV_OVF_U1_UN:
7309 case CEE_CONV_OVF_U2_UN:
7310 case CEE_CONV_OVF_U4_UN:
7311 case CEE_CONV_OVF_U8_UN:
7312 case CEE_CONV_OVF_I_UN:
7313 case CEE_CONV_OVF_U_UN:
7320 CHECK_CFG_EXCEPTION;
7324 case CEE_ADD_OVF_UN:
7326 case CEE_MUL_OVF_UN:
7328 case CEE_SUB_OVF_UN:
7336 token = read32 (ip + 1);
7337 klass = mini_get_class (method, token, generic_context);
7338 CHECK_TYPELOAD (klass);
7340 if (generic_class_is_reference_type (cfg, klass)) {
7341 MonoInst *store, *load;
7342 int dreg = alloc_preg (cfg);
7344 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7345 load->flags |= ins_flag;
7346 MONO_ADD_INS (cfg->cbb, load);
7348 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7349 store->flags |= ins_flag;
7350 MONO_ADD_INS (cfg->cbb, store);
7352 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7353 MonoInst *dummy_use;
7354 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7355 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7356 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7359 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7371 token = read32 (ip + 1);
7372 klass = mini_get_class (method, token, generic_context);
7373 CHECK_TYPELOAD (klass);
7375 /* Optimize the common ldobj+stloc combination */
7385 loc_index = ip [5] - CEE_STLOC_0;
7392 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7393 CHECK_LOCAL (loc_index);
7395 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7396 ins->dreg = cfg->locals [loc_index]->dreg;
7402 /* Optimize the ldobj+stobj combination */
7403 /* The reference case ends up being a load+store anyway */
7404 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7409 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7416 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7425 CHECK_STACK_OVF (1);
7427 n = read32 (ip + 1);
7429 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7430 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7431 ins->type = STACK_OBJ;
7434 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7435 MonoInst *iargs [1];
7437 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7438 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7440 if (cfg->opt & MONO_OPT_SHARED) {
7441 MonoInst *iargs [3];
7443 if (cfg->compile_aot) {
7444 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7446 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7447 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7448 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7449 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7450 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7452 if (bblock->out_of_line) {
7453 MonoInst *iargs [2];
7455 if (image == mono_defaults.corlib) {
7457 * Avoid relocations in AOT and save some space by using a
7458 * version of helper_ldstr specialized to mscorlib.
7460 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7461 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7463 /* Avoid creating the string object */
7464 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7465 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7466 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7470 if (cfg->compile_aot) {
7471 NEW_LDSTRCONST (cfg, ins, image, n);
7473 MONO_ADD_INS (bblock, ins);
7476 NEW_PCONST (cfg, ins, NULL);
7477 ins->type = STACK_OBJ;
7478 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7480 MONO_ADD_INS (bblock, ins);
7489 MonoInst *iargs [2];
7490 MonoMethodSignature *fsig;
7493 MonoInst *vtable_arg = NULL;
7496 token = read32 (ip + 1);
7497 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7500 fsig = mono_method_get_signature (cmethod, image, token);
7504 mono_save_token_info (cfg, image, token, cmethod);
7506 if (!mono_class_init (cmethod->klass))
7509 if (cfg->generic_sharing_context)
7510 context_used = mono_method_check_context_used (cmethod);
7512 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7513 if (check_linkdemand (cfg, method, cmethod))
7515 CHECK_CFG_EXCEPTION;
7516 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7517 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7520 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7521 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7522 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7523 mono_class_vtable (cfg->domain, cmethod->klass);
7524 CHECK_TYPELOAD (cmethod->klass);
7526 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7527 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7530 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7531 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7533 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7535 CHECK_TYPELOAD (cmethod->klass);
7536 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7541 n = fsig->param_count;
7545 * Generate smaller code for the common newobj <exception> instruction in
7546 * argument checking code.
7548 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7549 is_exception_class (cmethod->klass) && n <= 2 &&
7550 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7551 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7552 MonoInst *iargs [3];
7554 g_assert (!vtable_arg);
7558 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7561 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7565 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7570 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7573 g_assert_not_reached ();
7581 /* move the args to allow room for 'this' in the first position */
7587 /* check_call_signature () requires sp[0] to be set */
7588 this_ins.type = STACK_OBJ;
7590 if (check_call_signature (cfg, fsig, sp))
7595 if (mini_class_is_system_array (cmethod->klass)) {
7596 g_assert (!vtable_arg);
7598 *sp = emit_get_rgctx_method (cfg, context_used,
7599 cmethod, MONO_RGCTX_INFO_METHOD);
7601 /* Avoid varargs in the common case */
7602 if (fsig->param_count == 1)
7603 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7604 else if (fsig->param_count == 2)
7605 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7606 else if (fsig->param_count == 3)
7607 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7609 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7610 } else if (cmethod->string_ctor) {
7611 g_assert (!context_used);
7612 g_assert (!vtable_arg);
7613 /* we simply pass a null pointer */
7614 EMIT_NEW_PCONST (cfg, *sp, NULL);
7615 /* now call the string ctor */
7616 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7618 MonoInst* callvirt_this_arg = NULL;
7620 if (cmethod->klass->valuetype) {
7621 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7622 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7623 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7628 * The code generated by mini_emit_virtual_call () expects
7629 * iargs [0] to be a boxed instance, but luckily the vcall
7630 * will be transformed into a normal call there.
7632 } else if (context_used) {
7633 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7636 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7638 CHECK_TYPELOAD (cmethod->klass);
7641 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7642 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7643 * As a workaround, we call class cctors before allocating objects.
7645 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7646 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7647 if (cfg->verbose_level > 2)
7648 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7649 class_inits = g_slist_prepend (class_inits, vtable);
7652 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7655 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7658 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7660 /* Now call the actual ctor */
7661 /* Avoid virtual calls to ctors if possible */
7662 if (cmethod->klass->marshalbyref)
7663 callvirt_this_arg = sp [0];
7666 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7667 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7668 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7673 CHECK_CFG_EXCEPTION;
7678 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7679 mono_method_check_inlining (cfg, cmethod) &&
7680 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7681 !g_list_find (dont_inline, cmethod)) {
7684 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7685 cfg->real_offset += 5;
7688 inline_costs += costs - 5;
7691 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7693 } else if (context_used &&
7694 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7695 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7696 MonoInst *cmethod_addr;
7698 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7699 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7701 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7704 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7705 callvirt_this_arg, NULL, vtable_arg);
7709 if (alloc == NULL) {
7711 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7712 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7726 token = read32 (ip + 1);
7727 klass = mini_get_class (method, token, generic_context);
7728 CHECK_TYPELOAD (klass);
7729 if (sp [0]->type != STACK_OBJ)
7732 if (cfg->generic_sharing_context)
7733 context_used = mono_class_check_context_used (klass);
7735 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7742 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7744 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7748 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7749 MonoMethod *mono_castclass;
7750 MonoInst *iargs [1];
7753 mono_castclass = mono_marshal_get_castclass (klass);
7756 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7757 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7758 g_assert (costs > 0);
7761 cfg->real_offset += 5;
7766 inline_costs += costs;
7769 ins = handle_castclass (cfg, klass, *sp, context_used);
7770 CHECK_CFG_EXCEPTION;
7780 token = read32 (ip + 1);
7781 klass = mini_get_class (method, token, generic_context);
7782 CHECK_TYPELOAD (klass);
7783 if (sp [0]->type != STACK_OBJ)
7786 if (cfg->generic_sharing_context)
7787 context_used = mono_class_check_context_used (klass);
7789 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7796 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7798 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7802 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7803 MonoMethod *mono_isinst;
7804 MonoInst *iargs [1];
7807 mono_isinst = mono_marshal_get_isinst (klass);
7810 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7811 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7812 g_assert (costs > 0);
7815 cfg->real_offset += 5;
7820 inline_costs += costs;
7823 ins = handle_isinst (cfg, klass, *sp, context_used);
7824 CHECK_CFG_EXCEPTION;
7831 case CEE_UNBOX_ANY: {
7835 token = read32 (ip + 1);
7836 klass = mini_get_class (method, token, generic_context);
7837 CHECK_TYPELOAD (klass);
7839 mono_save_token_info (cfg, image, token, klass);
7841 if (cfg->generic_sharing_context)
7842 context_used = mono_class_check_context_used (klass);
7844 if (generic_class_is_reference_type (cfg, klass)) {
7845 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7847 MonoInst *iargs [2];
7852 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7853 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7857 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7858 MonoMethod *mono_castclass;
7859 MonoInst *iargs [1];
7862 mono_castclass = mono_marshal_get_castclass (klass);
7865 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7866 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7868 g_assert (costs > 0);
7871 cfg->real_offset += 5;
7875 inline_costs += costs;
7877 ins = handle_castclass (cfg, klass, *sp, 0);
7878 CHECK_CFG_EXCEPTION;
7886 if (mono_class_is_nullable (klass)) {
7887 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7894 ins = handle_unbox (cfg, klass, sp, context_used);
7900 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7913 token = read32 (ip + 1);
7914 klass = mini_get_class (method, token, generic_context);
7915 CHECK_TYPELOAD (klass);
7917 mono_save_token_info (cfg, image, token, klass);
7919 if (cfg->generic_sharing_context)
7920 context_used = mono_class_check_context_used (klass);
7922 if (generic_class_is_reference_type (cfg, klass)) {
7928 if (klass == mono_defaults.void_class)
7930 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7932 /* frequent check in generic code: box (struct), brtrue */
7933 if (!mono_class_is_nullable (klass) &&
7934 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7935 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7937 MONO_INST_NEW (cfg, ins, OP_BR);
7938 if (*ip == CEE_BRTRUE_S) {
7941 target = ip + 1 + (signed char)(*ip);
7946 target = ip + 4 + (gint)(read32 (ip));
7949 GET_BBLOCK (cfg, tblock, target);
7950 link_bblock (cfg, bblock, tblock);
7951 ins->inst_target_bb = tblock;
7952 GET_BBLOCK (cfg, tblock, ip);
7954 * This leads to some inconsistency, since the two bblocks are
7955 * not really connected, but it is needed for handling stack
7956 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7957 * FIXME: This should only be needed if sp != stack_start, but that
7958 * doesn't work for some reason (test failure in mcs/tests on x86).
7960 link_bblock (cfg, bblock, tblock);
7961 if (sp != stack_start) {
7962 handle_stack_args (cfg, stack_start, sp - stack_start);
7964 CHECK_UNVERIFIABLE (cfg);
7966 MONO_ADD_INS (bblock, ins);
7967 start_new_bblock = 1;
7971 *sp++ = handle_box (cfg, val, klass, context_used);
7973 CHECK_CFG_EXCEPTION;
7982 token = read32 (ip + 1);
7983 klass = mini_get_class (method, token, generic_context);
7984 CHECK_TYPELOAD (klass);
7986 mono_save_token_info (cfg, image, token, klass);
7988 if (cfg->generic_sharing_context)
7989 context_used = mono_class_check_context_used (klass);
7991 if (mono_class_is_nullable (klass)) {
7994 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7995 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7999 ins = handle_unbox (cfg, klass, sp, context_used);
8009 MonoClassField *field;
8013 if (*ip == CEE_STFLD) {
8020 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8022 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8025 token = read32 (ip + 1);
8026 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8027 field = mono_method_get_wrapper_data (method, token);
8028 klass = field->parent;
8031 field = mono_field_from_token (image, token, &klass, generic_context);
8035 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8036 FIELD_ACCESS_FAILURE;
8037 mono_class_init (klass);
8039 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8040 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8041 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8042 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8045 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8046 if (*ip == CEE_STFLD) {
8047 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8049 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8050 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8051 MonoInst *iargs [5];
8054 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8055 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8056 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8060 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8061 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8062 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8063 g_assert (costs > 0);
8065 cfg->real_offset += 5;
8068 inline_costs += costs;
8070 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8075 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8077 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8078 if (sp [0]->opcode != OP_LDADDR)
8079 store->flags |= MONO_INST_FAULT;
8081 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8082 /* insert call to write barrier */
8083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8084 MonoInst *iargs [2], *dummy_use;
8087 dreg = alloc_preg (cfg);
8088 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8090 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8092 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8095 store->flags |= ins_flag;
8102 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8103 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8104 MonoInst *iargs [4];
8107 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8108 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8109 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8110 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8111 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8112 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8114 g_assert (costs > 0);
8116 cfg->real_offset += 5;
8120 inline_costs += costs;
8122 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8126 if (sp [0]->type == STACK_VTYPE) {
8129 /* Have to compute the address of the variable */
8131 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8133 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8135 g_assert (var->klass == klass);
8137 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8141 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8143 if (*ip == CEE_LDFLDA) {
8144 dreg = alloc_preg (cfg);
8146 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8147 ins->klass = mono_class_from_mono_type (field->type);
8148 ins->type = STACK_MP;
8153 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8154 load->flags |= ins_flag;
8155 if (sp [0]->opcode != OP_LDADDR)
8156 load->flags |= MONO_INST_FAULT;
8167 MonoClassField *field;
8168 gpointer addr = NULL;
8169 gboolean is_special_static;
8172 token = read32 (ip + 1);
8174 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8175 field = mono_method_get_wrapper_data (method, token);
8176 klass = field->parent;
8179 field = mono_field_from_token (image, token, &klass, generic_context);
8182 mono_class_init (klass);
8183 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8184 FIELD_ACCESS_FAILURE;
8186 /* if the class is Critical then transparent code cannot access it's fields */
8187 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8188 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8191 * We can only support shared generic static
8192 * field access on architectures where the
8193 * trampoline code has been extended to handle
8194 * the generic class init.
8196 #ifndef MONO_ARCH_VTABLE_REG
8197 GENERIC_SHARING_FAILURE (*ip);
8200 if (cfg->generic_sharing_context)
8201 context_used = mono_class_check_context_used (klass);
8203 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8205 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8206 * to be called here.
8208 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8209 mono_class_vtable (cfg->domain, klass);
8210 CHECK_TYPELOAD (klass);
8212 mono_domain_lock (cfg->domain);
8213 if (cfg->domain->special_static_fields)
8214 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8215 mono_domain_unlock (cfg->domain);
8217 is_special_static = mono_class_field_is_special_static (field);
8219 /* Generate IR to compute the field address */
8220 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8222 * Fast access to TLS data
8223 * Inline version of get_thread_static_data () in
8227 int idx, static_data_reg, array_reg, dreg;
8228 MonoInst *thread_ins;
8230 // offset &= 0x7fffffff;
8231 // idx = (offset >> 24) - 1;
8232 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8234 thread_ins = mono_get_thread_intrinsic (cfg);
8235 MONO_ADD_INS (cfg->cbb, thread_ins);
8236 static_data_reg = alloc_ireg (cfg);
8237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8239 if (cfg->compile_aot) {
8240 int offset_reg, offset2_reg, idx_reg;
8242 /* For TLS variables, this will return the TLS offset */
8243 EMIT_NEW_SFLDACONST (cfg, ins, field);
8244 offset_reg = ins->dreg;
8245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8246 idx_reg = alloc_ireg (cfg);
8247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8250 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8251 array_reg = alloc_ireg (cfg);
8252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8253 offset2_reg = alloc_ireg (cfg);
8254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8255 dreg = alloc_ireg (cfg);
8256 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8258 offset = (gsize)addr & 0x7fffffff;
8259 idx = (offset >> 24) - 1;
8261 array_reg = alloc_ireg (cfg);
8262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8263 dreg = alloc_ireg (cfg);
8264 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8266 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8267 (cfg->compile_aot && is_special_static) ||
8268 (context_used && is_special_static)) {
8269 MonoInst *iargs [2];
8271 g_assert (field->parent);
8272 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8274 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8275 field, MONO_RGCTX_INFO_CLASS_FIELD);
8277 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8279 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8280 } else if (context_used) {
8281 MonoInst *static_data;
8284 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8285 method->klass->name_space, method->klass->name, method->name,
8286 depth, field->offset);
8289 if (mono_class_needs_cctor_run (klass, method))
8290 emit_generic_class_init (cfg, klass);
8293 * The pointer we're computing here is
8295 * super_info.static_data + field->offset
8297 static_data = emit_get_rgctx_klass (cfg, context_used,
8298 klass, MONO_RGCTX_INFO_STATIC_DATA);
8300 if (field->offset == 0) {
8303 int addr_reg = mono_alloc_preg (cfg);
8304 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8306 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8307 MonoInst *iargs [2];
8309 g_assert (field->parent);
8310 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8311 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8312 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8314 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8316 CHECK_TYPELOAD (klass);
8318 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8319 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8320 if (cfg->verbose_level > 2)
8321 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8322 class_inits = g_slist_prepend (class_inits, vtable);
8324 if (cfg->run_cctors) {
8326 /* This makes it so that inlining cannot trigger */
8327 /* .cctors: too many apps depend on them */
8328 /* running with a specific order... */
8329 if (! vtable->initialized)
8331 ex = mono_runtime_class_init_full (vtable, FALSE);
8333 set_exception_object (cfg, ex);
8334 goto exception_exit;
8338 addr = (char*)vtable->data + field->offset;
8340 if (cfg->compile_aot)
8341 EMIT_NEW_SFLDACONST (cfg, ins, field);
8343 EMIT_NEW_PCONST (cfg, ins, addr);
8345 MonoInst *iargs [1];
8346 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8347 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8351 /* Generate IR to do the actual load/store operation */
8353 if (*ip == CEE_LDSFLDA) {
8354 ins->klass = mono_class_from_mono_type (field->type);
8355 ins->type = STACK_PTR;
8357 } else if (*ip == CEE_STSFLD) {
8362 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8363 store->flags |= ins_flag;
8365 gboolean is_const = FALSE;
8366 MonoVTable *vtable = NULL;
8368 if (!context_used) {
8369 vtable = mono_class_vtable (cfg->domain, klass);
8370 CHECK_TYPELOAD (klass);
8372 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8373 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8374 gpointer addr = (char*)vtable->data + field->offset;
8375 int ro_type = field->type->type;
8376 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8377 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8379 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8382 case MONO_TYPE_BOOLEAN:
8384 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8388 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8391 case MONO_TYPE_CHAR:
8393 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8397 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8402 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8406 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8412 case MONO_TYPE_FNPTR:
8413 #ifndef HAVE_MOVING_COLLECTOR
8414 case MONO_TYPE_STRING:
8415 case MONO_TYPE_OBJECT:
8416 case MONO_TYPE_CLASS:
8417 case MONO_TYPE_SZARRAY:
8418 case MONO_TYPE_ARRAY:
8420 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8421 type_to_eval_stack_type ((cfg), field->type, *sp);
8426 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8431 case MONO_TYPE_VALUETYPE:
8441 CHECK_STACK_OVF (1);
8443 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8444 load->flags |= ins_flag;
8457 token = read32 (ip + 1);
8458 klass = mini_get_class (method, token, generic_context);
8459 CHECK_TYPELOAD (klass);
8460 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8461 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8462 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8463 generic_class_is_reference_type (cfg, klass)) {
8464 MonoInst *dummy_use;
8465 /* insert call to write barrier */
8466 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8467 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8468 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8480 const char *data_ptr;
8482 guint32 field_token;
8488 token = read32 (ip + 1);
8490 klass = mini_get_class (method, token, generic_context);
8491 CHECK_TYPELOAD (klass);
8493 if (cfg->generic_sharing_context)
8494 context_used = mono_class_check_context_used (klass);
8496 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8497 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8498 ins->sreg1 = sp [0]->dreg;
8499 ins->type = STACK_I4;
8500 ins->dreg = alloc_ireg (cfg);
8501 MONO_ADD_INS (cfg->cbb, ins);
8502 *sp = mono_decompose_opcode (cfg, ins);
8507 MonoClass *array_class = mono_array_class_get (klass, 1);
8508 /* FIXME: we cannot get a managed
8509 allocator because we can't get the
8510 open generic class's vtable. We
8511 have the same problem in
8512 handle_alloc(). This
8513 needs to be solved so that we can
8514 have managed allocs of shared
8517 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8518 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8520 MonoMethod *managed_alloc = NULL;
8522 /* FIXME: Decompose later to help abcrem */
8525 args [0] = emit_get_rgctx_klass (cfg, context_used,
8526 array_class, MONO_RGCTX_INFO_VTABLE);
8531 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8533 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8535 if (cfg->opt & MONO_OPT_SHARED) {
8536 /* Decompose now to avoid problems with references to the domainvar */
8537 MonoInst *iargs [3];
8539 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8540 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8543 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8545 /* Decompose later since it is needed by abcrem */
8546 MonoClass *array_type = mono_array_class_get (klass, 1);
8547 mono_class_vtable (cfg->domain, array_type);
8548 CHECK_TYPELOAD (array_type);
8550 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8551 ins->dreg = alloc_preg (cfg);
8552 ins->sreg1 = sp [0]->dreg;
8553 ins->inst_newa_class = klass;
8554 ins->type = STACK_OBJ;
8556 MONO_ADD_INS (cfg->cbb, ins);
8557 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8558 cfg->cbb->has_array_access = TRUE;
8560 /* Needed so mono_emit_load_get_addr () gets called */
8561 mono_get_got_var (cfg);
8571 * we inline/optimize the initialization sequence if possible.
8572 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8573 * for small sizes open code the memcpy
8574 * ensure the rva field is big enough
8576 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8577 MonoMethod *memcpy_method = get_memcpy_method ();
8578 MonoInst *iargs [3];
8579 int add_reg = alloc_preg (cfg);
8581 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8582 if (cfg->compile_aot) {
8583 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8585 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8587 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8588 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8597 if (sp [0]->type != STACK_OBJ)
8600 dreg = alloc_preg (cfg);
8601 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8602 ins->dreg = alloc_preg (cfg);
8603 ins->sreg1 = sp [0]->dreg;
8604 ins->type = STACK_I4;
8605 /* This flag will be inherited by the decomposition */
8606 ins->flags |= MONO_INST_FAULT;
8607 MONO_ADD_INS (cfg->cbb, ins);
8608 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8609 cfg->cbb->has_array_access = TRUE;
8617 if (sp [0]->type != STACK_OBJ)
8620 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8622 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8623 CHECK_TYPELOAD (klass);
8624 /* we need to make sure that this array is exactly the type it needs
8625 * to be for correctness. the wrappers are lax with their usage
8626 * so we need to ignore them here
8628 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8629 MonoClass *array_class = mono_array_class_get (klass, 1);
8630 mini_emit_check_array_type (cfg, sp [0], array_class);
8631 CHECK_TYPELOAD (array_class);
8635 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8650 case CEE_LDELEM_REF: {
8656 if (*ip == CEE_LDELEM) {
8658 token = read32 (ip + 1);
8659 klass = mini_get_class (method, token, generic_context);
8660 CHECK_TYPELOAD (klass);
8661 mono_class_init (klass);
8664 klass = array_access_to_klass (*ip);
8666 if (sp [0]->type != STACK_OBJ)
8669 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8671 if (sp [1]->opcode == OP_ICONST) {
8672 int array_reg = sp [0]->dreg;
8673 int index_reg = sp [1]->dreg;
8674 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8676 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8677 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8679 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8683 if (*ip == CEE_LDELEM)
8696 case CEE_STELEM_REF:
8703 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8705 if (*ip == CEE_STELEM) {
8707 token = read32 (ip + 1);
8708 klass = mini_get_class (method, token, generic_context);
8709 CHECK_TYPELOAD (klass);
8710 mono_class_init (klass);
8713 klass = array_access_to_klass (*ip);
8715 if (sp [0]->type != STACK_OBJ)
8718 /* storing a NULL doesn't need any of the complex checks in stelemref */
8719 if (generic_class_is_reference_type (cfg, klass) &&
8720 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8721 MonoMethod* helper = mono_marshal_get_stelemref ();
8722 MonoInst *iargs [3];
8724 if (sp [0]->type != STACK_OBJ)
8726 if (sp [2]->type != STACK_OBJ)
8733 mono_emit_method_call (cfg, helper, iargs, NULL);
8735 if (sp [1]->opcode == OP_ICONST) {
8736 int array_reg = sp [0]->dreg;
8737 int index_reg = sp [1]->dreg;
8738 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8740 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8741 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8743 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8744 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8748 if (*ip == CEE_STELEM)
8755 case CEE_CKFINITE: {
8759 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8760 ins->sreg1 = sp [0]->dreg;
8761 ins->dreg = alloc_freg (cfg);
8762 ins->type = STACK_R8;
8763 MONO_ADD_INS (bblock, ins);
8765 *sp++ = mono_decompose_opcode (cfg, ins);
8770 case CEE_REFANYVAL: {
8771 MonoInst *src_var, *src;
8773 int klass_reg = alloc_preg (cfg);
8774 int dreg = alloc_preg (cfg);
8777 MONO_INST_NEW (cfg, ins, *ip);
8780 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8781 CHECK_TYPELOAD (klass);
8782 mono_class_init (klass);
8784 if (cfg->generic_sharing_context)
8785 context_used = mono_class_check_context_used (klass);
8788 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8790 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8791 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8795 MonoInst *klass_ins;
8797 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8798 klass, MONO_RGCTX_INFO_KLASS);
8801 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8802 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8804 mini_emit_class_check (cfg, klass_reg, klass);
8806 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8807 ins->type = STACK_MP;
8812 case CEE_MKREFANY: {
8813 MonoInst *loc, *addr;
8816 MONO_INST_NEW (cfg, ins, *ip);
8819 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8820 CHECK_TYPELOAD (klass);
8821 mono_class_init (klass);
8823 if (cfg->generic_sharing_context)
8824 context_used = mono_class_check_context_used (klass);
8826 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8827 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8830 MonoInst *const_ins;
8831 int type_reg = alloc_preg (cfg);
8833 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8834 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8836 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8837 } else if (cfg->compile_aot) {
8838 int const_reg = alloc_preg (cfg);
8839 int type_reg = alloc_preg (cfg);
8841 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8842 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8844 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8846 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8849 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8851 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8852 ins->type = STACK_VTYPE;
8853 ins->klass = mono_defaults.typed_reference_class;
8860 MonoClass *handle_class;
8862 CHECK_STACK_OVF (1);
8865 n = read32 (ip + 1);
8867 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8868 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8869 handle = mono_method_get_wrapper_data (method, n);
8870 handle_class = mono_method_get_wrapper_data (method, n + 1);
8871 if (handle_class == mono_defaults.typehandle_class)
8872 handle = &((MonoClass*)handle)->byval_arg;
8875 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8879 mono_class_init (handle_class);
8880 if (cfg->generic_sharing_context) {
8881 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8882 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8883 /* This case handles ldtoken
8884 of an open type, like for
8887 } else if (handle_class == mono_defaults.typehandle_class) {
8888 /* If we get a MONO_TYPE_CLASS
8889 then we need to provide the
8891 instantiation of it. */
8892 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8895 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8896 } else if (handle_class == mono_defaults.fieldhandle_class)
8897 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8898 else if (handle_class == mono_defaults.methodhandle_class)
8899 context_used = mono_method_check_context_used (handle);
8901 g_assert_not_reached ();
8904 if ((cfg->opt & MONO_OPT_SHARED) &&
8905 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8906 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8907 MonoInst *addr, *vtvar, *iargs [3];
8908 int method_context_used;
8910 if (cfg->generic_sharing_context)
8911 method_context_used = mono_method_check_context_used (method);
8913 method_context_used = 0;
8915 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8917 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8918 EMIT_NEW_ICONST (cfg, iargs [1], n);
8919 if (method_context_used) {
8920 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8921 method, MONO_RGCTX_INFO_METHOD);
8922 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8924 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8925 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8927 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8931 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8933 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8934 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8935 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8936 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8937 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8938 MonoClass *tclass = mono_class_from_mono_type (handle);
8940 mono_class_init (tclass);
8942 ins = emit_get_rgctx_klass (cfg, context_used,
8943 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8944 } else if (cfg->compile_aot) {
8945 if (method->wrapper_type) {
8946 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8947 /* Special case for static synchronized wrappers */
8948 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8950 /* FIXME: n is not a normal token */
8951 cfg->disable_aot = TRUE;
8952 EMIT_NEW_PCONST (cfg, ins, NULL);
8955 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8958 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8960 ins->type = STACK_OBJ;
8961 ins->klass = cmethod->klass;
8964 MonoInst *addr, *vtvar;
8966 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8969 if (handle_class == mono_defaults.typehandle_class) {
8970 ins = emit_get_rgctx_klass (cfg, context_used,
8971 mono_class_from_mono_type (handle),
8972 MONO_RGCTX_INFO_TYPE);
8973 } else if (handle_class == mono_defaults.methodhandle_class) {
8974 ins = emit_get_rgctx_method (cfg, context_used,
8975 handle, MONO_RGCTX_INFO_METHOD);
8976 } else if (handle_class == mono_defaults.fieldhandle_class) {
8977 ins = emit_get_rgctx_field (cfg, context_used,
8978 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8980 g_assert_not_reached ();
8982 } else if (cfg->compile_aot) {
8983 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8985 EMIT_NEW_PCONST (cfg, ins, handle);
8987 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8988 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8989 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8999 MONO_INST_NEW (cfg, ins, OP_THROW);
9001 ins->sreg1 = sp [0]->dreg;
9003 bblock->out_of_line = TRUE;
9004 MONO_ADD_INS (bblock, ins);
9005 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9006 MONO_ADD_INS (bblock, ins);
9009 link_bblock (cfg, bblock, end_bblock);
9010 start_new_bblock = 1;
9012 case CEE_ENDFINALLY:
9013 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9014 MONO_ADD_INS (bblock, ins);
9016 start_new_bblock = 1;
9019 * Control will leave the method so empty the stack, otherwise
9020 * the next basic block will start with a nonempty stack.
9022 while (sp != stack_start) {
9030 if (*ip == CEE_LEAVE) {
9032 target = ip + 5 + (gint32)read32(ip + 1);
9035 target = ip + 2 + (signed char)(ip [1]);
9038 /* empty the stack */
9039 while (sp != stack_start) {
9044 * If this leave statement is in a catch block, check for a
9045 * pending exception, and rethrow it if necessary.
9046 * We avoid doing this in runtime invoke wrappers, since those are called
9047 * by native code which expects the wrapper to catch all exceptions.
9049 for (i = 0; i < header->num_clauses; ++i) {
9050 MonoExceptionClause *clause = &header->clauses [i];
9053 * Use <= in the final comparison to handle clauses with multiple
9054 * leave statements, like in bug #78024.
9055 * The ordering of the exception clauses guarantees that we find the
9058 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9060 MonoBasicBlock *dont_throw;
9065 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9068 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9070 NEW_BBLOCK (cfg, dont_throw);
9073 * Currently, we always rethrow the abort exception, despite the
9074 * fact that this is not correct. See thread6.cs for an example.
9075 * But propagating the abort exception is more important than
9076 * getting the semantics right.
9078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9080 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9082 MONO_START_BB (cfg, dont_throw);
9087 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9089 MonoExceptionClause *clause;
9091 for (tmp = handlers; tmp; tmp = tmp->next) {
9093 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9095 link_bblock (cfg, bblock, tblock);
9096 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9097 ins->inst_target_bb = tblock;
9098 ins->inst_eh_block = clause;
9099 MONO_ADD_INS (bblock, ins);
9100 bblock->has_call_handler = 1;
9101 if (COMPILE_LLVM (cfg)) {
9102 MonoBasicBlock *target_bb;
9105 * Link the finally bblock with the target, since it will
9106 * conceptually branch there.
9107 * FIXME: Have to link the bblock containing the endfinally.
9109 GET_BBLOCK (cfg, target_bb, target);
9110 link_bblock (cfg, tblock, target_bb);
9113 g_list_free (handlers);
9116 MONO_INST_NEW (cfg, ins, OP_BR);
9117 MONO_ADD_INS (bblock, ins);
9118 GET_BBLOCK (cfg, tblock, target);
9119 link_bblock (cfg, bblock, tblock);
9120 ins->inst_target_bb = tblock;
9121 start_new_bblock = 1;
9123 if (*ip == CEE_LEAVE)
9132 * Mono specific opcodes
9134 case MONO_CUSTOM_PREFIX: {
9136 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9140 case CEE_MONO_ICALL: {
9142 MonoJitICallInfo *info;
9144 token = read32 (ip + 2);
9145 func = mono_method_get_wrapper_data (method, token);
9146 info = mono_find_jit_icall_by_addr (func);
9149 CHECK_STACK (info->sig->param_count);
9150 sp -= info->sig->param_count;
9152 ins = mono_emit_jit_icall (cfg, info->func, sp);
9153 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9157 inline_costs += 10 * num_calls++;
9161 case CEE_MONO_LDPTR: {
9164 CHECK_STACK_OVF (1);
9166 token = read32 (ip + 2);
9168 ptr = mono_method_get_wrapper_data (method, token);
9169 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9170 MonoJitICallInfo *callinfo;
9171 const char *icall_name;
9173 icall_name = method->name + strlen ("__icall_wrapper_");
9174 g_assert (icall_name);
9175 callinfo = mono_find_jit_icall_by_name (icall_name);
9176 g_assert (callinfo);
9178 if (ptr == callinfo->func) {
9179 /* Will be transformed into an AOTCONST later */
9180 EMIT_NEW_PCONST (cfg, ins, ptr);
9186 /* FIXME: Generalize this */
9187 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9188 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9193 EMIT_NEW_PCONST (cfg, ins, ptr);
9196 inline_costs += 10 * num_calls++;
9197 /* Can't embed random pointers into AOT code */
9198 cfg->disable_aot = 1;
9201 case CEE_MONO_ICALL_ADDR: {
9202 MonoMethod *cmethod;
9205 CHECK_STACK_OVF (1);
9207 token = read32 (ip + 2);
9209 cmethod = mono_method_get_wrapper_data (method, token);
9211 if (cfg->compile_aot) {
9212 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9214 ptr = mono_lookup_internal_call (cmethod);
9216 EMIT_NEW_PCONST (cfg, ins, ptr);
9222 case CEE_MONO_VTADDR: {
9223 MonoInst *src_var, *src;
9229 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9230 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9235 case CEE_MONO_NEWOBJ: {
9236 MonoInst *iargs [2];
9238 CHECK_STACK_OVF (1);
9240 token = read32 (ip + 2);
9241 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9242 mono_class_init (klass);
9243 NEW_DOMAINCONST (cfg, iargs [0]);
9244 MONO_ADD_INS (cfg->cbb, iargs [0]);
9245 NEW_CLASSCONST (cfg, iargs [1], klass);
9246 MONO_ADD_INS (cfg->cbb, iargs [1]);
9247 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9249 inline_costs += 10 * num_calls++;
9252 case CEE_MONO_OBJADDR:
9255 MONO_INST_NEW (cfg, ins, OP_MOVE);
9256 ins->dreg = alloc_preg (cfg);
9257 ins->sreg1 = sp [0]->dreg;
9258 ins->type = STACK_MP;
9259 MONO_ADD_INS (cfg->cbb, ins);
9263 case CEE_MONO_LDNATIVEOBJ:
9265 * Similar to LDOBJ, but instead load the unmanaged
9266 * representation of the vtype to the stack.
9271 token = read32 (ip + 2);
9272 klass = mono_method_get_wrapper_data (method, token);
9273 g_assert (klass->valuetype);
9274 mono_class_init (klass);
9277 MonoInst *src, *dest, *temp;
9280 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9281 temp->backend.is_pinvoke = 1;
9282 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9283 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9285 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9286 dest->type = STACK_VTYPE;
9287 dest->klass = klass;
9293 case CEE_MONO_RETOBJ: {
9295 * Same as RET, but return the native representation of a vtype
9298 g_assert (cfg->ret);
9299 g_assert (mono_method_signature (method)->pinvoke);
9304 token = read32 (ip + 2);
9305 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9307 if (!cfg->vret_addr) {
9308 g_assert (cfg->ret_var_is_local);
9310 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9312 EMIT_NEW_RETLOADA (cfg, ins);
9314 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9316 if (sp != stack_start)
9319 MONO_INST_NEW (cfg, ins, OP_BR);
9320 ins->inst_target_bb = end_bblock;
9321 MONO_ADD_INS (bblock, ins);
9322 link_bblock (cfg, bblock, end_bblock);
9323 start_new_bblock = 1;
9327 case CEE_MONO_CISINST:
9328 case CEE_MONO_CCASTCLASS: {
9333 token = read32 (ip + 2);
9334 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9335 if (ip [1] == CEE_MONO_CISINST)
9336 ins = handle_cisinst (cfg, klass, sp [0]);
9338 ins = handle_ccastclass (cfg, klass, sp [0]);
9344 case CEE_MONO_SAVE_LMF:
9345 case CEE_MONO_RESTORE_LMF:
9346 #ifdef MONO_ARCH_HAVE_LMF_OPS
9347 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9348 MONO_ADD_INS (bblock, ins);
9349 cfg->need_lmf_area = TRUE;
9353 case CEE_MONO_CLASSCONST:
9354 CHECK_STACK_OVF (1);
9356 token = read32 (ip + 2);
9357 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9360 inline_costs += 10 * num_calls++;
9362 case CEE_MONO_NOT_TAKEN:
9363 bblock->out_of_line = TRUE;
9367 CHECK_STACK_OVF (1);
9369 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9370 ins->dreg = alloc_preg (cfg);
9371 ins->inst_offset = (gint32)read32 (ip + 2);
9372 ins->type = STACK_PTR;
9373 MONO_ADD_INS (bblock, ins);
9377 case CEE_MONO_DYN_CALL: {
9380 /* It would be easier to call a trampoline, but that would put an
9381 * extra frame on the stack, confusing exception handling. So
9382 * implement it inline using an opcode for now.
9385 if (!cfg->dyn_call_var) {
9386 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9387 /* prevent it from being register allocated */
9388 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9391 /* Has to use a call inst since the local regalloc expects it */
9392 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9393 ins = (MonoInst*)call;
9395 ins->sreg1 = sp [0]->dreg;
9396 ins->sreg2 = sp [1]->dreg;
9397 MONO_ADD_INS (bblock, ins);
9399 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9400 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9404 inline_costs += 10 * num_calls++;
9409 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9419 /* somewhat similar to LDTOKEN */
9420 MonoInst *addr, *vtvar;
9421 CHECK_STACK_OVF (1);
9422 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9424 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9425 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9427 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9428 ins->type = STACK_VTYPE;
9429 ins->klass = mono_defaults.argumenthandle_class;
9442 * The following transforms:
9443 * CEE_CEQ into OP_CEQ
9444 * CEE_CGT into OP_CGT
9445 * CEE_CGT_UN into OP_CGT_UN
9446 * CEE_CLT into OP_CLT
9447 * CEE_CLT_UN into OP_CLT_UN
9449 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9451 MONO_INST_NEW (cfg, ins, cmp->opcode);
9453 cmp->sreg1 = sp [0]->dreg;
9454 cmp->sreg2 = sp [1]->dreg;
9455 type_from_op (cmp, sp [0], sp [1]);
9457 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9458 cmp->opcode = OP_LCOMPARE;
9459 else if (sp [0]->type == STACK_R8)
9460 cmp->opcode = OP_FCOMPARE;
9462 cmp->opcode = OP_ICOMPARE;
9463 MONO_ADD_INS (bblock, cmp);
9464 ins->type = STACK_I4;
9465 ins->dreg = alloc_dreg (cfg, ins->type);
9466 type_from_op (ins, sp [0], sp [1]);
9468 if (cmp->opcode == OP_FCOMPARE) {
9470 * The backends expect the fceq opcodes to do the
9473 cmp->opcode = OP_NOP;
9474 ins->sreg1 = cmp->sreg1;
9475 ins->sreg2 = cmp->sreg2;
9477 MONO_ADD_INS (bblock, ins);
9484 MonoMethod *cil_method;
9485 gboolean needs_static_rgctx_invoke;
9487 CHECK_STACK_OVF (1);
9489 n = read32 (ip + 2);
9490 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9493 mono_class_init (cmethod->klass);
9495 mono_save_token_info (cfg, image, n, cmethod);
9497 if (cfg->generic_sharing_context)
9498 context_used = mono_method_check_context_used (cmethod);
9500 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9502 cil_method = cmethod;
9503 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9504 METHOD_ACCESS_FAILURE;
9506 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9507 if (check_linkdemand (cfg, method, cmethod))
9509 CHECK_CFG_EXCEPTION;
9510 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9511 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9515 * Optimize the common case of ldftn+delegate creation
9517 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9518 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9519 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9521 int invoke_context_used = 0;
9523 invoke = mono_get_delegate_invoke (ctor_method->klass);
9524 if (!invoke || !mono_method_signature (invoke))
9527 if (cfg->generic_sharing_context)
9528 invoke_context_used = mono_method_check_context_used (invoke);
9530 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9531 /* FIXME: SGEN support */
9532 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9533 MonoInst *target_ins;
9536 if (cfg->verbose_level > 3)
9537 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9538 target_ins = sp [-1];
9540 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9541 CHECK_CFG_EXCEPTION;
9550 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9551 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9555 inline_costs += 10 * num_calls++;
9558 case CEE_LDVIRTFTN: {
9563 n = read32 (ip + 2);
9564 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9567 mono_class_init (cmethod->klass);
9569 if (cfg->generic_sharing_context)
9570 context_used = mono_method_check_context_used (cmethod);
9572 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9573 if (check_linkdemand (cfg, method, cmethod))
9575 CHECK_CFG_EXCEPTION;
9576 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9577 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9583 args [1] = emit_get_rgctx_method (cfg, context_used,
9584 cmethod, MONO_RGCTX_INFO_METHOD);
9587 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9589 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9592 inline_costs += 10 * num_calls++;
9596 CHECK_STACK_OVF (1);
9598 n = read16 (ip + 2);
9600 EMIT_NEW_ARGLOAD (cfg, ins, n);
9605 CHECK_STACK_OVF (1);
9607 n = read16 (ip + 2);
9609 NEW_ARGLOADA (cfg, ins, n);
9610 MONO_ADD_INS (cfg->cbb, ins);
9618 n = read16 (ip + 2);
9620 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9622 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9626 CHECK_STACK_OVF (1);
9628 n = read16 (ip + 2);
9630 EMIT_NEW_LOCLOAD (cfg, ins, n);
9635 unsigned char *tmp_ip;
9636 CHECK_STACK_OVF (1);
9638 n = read16 (ip + 2);
9641 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9647 EMIT_NEW_LOCLOADA (cfg, ins, n);
9656 n = read16 (ip + 2);
9658 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9660 emit_stloc_ir (cfg, sp, header, n);
9667 if (sp != stack_start)
9669 if (cfg->method != method)
9671 * Inlining this into a loop in a parent could lead to
9672 * stack overflows which is different behavior than the
9673 * non-inlined case, thus disable inlining in this case.
9675 goto inline_failure;
9677 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9678 ins->dreg = alloc_preg (cfg);
9679 ins->sreg1 = sp [0]->dreg;
9680 ins->type = STACK_PTR;
9681 MONO_ADD_INS (cfg->cbb, ins);
9683 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9685 ins->flags |= MONO_INST_INIT;
9690 case CEE_ENDFILTER: {
9691 MonoExceptionClause *clause, *nearest;
9692 int cc, nearest_num;
9696 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9698 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9699 ins->sreg1 = (*sp)->dreg;
9700 MONO_ADD_INS (bblock, ins);
9701 start_new_bblock = 1;
9706 for (cc = 0; cc < header->num_clauses; ++cc) {
9707 clause = &header->clauses [cc];
9708 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9709 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9710 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9716 if ((ip - header->code) != nearest->handler_offset)
9721 case CEE_UNALIGNED_:
9722 ins_flag |= MONO_INST_UNALIGNED;
9723 /* FIXME: record alignment? we can assume 1 for now */
9728 ins_flag |= MONO_INST_VOLATILE;
9732 ins_flag |= MONO_INST_TAILCALL;
9733 cfg->flags |= MONO_CFG_HAS_TAIL;
9734 /* Can't inline tail calls at this time */
9735 inline_costs += 100000;
9742 token = read32 (ip + 2);
9743 klass = mini_get_class (method, token, generic_context);
9744 CHECK_TYPELOAD (klass);
9745 if (generic_class_is_reference_type (cfg, klass))
9746 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9748 mini_emit_initobj (cfg, *sp, NULL, klass);
9752 case CEE_CONSTRAINED_:
9754 token = read32 (ip + 2);
9755 if (method->wrapper_type != MONO_WRAPPER_NONE)
9756 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9758 constrained_call = mono_class_get_full (image, token, generic_context);
9759 CHECK_TYPELOAD (constrained_call);
9764 MonoInst *iargs [3];
9768 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9769 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9770 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9771 /* emit_memset only works when val == 0 */
9772 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9777 if (ip [1] == CEE_CPBLK) {
9778 MonoMethod *memcpy_method = get_memcpy_method ();
9779 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9781 MonoMethod *memset_method = get_memset_method ();
9782 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9792 ins_flag |= MONO_INST_NOTYPECHECK;
9794 ins_flag |= MONO_INST_NORANGECHECK;
9795 /* we ignore the no-nullcheck for now since we
9796 * really do it explicitly only when doing callvirt->call
9802 int handler_offset = -1;
9804 for (i = 0; i < header->num_clauses; ++i) {
9805 MonoExceptionClause *clause = &header->clauses [i];
9806 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9807 handler_offset = clause->handler_offset;
9812 bblock->flags |= BB_EXCEPTION_UNSAFE;
9814 g_assert (handler_offset != -1);
9816 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9817 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9818 ins->sreg1 = load->dreg;
9819 MONO_ADD_INS (bblock, ins);
9821 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9822 MONO_ADD_INS (bblock, ins);
9825 link_bblock (cfg, bblock, end_bblock);
9826 start_new_bblock = 1;
9834 CHECK_STACK_OVF (1);
9836 token = read32 (ip + 2);
9837 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9838 MonoType *type = mono_type_create_from_typespec (image, token);
9839 token = mono_type_size (type, &ialign);
9841 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9842 CHECK_TYPELOAD (klass);
9843 mono_class_init (klass);
9844 token = mono_class_value_size (klass, &align);
9846 EMIT_NEW_ICONST (cfg, ins, token);
9851 case CEE_REFANYTYPE: {
9852 MonoInst *src_var, *src;
9858 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9860 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9861 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9862 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9880 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9890 g_warning ("opcode 0x%02x not handled", *ip);
9894 if (start_new_bblock != 1)
9897 bblock->cil_length = ip - bblock->cil_code;
9898 bblock->next_bb = end_bblock;
9900 if (cfg->method == method && cfg->domainvar) {
9902 MonoInst *get_domain;
9904 cfg->cbb = init_localsbb;
9906 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9907 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9910 get_domain->dreg = alloc_preg (cfg);
9911 MONO_ADD_INS (cfg->cbb, get_domain);
9913 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9914 MONO_ADD_INS (cfg->cbb, store);
9917 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9918 if (cfg->compile_aot)
9919 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9920 mono_get_got_var (cfg);
9923 if (cfg->method == method && cfg->got_var)
9924 mono_emit_load_got_addr (cfg);
9929 cfg->cbb = init_localsbb;
9931 for (i = 0; i < header->num_locals; ++i) {
9932 MonoType *ptype = header->locals [i];
9933 int t = ptype->type;
9934 dreg = cfg->locals [i]->dreg;
9936 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9937 t = mono_class_enum_basetype (ptype->data.klass)->type;
9939 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9940 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9941 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9942 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9943 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9944 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9945 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9946 ins->type = STACK_R8;
9947 ins->inst_p0 = (void*)&r8_0;
9948 ins->dreg = alloc_dreg (cfg, STACK_R8);
9949 MONO_ADD_INS (init_localsbb, ins);
9950 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9951 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9952 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9953 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9955 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9960 if (cfg->init_ref_vars && cfg->method == method) {
9961 /* Emit initialization for ref vars */
9962 // FIXME: Avoid duplication initialization for IL locals.
9963 for (i = 0; i < cfg->num_varinfo; ++i) {
9964 MonoInst *ins = cfg->varinfo [i];
9966 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9967 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9971 /* Add a sequence point for method entry/exit events */
9973 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9974 MONO_ADD_INS (init_localsbb, ins);
9975 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9976 MONO_ADD_INS (cfg->bb_exit, ins);
9981 if (cfg->method == method) {
9983 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9984 bb->region = mono_find_block_region (cfg, bb->real_offset);
9986 mono_create_spvar_for_region (cfg, bb->region);
9987 if (cfg->verbose_level > 2)
9988 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9992 g_slist_free (class_inits);
9993 dont_inline = g_list_remove (dont_inline, method);
9995 if (inline_costs < 0) {
9998 /* Method is too large */
9999 mname = mono_method_full_name (method, TRUE);
10000 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10001 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10003 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10004 mono_basic_block_free (original_bb);
10008 if ((cfg->verbose_level > 2) && (cfg->method == method))
10009 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10011 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10012 mono_basic_block_free (original_bb);
10013 return inline_costs;
10016 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10023 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10027 set_exception_type_from_invalid_il (cfg, method, ip);
10031 g_slist_free (class_inits);
10032 mono_basic_block_free (original_bb);
10033 dont_inline = g_list_remove (dont_inline, method);
10034 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode (store a register to [basereg+offset])
 * to its *_MEMBASE_IMM counterpart (store an immediate), used when the value
 * being stored is known to be a constant.  Aborts on any opcode that has no
 * immediate form.
 */
10039 store_membase_reg_to_store_membase_imm (int opcode)
10042 case OP_STORE_MEMBASE_REG:
10043 return OP_STORE_MEMBASE_IMM;
10044 case OP_STOREI1_MEMBASE_REG:
10045 return OP_STOREI1_MEMBASE_IMM;
10046 case OP_STOREI2_MEMBASE_REG:
10047 return OP_STOREI2_MEMBASE_IMM;
10048 case OP_STOREI4_MEMBASE_REG:
10049 return OP_STOREI4_MEMBASE_IMM;
10050 case OP_STOREI8_MEMBASE_REG:
10051 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes */
10053 g_assert_not_reached ();
10059 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode to the variant which takes an immediate as its second
 * operand (e.g. OP_IADD -> OP_IADD_IMM), used by the constant-folding /
 * immediate-operand lowering passes.
 */
10062 mono_op_to_op_imm (int opcode)
/* 32 bit integer arithmetic and shifts */
10066 return OP_IADD_IMM;
10068 return OP_ISUB_IMM;
10070 return OP_IDIV_IMM;
10072 return OP_IDIV_UN_IMM;
10074 return OP_IREM_IMM;
10076 return OP_IREM_UN_IMM;
10078 return OP_IMUL_IMM;
10080 return OP_IAND_IMM;
10084 return OP_IXOR_IMM;
10086 return OP_ISHL_IMM;
10088 return OP_ISHR_IMM;
10090 return OP_ISHR_UN_IMM;
/* 64 bit integer arithmetic and shifts */
10093 return OP_LADD_IMM;
10095 return OP_LSUB_IMM;
10097 return OP_LAND_IMM;
10101 return OP_LXOR_IMM;
10103 return OP_LSHL_IMM;
10105 return OP_LSHR_IMM;
10107 return OP_LSHR_UN_IMM;
/* compares */
10110 return OP_COMPARE_IMM;
10112 return OP_ICOMPARE_IMM;
10114 return OP_LCOMPARE_IMM;
/* stores: reuse the immediate-store forms */
10116 case OP_STORE_MEMBASE_REG:
10117 return OP_STORE_MEMBASE_IMM;
10118 case OP_STOREI1_MEMBASE_REG:
10119 return OP_STOREI1_MEMBASE_IMM;
10120 case OP_STOREI2_MEMBASE_REG:
10121 return OP_STOREI2_MEMBASE_IMM;
10122 case OP_STOREI4_MEMBASE_REG:
10123 return OP_STOREI4_MEMBASE_IMM;
/* architecture specific opcodes */
10125 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10127 return OP_X86_PUSH_IMM;
10128 case OP_X86_COMPARE_MEMBASE_REG:
10129 return OP_X86_COMPARE_MEMBASE_IMM;
10131 #if defined(TARGET_AMD64)
10132 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10133 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10135 case OP_VOIDCALL_REG:
10136 return OP_VOIDCALL;
10144 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * machine-level OP_LOAD*_MEMBASE opcode.  Aborts on unknown opcodes.
 */
10151 ldind_to_load_membase (int opcode)
10155 return OP_LOADI1_MEMBASE;
10157 return OP_LOADU1_MEMBASE;
10159 return OP_LOADI2_MEMBASE;
10161 return OP_LOADU2_MEMBASE;
10163 return OP_LOADI4_MEMBASE;
10165 return OP_LOADU4_MEMBASE;
10167 return OP_LOAD_MEMBASE;
/* Object references load as a full native-word load as well */
10168 case CEE_LDIND_REF:
10169 return OP_LOAD_MEMBASE;
10171 return OP_LOADI8_MEMBASE;
10173 return OP_LOADR4_MEMBASE;
10175 return OP_LOADR8_MEMBASE;
10177 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * machine-level OP_STORE*_MEMBASE_REG opcode.  Aborts on unknown opcodes.
 */
10184 stind_to_store_membase (int opcode)
10188 return OP_STOREI1_MEMBASE_REG;
10190 return OP_STOREI2_MEMBASE_REG;
10192 return OP_STOREI4_MEMBASE_REG;
/* Object references store as a full native-word store */
10194 case CEE_STIND_REF:
10195 return OP_STORE_MEMBASE_REG;
10197 return OP_STOREI8_MEMBASE_REG;
10199 return OP_STORER4_MEMBASE_REG;
10201 return OP_STORER8_MEMBASE_REG;
10203 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from [basereg+offset]) to the
 * OP_LOAD*_MEM form which loads from an absolute address.  Only supported
 * on x86/amd64; elsewhere no mapping exists.
 */
10210 mono_load_membase_to_load_mem (int opcode)
10212 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10213 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10215 case OP_LOAD_MEMBASE:
10216 return OP_LOAD_MEM;
10217 case OP_LOADU1_MEMBASE:
10218 return OP_LOADU1_MEM;
10219 case OP_LOADU2_MEMBASE:
10220 return OP_LOADU2_MEM;
10221 case OP_LOADI4_MEMBASE:
10222 return OP_LOADI4_MEM;
10223 case OP_LOADU4_MEMBASE:
10224 return OP_LOADU4_MEM;
/* 64 bit loads only have a MEM form on 64 bit hosts */
10225 #if SIZEOF_REGISTER == 8
10226 case OP_LOADI8_MEMBASE:
10227 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored by STORE_OPCODE,
 * return the x86/amd64 read-modify-write *_MEMBASE variant which operates
 * directly on the memory destination, so the separate load+op+store can be
 * fused into one instruction.  The mapping is only valid for the native-word
 * sized stores checked below.
 */
10236 op_to_op_dest_membase (int store_opcode, int opcode)
10238 #if defined(TARGET_X86)
/* Only fuse full-word stores on x86 */
10239 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10244 return OP_X86_ADD_MEMBASE_REG;
10246 return OP_X86_SUB_MEMBASE_REG;
10248 return OP_X86_AND_MEMBASE_REG;
10250 return OP_X86_OR_MEMBASE_REG;
10252 return OP_X86_XOR_MEMBASE_REG;
10255 return OP_X86_ADD_MEMBASE_IMM;
10258 return OP_X86_SUB_MEMBASE_IMM;
10261 return OP_X86_AND_MEMBASE_IMM;
10264 return OP_X86_OR_MEMBASE_IMM;
10267 return OP_X86_XOR_MEMBASE_IMM;
10273 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores */
10274 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10279 return OP_X86_ADD_MEMBASE_REG;
10281 return OP_X86_SUB_MEMBASE_REG;
10283 return OP_X86_AND_MEMBASE_REG;
10285 return OP_X86_OR_MEMBASE_REG;
10287 return OP_X86_XOR_MEMBASE_REG;
10289 return OP_X86_ADD_MEMBASE_IMM;
10291 return OP_X86_SUB_MEMBASE_IMM;
10293 return OP_X86_AND_MEMBASE_IMM;
10295 return OP_X86_OR_MEMBASE_IMM;
10297 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants of the RMW opcodes */
10299 return OP_AMD64_ADD_MEMBASE_REG;
10301 return OP_AMD64_SUB_MEMBASE_REG;
10303 return OP_AMD64_AND_MEMBASE_REG;
10305 return OP_AMD64_OR_MEMBASE_REG;
10307 return OP_AMD64_XOR_MEMBASE_REG;
10310 return OP_AMD64_ADD_MEMBASE_IMM;
10313 return OP_AMD64_SUB_MEMBASE_IMM;
10316 return OP_AMD64_AND_MEMBASE_IMM;
10319 return OP_AMD64_OR_MEMBASE_IMM;
10322 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that consumes it:
 * on x86/amd64 a SETcc can write its result directly to memory
 * (OP_X86_SET*_MEMBASE), avoiding the intermediate register.
 */
10332 op_to_op_store_membase (int store_opcode, int opcode)
10334 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10337 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10338 return OP_X86_SETEQ_MEMBASE;
10340 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10341 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose FIRST source comes straight from a memory load
 * (LOAD_OPCODE), return the x86/amd64 *_MEMBASE variant which reads that
 * operand from memory directly, so the separate load can be eliminated.
 */
10349 op_to_op_src1_membase (int load_opcode, int opcode)
10352 /* FIXME: This has sign extension issues */
10354 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10355 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Other fusions only apply to native-word sized loads */
10358 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10363 return OP_X86_PUSH_MEMBASE;
10364 case OP_COMPARE_IMM:
10365 case OP_ICOMPARE_IMM:
10366 return OP_X86_COMPARE_MEMBASE_IMM;
10369 return OP_X86_COMPARE_MEMBASE_REG;
10373 #ifdef TARGET_AMD64
10374 /* FIXME: This has sign extension issues */
10376 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10377 return OP_X86_COMPARE_MEMBASE8_IMM;
10382 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10383 return OP_X86_PUSH_MEMBASE;
10385 /* FIXME: This only works for 32 bit immediates
10386 case OP_COMPARE_IMM:
10387 case OP_LCOMPARE_IMM:
10388 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10389 return OP_AMD64_COMPARE_MEMBASE_IMM;
10391 case OP_ICOMPARE_IMM:
10392 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10393 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* compare against a register, 64 and 32 bit forms */
10397 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10398 return OP_AMD64_COMPARE_MEMBASE_REG;
10401 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10402 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode whose SECOND source comes straight from a memory load
 * (LOAD_OPCODE), return the x86/amd64 *_REG_MEMBASE variant which reads
 * that operand from memory directly, eliminating the separate load.
 */
10411 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only native-word sized loads can be fused */
10414 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10420 return OP_X86_COMPARE_REG_MEMBASE;
10422 return OP_X86_ADD_REG_MEMBASE;
10424 return OP_X86_SUB_REG_MEMBASE;
10426 return OP_X86_AND_REG_MEMBASE;
10428 return OP_X86_OR_REG_MEMBASE;
10430 return OP_X86_XOR_REG_MEMBASE;
10434 #ifdef TARGET_AMD64
/* amd64: pick the 32 or 64 bit form based on the load width */
10437 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10438 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10442 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10443 return OP_AMD64_COMPARE_REG_MEMBASE;
10446 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10447 return OP_X86_ADD_REG_MEMBASE;
10449 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10450 return OP_X86_SUB_REG_MEMBASE;
10452 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10453 return OP_X86_AND_REG_MEMBASE;
10455 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10456 return OP_X86_OR_REG_MEMBASE;
10458 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10459 return OP_X86_XOR_REG_MEMBASE;
10461 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10462 return OP_AMD64_ADD_REG_MEMBASE;
10464 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10465 return OP_AMD64_SUB_REG_MEMBASE;
10467 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10468 return OP_AMD64_AND_REG_MEMBASE;
10470 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10471 return OP_AMD64_OR_REG_MEMBASE;
10473 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10474 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes which
 * are emulated on the current target (long shifts on 32 bit hosts, and
 * mul/div when MONO_ARCH_EMULATE_MUL_DIV/DIV is set), since the emulation
 * helpers have no immediate form.
 */
10482 mono_op_to_op_imm_noemul (int opcode)
10485 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10491 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10499 return mono_op_to_op_imm (opcode);
10503 #ifndef DISABLE_JIT
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them; conversely, demote variables used in only one bblock back to local
 * vregs.  Finally, compress the varinfo/vars tables by dropping dead entries.
 */
10512 mono_handle_global_vregs (MonoCompile *cfg)
10514 gint32 *vreg_to_bb;
10515 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds 0 (unseen), block_num + 1 (seen in exactly one
 * bblock) or -1 (seen in several bblocks).
 * FIX: the element type is gint32, not gint32* as originally written, and
 * the "+ 1" must scale by the element size, not add a single byte.
 */
10518 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10520 #ifdef MONO_ARCH_SIMD_INTRINSICS
10521 if (cfg->uses_simd_intrinsics)
10522 mono_simd_simplify_indirection (cfg);
10525 /* Find local vregs used in more than one bb */
10526 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10527 MonoInst *ins = bb->code;
10528 int block_num = bb->block_num;
10530 if (cfg->verbose_level > 2)
10531 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10534 for (; ins; ins = ins->next) {
10535 const char *spec = INS_INFO (ins->opcode);
10536 int regtype = 0, regindex;
10539 if (G_UNLIKELY (cfg->verbose_level > 2))
10540 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
10542 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn */
10544 for (regindex = 0; regindex < 4; regindex ++) {
10547 if (regindex == 0) {
10548 regtype = spec [MONO_INST_DEST];
10549 if (regtype == ' ')
10552 } else if (regindex == 1) {
10553 regtype = spec [MONO_INST_SRC1];
10554 if (regtype == ' ')
10557 } else if (regindex == 2) {
10558 regtype = spec [MONO_INST_SRC2];
10559 if (regtype == ' ')
10562 } else if (regindex == 3) {
10563 regtype = spec [MONO_INST_SRC3];
10564 if (regtype == ' ')
10569 #if SIZEOF_REGISTER == 4
10570 /* In the LLVM case, the long opcodes are not decomposed */
10571 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10573 * Since some instructions reference the original long vreg,
10574 * and some reference the two component vregs, it is quite hard
10575 * to determine when it needs to be global. So be conservative.
10577 if (!get_vreg_to_inst (cfg, vreg)) {
10578 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10580 if (cfg->verbose_level > 2)
10581 printf ("LONG VREG R%d made global.\n", vreg);
10585 * Make the component vregs volatile since the optimizations can
10586 * get confused otherwise.
10588 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10589 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10593 g_assert (vreg != -1);
10595 prev_bb = vreg_to_bb [vreg];
10596 if (prev_bb == 0) {
10597 /* 0 is a valid block num */
10598 vreg_to_bb [vreg] = block_num + 1;
10599 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are never turned into variables */
10600 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10603 if (!get_vreg_to_inst (cfg, vreg)) {
10604 if (G_UNLIKELY (cfg->verbose_level > 2))
10605 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10609 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10612 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10615 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10618 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10621 g_assert_not_reached ();
10625 /* Flag as having been used in more than one bb */
10626 vreg_to_bb [vreg] = -1;
10632 /* If a variable is used in only one bblock, convert it into a local vreg */
10633 for (i = 0; i < cfg->num_varinfo; i++) {
10634 MonoInst *var = cfg->varinfo [i];
10635 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10637 switch (var->type) {
10643 #if SIZEOF_REGISTER == 8
10646 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10647 /* Enabling this screws up the fp stack on x86 */
10650 /* Arguments are implicitly global */
10651 /* Putting R4 vars into registers doesn't work currently */
10652 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10654 * Make that the variable's liveness interval doesn't contain a call, since
10655 * that would cause the lvreg to be spilled, making the whole optimization
10658 /* This is too slow for JIT compilation */
10660 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10662 int def_index, call_index, ins_index;
10663 gboolean spilled = FALSE;
10668 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10669 const char *spec = INS_INFO (ins->opcode);
10671 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10672 def_index = ins_index;
/*
 * FIX: the second disjunct was a verbatim copy of the first
 * (SRC1/sreg1 twice), so uses of the variable through the
 * second source register were never seen by this scan.  It
 * must test SRC2/sreg2.
 */
10674 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10675 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10676 if (call_index > def_index) {
10682 if (MONO_IS_CALL (ins))
10683 call_index = ins_index;
10693 if (G_UNLIKELY (cfg->verbose_level > 2))
10694 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10695 var->flags |= MONO_INST_IS_DEAD;
10696 cfg->vreg_to_inst [var->dreg] = NULL;
10703 * Compress the varinfo and vars tables so the liveness computation is faster and
10704 * takes up less space.
10707 for (i = 0; i < cfg->num_varinfo; ++i) {
10708 MonoInst *var = cfg->varinfo [i];
10709 if (pos < i && cfg->locals_start == i)
10710 cfg->locals_start = pos;
10711 if (!(var->flags & MONO_INST_IS_DEAD)) {
10713 cfg->varinfo [pos] = cfg->varinfo [i];
10714 cfg->varinfo [pos]->inst_c0 = pos;
10715 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10716 cfg->vars [pos].idx = pos;
10717 #if SIZEOF_REGISTER == 4
10718 if (cfg->varinfo [pos]->type == STACK_I8) {
10719 /* Modify the two component vars too */
10722 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10723 var1->inst_c0 = pos;
10724 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10725 var1->inst_c0 = pos;
10732 cfg->num_varinfo = pos;
10733 if (cfg->locals_start > cfg->num_varinfo)
10734 cfg->locals_start = cfg->num_varinfo;
10738 * mono_spill_global_vars:
10740 * Generate spill code for variables which are not allocated to registers,
10741 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10742 * code is generated which could be optimized by the local optimization passes.
10745 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10747 MonoBasicBlock *bb;
10749 int orig_next_vreg;
10750 guint32 *vreg_to_lvreg;
10752 guint32 i, lvregs_len;
10753 gboolean dest_has_lvreg = FALSE;
10754 guint32 stacktypes [128];
10755 MonoInst **live_range_start, **live_range_end;
10756 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10758 *need_local_opts = FALSE;
10760 memset (spec2, 0, sizeof (spec2));
10762 /* FIXME: Move this function to mini.c */
10763 stacktypes ['i'] = STACK_PTR;
10764 stacktypes ['l'] = STACK_I8;
10765 stacktypes ['f'] = STACK_R8;
10766 #ifdef MONO_ARCH_SIMD_INTRINSICS
10767 stacktypes ['x'] = STACK_VTYPE;
10770 #if SIZEOF_REGISTER == 4
10771 /* Create MonoInsts for longs */
10772 for (i = 0; i < cfg->num_varinfo; i++) {
10773 MonoInst *ins = cfg->varinfo [i];
10775 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10776 switch (ins->type) {
10781 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10784 g_assert (ins->opcode == OP_REGOFFSET);
10786 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10788 tree->opcode = OP_REGOFFSET;
10789 tree->inst_basereg = ins->inst_basereg;
10790 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10792 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10794 tree->opcode = OP_REGOFFSET;
10795 tree->inst_basereg = ins->inst_basereg;
10796 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10806 /* FIXME: widening and truncation */
10809 * As an optimization, when a variable allocated to the stack is first loaded into
10810 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10811 * the variable again.
10813 orig_next_vreg = cfg->next_vreg;
10814 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10815 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10819 * These arrays contain the first and last instructions accessing a given
10821 * Since we emit bblocks in the same order we process them here, and we
10822 * don't split live ranges, these will precisely describe the live range of
10823 * the variable, i.e. the instruction range where a valid value can be found
10824 * in the variables location.
10825 * The live range is computed using the liveness info computed by the liveness pass.
10826 * We can't use vmv->range, since that is an abstract live range, and we need
10827 * one which is instruction precise.
10828 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10830 /* FIXME: Only do this if debugging info is requested */
10831 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10832 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10833 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10834 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10836 /* Add spill loads/stores */
10837 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10840 if (cfg->verbose_level > 2)
10841 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10843 /* Clear vreg_to_lvreg array */
10844 for (i = 0; i < lvregs_len; i++)
10845 vreg_to_lvreg [lvregs [i]] = 0;
10849 MONO_BB_FOR_EACH_INS (bb, ins) {
10850 const char *spec = INS_INFO (ins->opcode);
10851 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10852 gboolean store, no_lvreg;
10853 int sregs [MONO_MAX_SRC_REGS];
10855 if (G_UNLIKELY (cfg->verbose_level > 2))
10856 mono_print_ins (ins);
10858 if (ins->opcode == OP_NOP)
10862 * We handle LDADDR here as well, since it can only be decomposed
10863 * when variable addresses are known.
10865 if (ins->opcode == OP_LDADDR) {
10866 MonoInst *var = ins->inst_p0;
10868 if (var->opcode == OP_VTARG_ADDR) {
10869 /* Happens on SPARC/S390 where vtypes are passed by reference */
10870 MonoInst *vtaddr = var->inst_left;
10871 if (vtaddr->opcode == OP_REGVAR) {
10872 ins->opcode = OP_MOVE;
10873 ins->sreg1 = vtaddr->dreg;
10875 else if (var->inst_left->opcode == OP_REGOFFSET) {
10876 ins->opcode = OP_LOAD_MEMBASE;
10877 ins->inst_basereg = vtaddr->inst_basereg;
10878 ins->inst_offset = vtaddr->inst_offset;
10882 g_assert (var->opcode == OP_REGOFFSET);
10884 ins->opcode = OP_ADD_IMM;
10885 ins->sreg1 = var->inst_basereg;
10886 ins->inst_imm = var->inst_offset;
10889 *need_local_opts = TRUE;
10890 spec = INS_INFO (ins->opcode);
10893 if (ins->opcode < MONO_CEE_LAST) {
10894 mono_print_ins (ins);
10895 g_assert_not_reached ();
10899 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10903 if (MONO_IS_STORE_MEMBASE (ins)) {
10904 tmp_reg = ins->dreg;
10905 ins->dreg = ins->sreg2;
10906 ins->sreg2 = tmp_reg;
10909 spec2 [MONO_INST_DEST] = ' ';
10910 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10911 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10912 spec2 [MONO_INST_SRC3] = ' ';
10914 } else if (MONO_IS_STORE_MEMINDEX (ins))
10915 g_assert_not_reached ();
10920 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10921 printf ("\t %.3s %d", spec, ins->dreg);
10922 num_sregs = mono_inst_get_src_registers (ins, sregs);
10923 for (srcindex = 0; srcindex < 3; ++srcindex)
10924 printf (" %d", sregs [srcindex]);
10931 regtype = spec [MONO_INST_DEST];
10932 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10935 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10936 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10937 MonoInst *store_ins;
10939 MonoInst *def_ins = ins;
10940 int dreg = ins->dreg; /* The original vreg */
10942 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10944 if (var->opcode == OP_REGVAR) {
10945 ins->dreg = var->dreg;
10946 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10948 * Instead of emitting a load+store, use a _membase opcode.
10950 g_assert (var->opcode == OP_REGOFFSET);
10951 if (ins->opcode == OP_MOVE) {
10955 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10956 ins->inst_basereg = var->inst_basereg;
10957 ins->inst_offset = var->inst_offset;
10960 spec = INS_INFO (ins->opcode);
10964 g_assert (var->opcode == OP_REGOFFSET);
10966 prev_dreg = ins->dreg;
10968 /* Invalidate any previous lvreg for this vreg */
10969 vreg_to_lvreg [ins->dreg] = 0;
10973 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10975 store_opcode = OP_STOREI8_MEMBASE_REG;
10978 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10980 if (regtype == 'l') {
10981 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10982 mono_bblock_insert_after_ins (bb, ins, store_ins);
10983 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10984 mono_bblock_insert_after_ins (bb, ins, store_ins);
10985 def_ins = store_ins;
10988 g_assert (store_opcode != OP_STOREV_MEMBASE);
10990 /* Try to fuse the store into the instruction itself */
10991 /* FIXME: Add more instructions */
10992 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10993 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10994 ins->inst_imm = ins->inst_c0;
10995 ins->inst_destbasereg = var->inst_basereg;
10996 ins->inst_offset = var->inst_offset;
10997 spec = INS_INFO (ins->opcode);
10998 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10999 ins->opcode = store_opcode;
11000 ins->inst_destbasereg = var->inst_basereg;
11001 ins->inst_offset = var->inst_offset;
11005 tmp_reg = ins->dreg;
11006 ins->dreg = ins->sreg2;
11007 ins->sreg2 = tmp_reg;
11010 spec2 [MONO_INST_DEST] = ' ';
11011 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11012 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11013 spec2 [MONO_INST_SRC3] = ' ';
11015 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11016 // FIXME: The backends expect the base reg to be in inst_basereg
11017 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11019 ins->inst_basereg = var->inst_basereg;
11020 ins->inst_offset = var->inst_offset;
11021 spec = INS_INFO (ins->opcode);
11023 /* printf ("INS: "); mono_print_ins (ins); */
11024 /* Create a store instruction */
11025 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11027 /* Insert it after the instruction */
11028 mono_bblock_insert_after_ins (bb, ins, store_ins);
11030 def_ins = store_ins;
11033 * We can't assign ins->dreg to var->dreg here, since the
11034 * sregs could use it. So set a flag, and do it after
11037 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11038 dest_has_lvreg = TRUE;
11043 if (def_ins && !live_range_start [dreg]) {
11044 live_range_start [dreg] = def_ins;
11045 live_range_start_bb [dreg] = bb;
11052 num_sregs = mono_inst_get_src_registers (ins, sregs);
11053 for (srcindex = 0; srcindex < 3; ++srcindex) {
11054 regtype = spec [MONO_INST_SRC1 + srcindex];
11055 sreg = sregs [srcindex];
11057 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11058 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11059 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11060 MonoInst *use_ins = ins;
11061 MonoInst *load_ins;
11062 guint32 load_opcode;
11064 if (var->opcode == OP_REGVAR) {
11065 sregs [srcindex] = var->dreg;
11066 //mono_inst_set_src_registers (ins, sregs);
11067 live_range_end [sreg] = use_ins;
11068 live_range_end_bb [sreg] = bb;
11072 g_assert (var->opcode == OP_REGOFFSET);
11074 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11076 g_assert (load_opcode != OP_LOADV_MEMBASE);
11078 if (vreg_to_lvreg [sreg]) {
11079 g_assert (vreg_to_lvreg [sreg] != -1);
11081 /* The variable is already loaded to an lvreg */
11082 if (G_UNLIKELY (cfg->verbose_level > 2))
11083 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11084 sregs [srcindex] = vreg_to_lvreg [sreg];
11085 //mono_inst_set_src_registers (ins, sregs);
11089 /* Try to fuse the load into the instruction */
11090 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11091 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11092 sregs [0] = var->inst_basereg;
11093 //mono_inst_set_src_registers (ins, sregs);
11094 ins->inst_offset = var->inst_offset;
11095 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11096 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11097 sregs [1] = var->inst_basereg;
11098 //mono_inst_set_src_registers (ins, sregs);
11099 ins->inst_offset = var->inst_offset;
11101 if (MONO_IS_REAL_MOVE (ins)) {
11102 ins->opcode = OP_NOP;
11105 //printf ("%d ", srcindex); mono_print_ins (ins);
11107 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11109 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11110 if (var->dreg == prev_dreg) {
11112 * sreg refers to the value loaded by the load
11113 * emitted below, but we need to use ins->dreg
11114 * since it refers to the store emitted earlier.
11118 g_assert (sreg != -1);
11119 vreg_to_lvreg [var->dreg] = sreg;
11120 g_assert (lvregs_len < 1024);
11121 lvregs [lvregs_len ++] = var->dreg;
11125 sregs [srcindex] = sreg;
11126 //mono_inst_set_src_registers (ins, sregs);
11128 if (regtype == 'l') {
11129 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11130 mono_bblock_insert_before_ins (bb, ins, load_ins);
11131 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11132 mono_bblock_insert_before_ins (bb, ins, load_ins);
11133 use_ins = load_ins;
11136 #if SIZEOF_REGISTER == 4
11137 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11139 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11140 mono_bblock_insert_before_ins (bb, ins, load_ins);
11141 use_ins = load_ins;
11145 if (var->dreg < orig_next_vreg) {
11146 live_range_end [var->dreg] = use_ins;
11147 live_range_end_bb [var->dreg] = bb;
11151 mono_inst_set_src_registers (ins, sregs);
11153 if (dest_has_lvreg) {
11154 g_assert (ins->dreg != -1);
11155 vreg_to_lvreg [prev_dreg] = ins->dreg;
11156 g_assert (lvregs_len < 1024);
11157 lvregs [lvregs_len ++] = prev_dreg;
11158 dest_has_lvreg = FALSE;
11162 tmp_reg = ins->dreg;
11163 ins->dreg = ins->sreg2;
11164 ins->sreg2 = tmp_reg;
11167 if (MONO_IS_CALL (ins)) {
11168 /* Clear vreg_to_lvreg array */
11169 for (i = 0; i < lvregs_len; i++)
11170 vreg_to_lvreg [lvregs [i]] = 0;
11172 } else if (ins->opcode == OP_NOP) {
11174 MONO_INST_NULLIFY_SREGS (ins);
11177 if (cfg->verbose_level > 2)
11178 mono_print_ins_index (1, ins);
11181 /* Extend the live range based on the liveness info */
11182 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11183 for (i = 0; i < cfg->num_varinfo; i ++) {
11184 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11186 if (vreg_is_volatile (cfg, vi->vreg))
11187 /* The liveness info is incomplete */
11190 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11191 /* Live from at least the first ins of this bb */
11192 live_range_start [vi->vreg] = bb->code;
11193 live_range_start_bb [vi->vreg] = bb;
11196 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11197 /* Live at least until the last ins of this bb */
11198 live_range_end [vi->vreg] = bb->last_ins;
11199 live_range_end_bb [vi->vreg] = bb;
11205 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11207 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11208 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11210 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11211 for (i = 0; i < cfg->num_varinfo; ++i) {
11212 int vreg = MONO_VARINFO (cfg, i)->vreg;
11215 if (live_range_start [vreg]) {
11216 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11218 ins->inst_c1 = vreg;
11219 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11221 if (live_range_end [vreg]) {
11222 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11224 ins->inst_c1 = vreg;
11225 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11226 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11228 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11234 g_free (live_range_start);
11235 g_free (live_range_end);
11236 g_free (live_range_start_bb);
11237 g_free (live_range_end_bb);
11242 * - use 'iadd' instead of 'int_add'
11243 * - handling ovf opcodes: decompose in method_to_ir.
11244 * - unify iregs/fregs
11245 * -> partly done, the missing parts are:
11246 * - a more complete unification would involve unifying the hregs as well, so
11247 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11248 * would no longer map to the machine hregs, so the code generators would need to
11249 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11250 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11251 * fp/non-fp branches speeds it up by about 15%.
11252 * - use sext/zext opcodes instead of shifts
11254 * - get rid of TEMPLOADs if possible and use vregs instead
11255 * - clean up usage of OP_P/OP_ opcodes
11256 * - cleanup usage of DUMMY_USE
11257 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11259 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11260 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11261 * - make sure handle_stack_args () is called before the branch is emitted
11262 * - when the new IR is done, get rid of all unused stuff
11263 * - COMPARE/BEQ as separate instructions or unify them ?
11264 * - keeping them separate allows specialized compare instructions like
11265 * compare_imm, compare_membase
11266 * - most back ends unify fp compare+branch, fp compare+ceq
11267 * - integrate mono_save_args into inline_method
11268 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11269 * - handle long shift opts on 32 bit platforms somehow: they require
11270 * 3 sregs (2 for arg1 and 1 for arg2)
11271 * - make byref a 'normal' type.
11272 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11273 * variable if needed.
11274 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11275 * like inline_method.
11276 * - remove inlining restrictions
11277 * - fix LNEG and enable cfold of INEG
11278 * - generalize x86 optimizations like ldelema as a peephole optimization
11279 * - add store_mem_imm for amd64
11280 * - optimize the loading of the interruption flag in the managed->native wrappers
11281 * - avoid special handling of OP_NOP in passes
11282 * - move code inserting instructions into one function/macro.
11283 * - try a coalescing phase after liveness analysis
11284 * - add float -> vreg conversion + local optimizations on !x86
11285 * - figure out how to handle decomposed branches during optimizations, ie.
11286 * compare+branch, op_jump_table+op_br etc.
11287 * - promote RuntimeXHandles to vregs
11288 * - vtype cleanups:
11289 * - add a NEW_VARLOADA_VREG macro
11290 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11291 * accessing vtype fields.
11292 * - get rid of I8CONST on 64 bit platforms
11293 * - dealing with the increase in code size due to branches created during opcode
11295 * - use extended basic blocks
11296 * - all parts of the JIT
11297 * - handle_global_vregs () && local regalloc
11298 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11299 * - sources of increase in code size:
11302 * - isinst and castclass
11303 * - lvregs not allocated to global registers even if used multiple times
11304 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11306 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11307 * - add all micro optimizations from the old JIT
11308 * - put tree optimizations into the deadce pass
11309 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11310 * specific function.
11311 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11312 * fcompare + branchCC.
11313 * - create a helper function for allocating a stack slot, taking into account
11314 * MONO_CFG_HAS_SPILLUP.
11316 * - merge the ia64 switch changes.
11317 * - optimize mono_regstate2_alloc_int/float.
11318 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11319 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11320 * parts of the tree could be separated by other instructions, killing the tree
11321 * arguments, or stores killing loads etc. Also, should we fold loads into other
11322 * instructions if the result of the load is used multiple times ?
11323 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11324 * - LAST MERGE: 108395.
11325 * - when returning vtypes in registers, generate IR and append it to the end of the
11326 * last bb instead of doing it in the epilog.
11327 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11335 - When to decompose opcodes:
11336 - earlier: this makes some optimizations hard to implement, since the low level IR
11337 no longer contains the necessary information. But it is easier to do.
11338 - later: harder to implement, enables more optimizations.
11339 - Branches inside bblocks:
11340 - created when decomposing complex opcodes.
11341 - branches to another bblock: harmless, but not tracked by the branch
11342 optimizations, so need to branch to a label at the start of the bblock.
11343 - branches to inside the same bblock: very problematic, trips up the local
11344 reg allocator. Can be fixed by splitting the current bblock, but that is a
11345 complex operation, since some local vregs can become global vregs etc.
11346 - Local/global vregs:
11347 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11348 local register allocator.
11349 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11350 structure, created by mono_create_var (). Assigned to hregs or the stack by
11351 the global register allocator.
11352 - When to do optimizations like alu->alu_imm:
11353 - earlier -> saves work later on since the IR will be smaller/simpler
11354 - later -> can work on more instructions
11355 - Handling of valuetypes:
11356 - When a vtype is pushed on the stack, a new temporary is created, an
11357 instruction computing its address (LDADDR) is emitted and pushed on
11358 the stack. Need to optimize cases when the vtype is used immediately as in
11359 argument passing, stloc etc.
11360 - Instead of the to_end stuff in the old JIT, simply call the function handling
11361 the values on the stack before emitting the last instruction of the bb.
11364 #endif /* DISABLE_JIT */