2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee and fall back to an ordinary call.
 * Only fires when we are actually inlining (cfg->method != method) into a
 * non-wrapper.  NOTE(review): the macro body is truncated in this chunk
 * (no visible "} while (0)") — confirm against the full file. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR construction as soon as an exception has been recorded
 * on the MonoCompile.  NOTE(review): body truncated in this chunk. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on cfg (type + human-readable message
 * naming caller and callee), free the temporary name strings, and jump to
 * the shared exception_exit label of the enclosing function. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE but for an inaccessible field:
 * records MONO_EXCEPTION_FIELD_ACCESS and jumps to exception_exit. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Used when an opcode cannot be compiled under generic code sharing: if a
 * generic_sharing_context is active, optionally trace the failing
 * method/opcode (verbose_level > 2), mark the compile as failed with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit.
 * NOTE(review): closing lines of the macro are not visible here. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
/* True when the current method has a 'this' parameter and 'ins' is a plain
 * register move whose source is the dreg holding argument 0. */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Map a register-register opcode to its immediate form (the _noemul
 * variant avoids opcodes that need software emulation). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Shared signatures for the various JIT trampolines; defined elsewhere in
 * the runtime (the _llvm variants are the LLVM-backend counterparts). */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
/* mini-ops.h is included repeatedly with different MINI_OP/MINI_OP3
 * expansions to build per-opcode metadata tables.  First expansion:
 * four characters per opcode describing dest/src1/src2 register classes. */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize a MonoMethodVar: mark first_use as "not yet seen" (0xffff).
 * NOTE(review): remaining macro lines are not visible in this chunk. */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers from 'regs' into ins->sreg1..sreg3.
 * NOTE(review): the storage-class/return-type line is not visible here. */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrappers over the internal virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-directed destination. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/* Select the move opcode class appropriate for copying a value of 'type'
 * between registers, switching on the MonoType kind.  NOTE(review): many
 * case labels and the return statements are missing from this chunk, so
 * the per-case results cannot be documented from what is visible. */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
/* Enum valuetypes are treated as their underlying integral type. */
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get special handling. */
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are resolved through their container class. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Open type variables are only legal under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's number, its incoming and outgoing
 * edges (block number + depth-first number), then every instruction in
 * the block, all prefixed with 'msg'. */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the current method unverified: break into the debugger when the
 * break_on_unverified debug option is set, otherwise jump to the
 * function-local 'unverified' label. */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Fetch (or lazily create and register) the basic block covering IL
 * offset 'ip', bounds-checking ip against the method body. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated pointer register. */
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, widen a 32-bit operand to pointer width (sign-extend
 * via OP_SEXT_I4) before mixing it with a STACK_PTR operand; no-op on
 * 32-bit targets (see the empty definition below). */
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values off the eval stack, emit the type-specialized binary op
 * (type_from_op picks the concrete opcode), and push the result. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for unary opcodes (one stack operand). */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: an OP_COMPARE of the two stack tops
 * followed by the branch, wiring inst_true_bb to the branch target and
 * inst_false_bb to either 'next_block' (fall-through block already known)
 * or the block at the next IL offset.  Spills the remaining eval stack
 * before the branch. */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping the insert
 * if the edge already exists.  Edge arrays are grown by allocating a new
 * array of size count+1 from the compile mempool and copying the old
 * entries (mempool memory is never freed individually). */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Optional tracing of the edge being added (entry/exit blocks have a
 * NULL cil_code). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip if the out-edge is already present. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the same de-dup + grow-and-copy for the in-edge list of 'to'. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/* Region token layout visible below: ((clause_index + 1) << 8) combined
 * with a MONO_REGION_* kind and the clause flags. */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: classify as finally / fault / catch. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/* Collect (into a GList) the exception clauses of kind 'type' whose try
 * range contains 'ip' but not 'target' — i.e. the clauses whose handlers
 * must run when control transfers from ip to target (e.g. leave crossing
 * a finally).  NOTE(review): return statement not visible in this chunk. */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/* Return the stack-pointer variable for an exception region, creating and
 * caching it in cfg->spvars on first use.  MONO_INST_INDIRECT keeps the
 * var on the stack (its address is taken by the EH machinery). */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for an IL offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Same caching pattern as mono_create_spvar_for_region, but the variable
 * holds the caught exception object (object type) keyed by IL offset. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 * from the MonoType being pushed.  Byref types become STACK_MP; enums and
 * generic instances are resolved to their underlying/container type. */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref (managed pointer) case. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
/* Type variables only reach here under generic sharing; treat as object. */
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of a numeric binary op, indexed by the two operand
 * stack types; STACK_INV marks an illegal combination. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a unary negation, indexed by operand stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of integer-only binary ops (and/or/xor/...); floats and
 * references are invalid here. */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values other than
 * 1 appear to encode restricted/verifier-relaxed combinations —
 * NOTE(review): exact meaning of 2/3/4 not derivable from this chunk). */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops: value type is indexed by row, shift amount
 * (I4 or PTR only) by column. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CIL opcode to obtain
 * the I/L/P/F-specialized IR opcode, indexed by stack type. */
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes ins->opcode: the generic CIL opcode is turned into the
 * I4/I8/PTR/R8-specific IR opcode by adding the delta from the *_op_map
 * tables above, or by direct assignment for compares/conversions.
 * NOTE(review): many case labels are missing from this chunk. */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, opcode specialized
 * by the result's stack type. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts: row is the shifted value, column the shift count. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I compare based on the operand width. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
/* Immediate compare: only one stack operand, validated against itself. */
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
801 ins->opcode += beqops_op_map [src1->type];
/* ceq family pushes an I4 result. */
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned ceq variants: only comparisons marked 1 in the table are valid. */
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary negation. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
/* CEE_NOT and friends: operand must be an integer/pointer stack type. */
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> double. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit (signed source). */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked conversions to native int (unsigned source). */
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
/* Overflow-checked conversions to 32-bit (unsigned source). */
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; on 64-bit an I8 is already the right
 * width so a plain move suffices, depending on SIZEOF_REGISTER. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
/* Overflow-aware conversions producing I4 / native int. */
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: R8 results are invalid here. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: result type fixed by the load width. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object as their default klass. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
/* Table mapping (presumably MONO_TYPE_* — NOTE(review): index not visible
 * in this chunk, confirm against full file) to eval-stack types. */
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack types of 'args' are compatible with the
 * parameter types of 'sig' (used for fast argument checking).
 * NOTE(review): heavily truncated in this chunk; return paths and several
 * cases are not visible. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
/* Managed pointers are only acceptable for byref parameters... */
994 if (!sig->params [i]->byref)
/* ...and object references only for non-byref reference parameters. */
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* R8 stack values require an R4/R8 non-byref parameter. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the local holding the cached MonoDomain*. */
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create/return the GOT-address variable; only meaningful on
 * architectures that define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Lazily create/return the rgctx (runtime generic context) variable;
 * only valid under generic sharing.  Kept stack-allocated because its
 * address is needed. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map an instruction's eval-stack
 * type back to a representative MonoType (using ins->klass for the
 * MP/OBJ/VTYPE cases). */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: the byref form of the pointee's class. */
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType directly to its eval-stack type constant.  Enum types
 * are first reduced via mono_type_get_underlying_type.  NOTE(review):
 * the return statements for most cases are missing from this chunk. */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
/* Generic instances over value types are vtypes; otherwise references. */
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/* Map a CIL ldelem/stelem opcode to the element MonoClass it accesses.
 * NOTE(review): most of the case labels are missing from this chunk; the
 * opcode-to-class pairing shown here is partial. */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a local variable used to carry a stack slot's value across basic
 * block boundaries.  Integer-like slots within the declared max_stack are
 * cached in cfg->intvars (keyed by stack type + slot) so the same temp is
 * reused; everything else gets a fresh local. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) + slot * STACK_MAX. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types: always allocate a new variable. */
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair in cfg->token_info_hash (keyed by 'key') so
 * the AOT compiler can later resolve the reference.  Skipped for generic
 * contexts and table-0 (wrapper) tokens, per the comment below. */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables carry the stack.
 * Prefer reusing a successor's already-assigned in_stack. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh carriers. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack as the in_stack of every successor,
 * flagging the method unverifiable on a stack-depth mismatch. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
/* Emit the stores that spill the current stack into the carriers and
 * replace the stack entries with the carrier variables. */
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
/* Store into this successor's distinct carrier set too. */
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/* Loads into intf_reg the per-interface offset/entry for klass, indexed off the
 * vtable pointer in vtable_reg. (Excerpt: some structural lines are not shown.) */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: the interface id is unknown at compile time, so materialize it via an
 * ADJUSTED_IID patch (presumably pre-scaled/negated — confirm against aot-compiler)
 * and add it to the vtable address before loading. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a constant, so load from a fixed negative offset —
 * the interface offsets array lives immediately before the vtable. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emits code that leaves a nonzero value in intf_bit_reg iff the bit for
 * klass->interface_id is set in the interface bitmap stored at base_reg+offset.
 * (Excerpt: some braces/#else lines are not shown.) */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: the bit test cannot be done inline, so call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: test the bit directly in emitted code. */
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: iid is a runtime patch, so compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted instructions. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is constant, so byte offset and mask fold into immediates. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: points the bitmap check at MonoClass::interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: points the bitmap check at MonoVTable::interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
/* Guard before a bitmap test: if iid >= max_iid the bitmap cannot contain the
 * bit. On failure either branch to false_target or (presumably when
 * false_target is NULL — the else line is not shown in this excerpt) throw
 * InvalidCastException. */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: iid comes from a patch; JIT: iid is an immediate. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the LOADU2. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass::max_interface_id. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emits an "is instance of non-interface class" test against the class in
 * klass_reg: loads supertypes [idepth - 1] and compares it with the target
 * class (given as an IR value in klass_ins, an AOT patch, or an immediate).
 * Branches to true_target on a match; false_target is used for the idepth
 * guard. (Excerpt: some structural lines are not shown.) */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* If klass is deeper than the statically-allocated supertable, the candidate's
 * idepth must be checked at runtime before indexing supertypes. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [klass->idepth - 1] == klass iff klass_reg is klass or a subclass. */
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no explicit class IR value. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Interface cast test starting from a vtable register: checks max_iid, then
 * the interface bitmap. Branches to true_target if the bit is set; presumably
 * throws InvalidCastException when no targets are given (the else line is not
 * shown in this excerpt). */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* Same structure as mini_emit_iface_cast, but reads MonoClass fields. */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emits an exact class-equality check: compares klass_reg against the target
 * class and throws InvalidCastException on mismatch. The first compare is
 * taken when klass_inst is provided (its guarding `if` line is not shown in
 * this excerpt). */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no explicit class IR value. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compares klass_reg against klass and emits a conditional branch to target
 * using branch_op (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing. */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration — castclass_inst recurses via this for nested arrays. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emits a full castclass check of the class in klass_reg against klass,
 * throwing InvalidCastException on failure. The first visible branch handles
 * array classes (its guarding `if (klass->rank)` line is not shown in this
 * excerpt); the tail handles ordinary classes via the supertypes table. */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
/* Array path: ranks must match exactly, then the element classes are compared. */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types whose cast_class folds several runtime types
 * together (object / Enum / Enum's parent), matching array covariance rules. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets also require the object to be a vector (bounds == NULL). */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: same supertypes[idepth-1] scheme as isninst, but failures
 * throw instead of branching. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no explicit class IR value. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emits inline code that fills [destreg+offset, +size) with val. Only val == 0
 * is supported (asserted below). Small, sufficiently-aligned sizes use
 * immediate stores; otherwise val is loaded into a register and stored in the
 * widest chunks alignment allows. (Excerpt: switch/case and loop lines are not
 * shown.) */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a single immediate store of 1/2/4 (or 8 on 64-bit) bytes. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val once, then emit a run of register stores. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* On 64-bit targets, a leading 4-byte store re-aligns to 8 before the
 * 8-byte store loop. */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emits inline code copying size bytes from srcreg+soffset to destreg+doffset,
 * widest-chunk-first according to align. Regions are treated as
 * non-overlapping (memcpy semantics — confirm at call sites). (Excerpt: loop
 * and offset-advance lines are not shown.) */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Unaligned: byte-by-byte load/store pairs. */
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 64-bit targets: copy in 8-byte chunks while possible. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Maps a method's return type to the matching call opcode family
 * ((VOID/F/L/V)CALL), further selected by calli (indirect, _REG) and virt
 * (_VIRT). Enums and generic instances are unwrapped and retried (the
 * corresponding goto/label lines are not shown in this excerpt). */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, so the plain CALL family applies. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve type variables under generic sharing before dispatching. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integers use the LCALL family, floats the FCALL family. */
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying integral type. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Retry with the generic type definition's byval type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Managed pointers: require a matching STACK_MP class, or accept STACK_PTR
 * (unmanaged pointer) as-is. */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Enum types are reduced to their underlying type before the switch. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
/* Small integral targets accept I4 or native-int stack entries. */
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
/* Reference targets only accept object references. */
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Value types must match both stack kind (STACK_VTYPE) and exact class. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
/* Generic instantiations: value types check class identity, reference
 * instantiations just require an object reference. */
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* args [0] is `this` when sig->hasthis (the guarding line is not shown in
 * this excerpt): it must be an object reference or a pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* Same stack-kind matching as target_type_is_incompatible, per parameter. */
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps an OP_*CALLVIRT opcode to the corresponding direct-call opcode
 * (the individual case/return lines are mostly not shown in this excerpt). */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * used when the target address is loaded from [basereg+offset] (vtable/IMT
 * dispatch). Some case labels are not shown in this excerpt. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/* Sets up the hidden IMT/method argument for an interface or generic-virtual
 * call: either the caller-supplied imt_arg, or (for AOT/JIT) a constant with
 * call->method. The LLVM backend records it in call->imt_arg_reg; otherwise
 * it is bound to MONO_ARCH_IMT_REG or handed to the arch backend. */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant materialization, then bind to the IMT
 * register (or fall back to the arch-specific hook below). */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo from mp and fills in ip/type/target
 * (field-assignment lines for ip and type are not shown in this excerpt). */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/* Builds a MonoCallInst for a call with the given signature: picks the opcode
 * (tail call / calli / virtual), wires up the return value (including the
 * vtype-return address via OP_OUTARG_VTRETADDR), applies soft-float argument
 * conversion, and lets the arch/LLVM backend lay out the out-args.
 * The instruction is NOT added to a bblock here — callers do that. */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Vtype returns: when the backend provides vret_addr, reuse it; otherwise
 * allocate a local and reference it through OP_OUTARG_VTRETADDR (see the
 * comment below). The first branch's guard (presumably cfg->vret_addr /
 * ret_var_is_local — not shown in this excerpt) selects between them. */
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
/* args [0] is `this` when hasthis; it is treated as native int. */
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Argument lowering is backend-specific: LLVM or the native arch. */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-args area and mark the method as having calls. */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emits an indirect call through the address in addr and appends it to the
 * current bblock. Returns the call instruction as a MonoInst*. */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
/* For indirect calls, sreg1 holds the target address. */
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/* Attaches the rgctx (runtime generic context) argument to a call: via the
 * dedicated MONO_ARCH_RGCTX_REG when the arch has one, otherwise by recording
 * the vreg for the backend (LLVM path). */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also passes an rgctx argument. The rgctx value is copied
 * to a fresh vreg first so it survives until the call's out-arg setup. */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations: rgctx slot fetch helpers defined later in the file. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Central call-emission routine: builds and appends a call to `method`.
 * Handles, in order: string-ctor signature fixup, shared-generic remoting
 * via an rgctx calli, the delegate Invoke trampoline fast path, direct calls
 * to non-virtual / sealed methods, and finally virtual dispatch through the
 * vtable or the IMT (for interfaces and generic virtual methods). `this` may
 * be NULL for static calls; imt_arg is an optional precomputed IMT argument. */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: an instance call on a MarshalByRef (or object) class may need the
 * remoting-check wrapper, unless the method is virtual or `this` was checked. */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
/* Shared generic code can't use wrappers; fetch the remoting-invoke address
 * from the rgctx and do an indirect call instead. */
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call directly through delegate->invoke_impl, skipping
 * vtable dispatch entirely. */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no `this`, so skip the null check for them. */
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (faulting on null `this`), then pick
 * the slot — IMT slots sit at negative offsets before the vtable. */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: resolve the interface offset table entry at runtime. */
2412 if (slot_reg == -1) {
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index straight into the vtable array. */
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also go through the IMT machinery. */
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes vtable_arg as the
 * rgctx argument (copied to a fresh vreg first, as in mono_emit_rgctx_calli). */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: uses the method's own signature and no IMT argument. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to a native function address (func is stored into the
 * call on a line not shown in this excerpt) and appends it to the bblock. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emits a call to a registered JIT icall, looked up by its function address;
 * the call goes through the icall's wrapper with the icall's signature. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The ji pointer doubles as the "address"; abs_patches lets the ABS patch
 * resolver recognize it later and substitute the real target. */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the return
 * type is a sub-register-sized integer. Needed because native (pinvoke)
 * code — and LLVM-compiled code — may leave the upper bits of such
 * return values uninitialized.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Map the load opcode for the return type to a widening conversion. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3) helper, caching it in a static.
 * The helper lives on string_class in corlib; an old corlib without it
 * is a fatal error.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill *WB_BITMAP with one bit per pointer-sized slot of KLASS (starting
 * at byte OFFSET) marking the slots which hold object references and thus
 * need a write barrier. Recurses into embedded value types that contain
 * references. Static fields are skipped.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) object header. */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* References must be pointer-aligned for the bitmap encoding to work. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of type
 * KLASS. iargs[0]/iargs[1] hold dest/src addresses. Returns (presumably)
 * whether the inline expansion was emitted — on failure the caller falls
 * back to a generic path. TODO confirm: return statements are elided in
 * this view.
 *
 * Strategy: build a per-slot reference bitmap, then either call the
 * mono_gc_wbarrier_value_copy_bitmap icall (large copies) or unroll
 * pointer-sized load/store pairs, emitting a write barrier after each
 * store of a reference slot.
 */
2594 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2596 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2597 unsigned need_wb = 0;
2602 /*types with references can't have alignment smaller than sizeof(void*) */
2603 if (align < SIZEOF_VOID_P)
2606 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2607 if (size > 32 * SIZEOF_VOID_P)
2610 create_write_barrier_bitmap (klass, &need_wb, 0);
2612 /* We don't unroll more than 5 stores to avoid code bloat. */
2613 if (size > 5 * SIZEOF_VOID_P) {
2614 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
2615 size += (SIZEOF_VOID_P - 1);
2616 size &= ~(SIZEOF_VOID_P - 1);
2618 EMIT_NEW_ICONST (cfg, iargs [2], size);
2619 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2620 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2624 destreg = iargs [0]->dreg;
2625 srcreg = iargs [1]->dreg;
2628 dest_ptr_reg = alloc_preg (cfg);
2629 tmp_reg = alloc_preg (cfg);
/* dest_ptr_reg walks the destination; destreg stays at the base. */
2632 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2634 while (size >= SIZEOF_VOID_P) {
2635 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Current slot holds a reference: emit a write barrier for the store. */
2638 if (need_wb & 0x1) {
2639 MonoInst *dummy_use;
2641 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2642 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep tmp_reg alive across the barrier call. */
2644 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2645 dummy_use->sreg1 = tmp_reg;
2646 MONO_ADD_INS (cfg->cbb, dummy_use);
2649 offset += SIZEOF_VOID_P;
2650 size -= SIZEOF_VOID_P;
2653 /*tmp += sizeof (void*)*/
2654 if (size >= SIZEOF_VOID_P) {
2655 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2656 MONO_ADD_INS (cfg->cbb, iargs [0]);
2660 /* Those cannot be references since size < sizeof (void*) */
/* Tail copy of the remaining 4/2/1 byte chunks, no barriers needed. */
2662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2669 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2670 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2676 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2686 * Emit code to copy a valuetype of type @klass whose address is stored in
2687 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * If @native is TRUE the native (marshalled) size is used and the struct
 * is assumed to contain no references, so no write barriers are emitted.
 */
2690 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2692 MonoInst *iargs [4];
2695 MonoMethod *memcpy_method;
2699 * This check breaks with spilled vars... need to handle it during verification anyway.
2700 * g_assert (klass && klass == src->klass && klass == dest->klass);
2704 n = mono_class_native_size (klass, &align);
2706 n = mono_class_value_size (klass, &align);
2708 /* if native is true there should be no references in the struct */
2709 if (cfg->gen_write_barriers && klass->has_references && !native) {
2710 /* Avoid barriers when storing to the stack */
2711 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2712 (dest->opcode == OP_LDADDR))) {
2713 int context_used = 0;
2718 if (cfg->generic_sharing_context)
2719 context_used = mono_class_check_context_used (klass);
2721 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2722 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2724 } else if (context_used) {
/* Shared code: fetch the klass from the rgctx at runtime. */
2725 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2727 if (cfg->compile_aot) {
2728 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2730 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the GC descriptor to be computed. */
2731 mono_class_compute_gc_descriptor (klass);
2735 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small structs, call the managed
 * memcpy helper otherwise. */
2740 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2741 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2742 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2746 EMIT_NEW_ICONST (cfg, iargs [2], n);
2748 memcpy_method = get_memcpy_method ();
2749 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3) helper, caching it in a static.
 * Mirrors get_memcpy_method (); missing helper means corlib is too old.
 */
2754 get_memset_method (void)
2756 static MonoMethod *memset_method = NULL;
2757 if (!memset_method) {
2758 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2760 g_error ("Old corlib found. Install a new one");
2762 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 * in DEST->dreg. Small structs (<= 5 pointers) are zeroed inline; larger
 * ones go through the managed memset helper.
 */
2766 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2768 MonoInst *iargs [3];
2771 MonoMethod *memset_method;
2773 /* FIXME: Optimize this for the case when dest is an LDADDR */
2775 mono_class_init (klass);
2776 n = mono_class_value_size (klass, &align);
2778 if (n <= sizeof (gpointer) * 5) {
2779 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2782 memset_method = get_memset_method ();
/* memset (dest, 0, n) */
2784 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2785 EMIT_NEW_ICONST (cfg, iargs [2], n);
2786 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on
 * CONTEXT_USED and the method's flags the context comes from:
 *   - the method RGCTX variable (generic method sharing),
 *   - the vtable variable (static methods / valuetype methods), possibly
 *     indirecting through MonoMethodRuntimeGenericContext.class_vtable,
 *   - or the 'this' argument's vtable.
 * NOTE(review): interior lines are elided in this view; comments describe
 * only the visible branches.
 */
2791 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2793 MonoInst *this = NULL;
2795 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when the method actually has one. */
2797 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2798 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2799 !method->klass->valuetype)
2800 EMIT_NEW_ARGLOAD (cfg, this, 0);
2802 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2803 MonoInst *mrgctx_loc, *mrgctx_var;
2806 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2808 mrgctx_loc = mono_get_vtable_var (cfg);
2809 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2812 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2813 MonoInst *vtable_loc, *vtable_var;
2817 vtable_loc = mono_get_vtable_var (cfg);
2818 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2820 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load the vtable out of it. */
2821 MonoInst *mrgctx_var = vtable_var;
2824 vtable_reg = alloc_preg (cfg);
2825 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2826 vtable_var->type = STACK_PTR;
/* Default case: read the vtable from 'this'. */
2832 int vtable_reg, res_reg;
2834 vtable_reg = alloc_preg (cfg);
2835 res_reg = alloc_preg (cfg);
2836 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an
 * rgctx fetch: the requesting METHOD, whether the context is an mrgctx,
 * the patch describing the looked-up entity, and the rgctx info type.
 */
2841 static MonoJumpInfoRgctxEntry *
2842 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2844 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2845 res->method = method;
2846 res->in_mrgctx = in_mrgctx;
2847 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2848 res->data->type = patch_type;
2849 res->data->data.target = patch_data;
2850 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * using the runtime generic context RGCTX.
 */
2855 static inline MonoInst*
2856 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2858 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info (vtable, klass, ...) for KLASS
 * from the runtime generic context of the current method.
 */
2862 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2863 MonoClass *klass, int rgctx_type)
2865 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2866 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2868 return emit_rgctx_fetch (cfg, rgctx, entry);
2872 * emit_get_rgctx_method:
2874 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2875 * normal constants, else emit a load from the rgctx.
2878 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2879 MonoMethod *cmethod, int rgctx_type)
2881 if (!context_used) {
/* Non-shared code: the method is known at compile time. */
2884 switch (rgctx_type) {
2885 case MONO_RGCTX_INFO_METHOD:
2886 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2888 case MONO_RGCTX_INFO_METHOD_RGCTX:
2889 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2892 g_assert_not_reached ();
/* Shared code: go through the rgctx lazy fetch machinery. */
2895 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2896 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2898 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info for FIELD from the runtime
 * generic context of the current method.
 */
2903 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2904 MonoClassField *field, int rgctx_type)
2906 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2907 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2909 return emit_rgctx_fetch (cfg, rgctx, entry);
2913 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS. The
 * vtable argument comes either from the rgctx (shared code) or as a
 * vtable constant, and on architectures with a dedicated vtable register
 * it is passed in MONO_ARCH_VTABLE_REG.
 */
2916 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2918 MonoInst *vtable_arg;
2920 int context_used = 0;
2922 if (cfg->generic_sharing_context)
2923 context_used = mono_class_check_context_used (klass);
2926 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2927 klass, MONO_RGCTX_INFO_VTABLE);
2929 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2933 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature. */
2936 if (COMPILE_LLVM (cfg))
2937 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2939 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2940 #ifdef MONO_ARCH_VTABLE_REG
2941 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2942 cfg->uses_vtable_reg = TRUE;
2949 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. Depending on compilation mode the
 * comparison is done on the klass (MONO_OPT_SHARED), on an rgctx-loaded
 * vtable (shared generics), or on a vtable constant/immediate.
 */
2952 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2954 int vtable_reg = alloc_preg (cfg);
2955 int context_used = 0;
2957 if (cfg->generic_sharing_context)
2958 context_used = mono_class_check_context_used (array_class);
/* Load obj->vtable, faulting on a null object. */
2960 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2962 if (cfg->opt & MONO_OPT_SHARED) {
2963 int class_reg = alloc_preg (cfg);
2964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2965 if (cfg->compile_aot) {
2966 int klass_reg = alloc_preg (cfg);
2967 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2968 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2972 } else if (context_used) {
2973 MonoInst *vtable_ins;
2975 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2976 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2978 if (cfg->compile_aot) {
/* Bail out on vtable load failure; caller checks array_class. */
2982 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2984 vt_reg = alloc_preg (cfg);
2985 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2986 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2989 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2995 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (from the
 * object's vtable) and the target KLASS into the JIT TLS structure so a
 * subsequent InvalidCastException can report both sides of the failed
 * cast. No-op unless better_cast_details is on.
 */
2999 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3001 if (mini_get_debug_options ()->better_cast_details) {
3002 int to_klass_reg = alloc_preg (cfg);
3003 int vtable_reg = alloc_preg (cfg);
3004 int klass_reg = alloc_preg (cfg);
3005 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message below has a stray '.' after the newline. */
3008 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3012 MONO_ADD_INS (cfg->cbb, tls_get);
3013 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3016 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3017 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3018 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state recorded by save_cast_details () once the
 * cast has succeeded. No-op unless better_cast_details is on.
 */
3023 reset_cast_details (MonoCompile *cfg)
3025 /* Reset the variables holding the cast details */
3026 if (mini_get_debug_options ()->better_cast_details) {
3027 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3029 MONO_ADD_INS (cfg->cbb, tls_get);
3030 /* It is enough to reset the from field */
3031 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3036 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3037 * generic code is generated.
3040 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3042 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3045 MonoInst *rgctx, *addr;
3047 /* FIXME: What if the class is shared? We might not
3048 have to get the address of the method from the
/* Shared code: indirect call through an rgctx-resolved address. */
3050 addr = emit_get_rgctx_method (cfg, context_used, method,
3051 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3053 rgctx = emit_get_rgctx (cfg, method, context_used);
3055 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: direct call to Nullable<T>.Unbox. */
3057 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp[0] to the (non-nullable) valuetype KLASS: verify
 * the object's rank is 0 and its element class matches, throwing
 * InvalidCastException otherwise, then produce a managed pointer just
 * past the MonoObject header.
 */
3062 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3066 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3067 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3068 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3069 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3071 obj_reg = sp [0]->dreg;
/* Faulting load: also acts as the null check on the boxed object. */
3072 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3073 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3075 /* FIXME: generics */
3076 g_assert (klass->rank == 0);
/* Arrays cannot be unboxed to a rank-0 valuetype. */
3079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3080 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the rgctx-resolved element class. */
3086 MonoInst *element_class;
3088 /* This assertion is from the unboxcast insn */
3089 g_assert (klass->rank == 0);
3091 element_class = emit_get_rgctx_klass (cfg, context_used,
3092 klass->element_class, MONO_RGCTX_INFO_KLASS);
3094 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3095 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3097 save_cast_details (cfg, klass->element_class, obj_reg);
3098 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3099 reset_cast_details (cfg);
/* Result: pointer to the value, skipping the object header. */
3102 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3103 MONO_ADD_INS (cfg->cbb, add);
3104 add->type = STACK_MP;
3111 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS. FOR_BOX indicates the
 * allocation is part of a box operation. The allocator used depends on
 * sharing mode, AOT, and whether a GC managed allocator is available.
 * NOTE(review): interior lines are elided in this view; comments describe
 * only the visible branches.
 */
3114 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3116 MonoInst *iargs [2];
/* Shared-generics path: klass/vtable comes from the rgctx. */
3122 MonoInst *iargs [2];
3125 FIXME: we cannot get managed_alloc here because we can't get
3126 the class's vtable (because it's not a closed class)
3128 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3129 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3132 if (cfg->opt & MONO_OPT_SHARED)
3133 rgctx_info = MONO_RGCTX_INFO_KLASS;
3135 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3136 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3138 if (cfg->opt & MONO_OPT_SHARED) {
3139 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3141 alloc_ftn = mono_object_new;
3144 alloc_ftn = mono_object_new_specific;
3147 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3150 if (cfg->opt & MONO_OPT_SHARED) {
3151 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3152 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3154 alloc_ftn = mono_object_new;
3155 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3156 /* This happens often in argument checking code, eg. throw new FooException... */
3157 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3158 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3159 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3161 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3162 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3166 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3167 cfg->exception_ptr = klass;
3171 #ifndef MONO_CROSS_COMPILE
3172 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3175 if (managed_alloc) {
3176 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3177 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3179 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3181 guint32 lw = vtable->klass->instance_size;
3182 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3183 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3184 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3187 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3191 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3195 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL into an object of type KLASS. Nullable<T> boxing
 * goes through the managed Nullable<T>.Box helper; otherwise the object
 * is allocated via handle_alloc () and the value stored past the header.
 */
3198 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3200 MonoInst *alloc, *ins;
3202 if (mono_class_is_nullable (klass)) {
3203 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3206 /* FIXME: What if the class is shared? We might not
3207 have to get the method address from the RGCTX. */
3208 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3209 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3210 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3212 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3214 return mono_emit_method_call (cfg, method, &val, NULL);
3218 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Store the value into the freshly allocated box, after the header. */
3222 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * is_complex_isinst:
 *
 *   TRUE when an isinst/castclass against KLASS cannot be handled by the
 * simple inline checks and must go through an icall. Note the leading
 * "TRUE ||" currently forces the complex path for every class (see the
 * FIXME below).
 */
3227 // FIXME: This doesn't work yet (class libs tests fail?)
3228 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3231 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for a castclass of SRC to KLASS: null objects pass through,
 * complex classes go through the mono_object_castclass icall, and simple
 * cases are checked inline against the vtable/klass, throwing
 * InvalidCastException on mismatch.
 */
3234 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3236 MonoBasicBlock *is_null_bb;
3237 int obj_reg = src->dreg;
3238 int vtable_reg = alloc_preg (cfg);
3239 MonoInst *klass_inst = NULL;
3244 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3245 klass, MONO_RGCTX_INFO_KLASS);
3247 if (is_complex_isinst (klass)) {
3248 /* Complex case, handle by an icall */
3254 args [1] = klass_inst;
3256 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3258 /* Simple case, handled by the code below */
/* null always casts successfully: branch straight to the merge block. */
3262 NEW_BBLOCK (cfg, is_null_bb);
3264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3267 save_cast_details (cfg, klass, obj_reg);
3269 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3271 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3273 int klass_reg = alloc_preg (cfg);
3275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3277 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3278 /* the remoting code is broken, access the class for now */
3279 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3280 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3282 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3283 cfg->exception_ptr = klass;
3286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: a single klass equality check suffices. */
3288 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3291 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3294 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3298 MONO_START_BB (cfg, is_null_bb);
3300 reset_cast_details (cfg);
3306 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for an isinst of SRC against KLASS. The result register holds
 * the object on success and NULL on failure. Complex classes go through
 * the mono_object_isinst icall; simple cases are checked inline with a
 * three-way basic-block structure (is_null_bb = success/passthrough,
 * false_bb = failure, end_bb = merge).
 */
3309 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3312 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3313 int obj_reg = src->dreg;
3314 int vtable_reg = alloc_preg (cfg);
3315 int res_reg = alloc_preg (cfg);
3316 MonoInst *klass_inst = NULL;
3319 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3321 if (is_complex_isinst (klass)) {
3324 /* Complex case, handle by an icall */
3330 args [1] = klass_inst;
3332 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3334 /* Simple case, the code below can handle it */
3338 NEW_BBLOCK (cfg, is_null_bb);
3339 NEW_BBLOCK (cfg, false_bb);
3340 NEW_BBLOCK (cfg, end_bb);
3342 /* Do the assignment at the beginning, so the other assignment can be if converted */
3343 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3344 ins->type = STACK_OBJ;
/* null is never an instance; it simply propagates as the result. */
3347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3352 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3353 g_assert (!context_used);
3354 /* the is_null_bb target simply copies the input register to the output */
3355 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3357 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3360 int rank_reg = alloc_preg (cfg);
3361 int eclass_reg = alloc_preg (cfg);
3363 g_assert (!context_used);
3364 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3369 if (klass->cast_class == mono_defaults.object_class) {
3370 int parent_reg = alloc_preg (cfg);
3371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3372 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3373 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3375 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3376 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3377 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3378 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3379 } else if (klass->cast_class == mono_defaults.enum_class) {
3380 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3381 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3382 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3383 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3385 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3386 /* Check that the object is a vector too */
3387 int bounds_reg = alloc_preg (cfg);
3388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3389 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3390 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3393 /* the is_null_bb target simply copies the input register to the output */
3394 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3396 } else if (mono_class_is_nullable (klass)) {
3397 g_assert (!context_used);
3398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3399 /* the is_null_bb target simply copies the input register to the output */
3400 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3402 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3403 g_assert (!context_used);
3404 /* the remoting code is broken, access the class for now */
3405 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3406 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3408 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3409 cfg->exception_ptr = klass;
3412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: single klass equality check decides the result. */
3414 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3417 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3421 /* the is_null_bb target simply copies the input register to the output */
3422 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
3427 MONO_START_BB (cfg, false_bb);
3429 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3430 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3432 MONO_START_BB (cfg, is_null_bb);
3434 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CISINST opcode (remoting-aware isinst); the
 * 0/1/2 result encoding is documented in the comment below. Transparent
 * proxies whose type cannot be decided locally produce 2 so the caller
 * can fall back to a remote check.
 */
3440 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3442 /* This opcode takes as input an object reference and a class, and returns:
3443 0) if the object is an instance of the class,
3444 1) if the object is not instance of the class,
3445 2) if the object is a proxy whose type cannot be determined */
3448 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3449 int obj_reg = src->dreg;
3450 int dreg = alloc_ireg (cfg);
3452 int klass_reg = alloc_preg (cfg);
3454 NEW_BBLOCK (cfg, true_bb);
3455 NEW_BBLOCK (cfg, false_bb);
3456 NEW_BBLOCK (cfg, false2_bb);
3457 NEW_BBLOCK (cfg, end_bb);
3458 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is not an instance of anything: result 1. */
3460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3463 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3464 NEW_BBLOCK (cfg, interface_fail_bb);
3466 tmp_reg = alloc_preg (cfg);
3467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3468 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3469 MONO_START_BB (cfg, interface_fail_bb);
3470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a proxy can still say "maybe" (2). */
3472 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3474 tmp_reg = alloc_preg (cfg);
3475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect a transparent proxy first. */
3479 tmp_reg = alloc_preg (cfg);
3480 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3483 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3484 tmp_reg = alloc_preg (cfg);
3485 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3488 tmp_reg = alloc_preg (cfg);
3489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3493 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3496 MONO_START_BB (cfg, no_proxy_bb);
3498 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge: materialize the 0/1/2 result into dreg. */
3501 MONO_START_BB (cfg, false_bb);
3503 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3504 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3506 MONO_START_BB (cfg, false2_bb);
3508 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3511 MONO_START_BB (cfg, true_bb);
3513 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3515 MONO_START_BB (cfg, end_bb);
3518 MONO_INST_NEW (cfg, ins, OP_ICONST);
3520 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for a remoting/proxy-aware castclass: null objects succeed,
 * interface casts fall back to a TransparentProxy check, and proxies with
 * custom type info take a slow path (dreg set to 1); a plain instance cast
 * sets dreg to 0 or raises InvalidCastException.
 * NOTE(review): this excerpt is missing interior lines (embedded original
 * line numbers jump); comments describe only the visible statements.
 */
3526 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3528 /* This opcode takes as input an object reference and a class, and returns:
3529 0) if the object is an instance of the class,
3530 1) if the object is a proxy whose type cannot be determined
3531 an InvalidCastException exception is thrown otherwise */
3534 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3535 int obj_reg = src->dreg;
3536 int dreg = alloc_ireg (cfg);
3537 int tmp_reg = alloc_preg (cfg);
3538 int klass_reg = alloc_preg (cfg);
3540 NEW_BBLOCK (cfg, end_bb);
3541 NEW_BBLOCK (cfg, ok_result_bb);
/* A null object reference always casts successfully. */
3543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record the class/object for a better InvalidCastException message. */
3546 save_cast_details (cfg, klass, obj_reg);
3548 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3549 NEW_BBLOCK (cfg, interface_fail_bb);
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3552 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3553 MONO_START_BB (cfg, interface_fail_bb);
3554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* The interface check failed: only a TransparentProxy may still pass. */
3556 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3558 tmp_reg = alloc_preg (cfg);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3561 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (type cannot be determined here). */
3563 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3567 NEW_BBLOCK (cfg, no_proxy_bb);
3569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3571 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Object is a TransparentProxy: cast against the remote proxy_class. */
3573 tmp_reg = alloc_preg (cfg);
3574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3577 tmp_reg = alloc_preg (cfg);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3582 NEW_BBLOCK (cfg, fail_1_bb);
3584 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3586 MONO_START_BB (cfg, fail_1_bb);
3588 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3591 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing castclass raises InvalidCastException. */
3593 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3596 MONO_START_BB (cfg, ok_result_bb);
3598 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3600 MONO_START_BB (cfg, end_bb);
3603 MONO_INST_NEW (cfg, ins, OP_ICONST);
3605 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the body of mono_delegate_ctor: allocate the delegate object and
 * fill its target, method, method_code and invoke_impl fields, leaving the
 * remaining validation to the delegate trampoline.
 * NOTE(review): interior lines are missing from this excerpt; comments
 * describe only the visible statements.
 */
3611 * Returns NULL and set the cfg exception on error.
3613 static G_GNUC_UNUSED MonoInst*
3614 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3616 gpointer *trampoline;
3617 MonoInst *obj, *method_ins, *tramp_ins;
3621 obj = handle_alloc (cfg, klass, FALSE, 0);
3625 /* Inline the contents of mono_delegate_ctor */
3627 /* Set target field */
3628 /* Optimize away setting of NULL target */
3629 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3632 /* Set method field */
3633 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3634 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3637 * To avoid looking up the compiled code belonging to the target method
3638 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3639 * store it, and we fill it after the method has been compiled.
3641 if (!cfg->compile_aot && !method->dynamic) {
3642 MonoInst *code_slot_ins;
3645 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
3647 domain = mono_domain_get ();
3648 mono_domain_lock (domain);
3649 if (!domain_jit_info (domain)->method_code_hash)
3650 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3651 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3653 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3654 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3656 mono_domain_unlock (domain);
3658 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3660 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3663 /* Set invoke_impl field */
3664 if (cfg->compile_aot) {
/* AOT: the trampoline address is patched at load time. */
3665 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3667 trampoline = mono_create_delegate_trampoline (klass);
3668 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3670 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3672 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall wrapper for a multi-dim
 * array allocation of the given RANK, consuming the dimension arguments
 * from SP.  Forces a vararg calling convention and disables LLVM for
 * this method.
 */
3678 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3680 MonoJitICallInfo *info;
3682 /* Need to register the icall so it gets an icall wrapper */
3683 info = mono_get_array_new_va_icall (rank);
3685 cfg->flags |= MONO_CFG_HAS_VARARGS;
3687 /* mono_array_new_va () needs a vararg calling convention */
3688 cfg->disable_llvm = TRUE;
3690 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3691 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 * basic block (no-op if the var is absent or already allocated), and add a
 * dummy use in the exit block so the variable stays live for the whole
 * method even when only the backend references it.
 */
3695 mono_emit_load_got_addr (MonoCompile *cfg)
3697 MonoInst *getaddr, *dummy_use;
3699 if (!cfg->got_var || cfg->got_var_allocated)
3702 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3703 getaddr->dreg = cfg->got_var->dreg;
3705 /* Add it to the start of the first bblock */
3706 if (cfg->bb_entry->code) {
3707 getaddr->next = cfg->bb_entry->code;
3708 cfg->bb_entry->code = getaddr;
3711 MONO_ADD_INS (cfg->bb_entry, getaddr);
3713 cfg->got_var_allocated = TRUE;
3716 * Add a dummy use to keep the got_var alive, since real uses might
3717 * only be generated by the back ends.
3718 * Add it to end_bblock, so the variable's lifetime covers the whole
3720 * It would be better to make the usage of the got var explicit in all
3721 * cases when the backend needs it (i.e. calls, throw etc.), so this
3722 * wouldn't be needed.
3724 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3725 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size limit (IL code size), lazily read from the MONO_INLINELIMIT
 * environment variable in mono_method_check_inlining (); defaults to
 * INLINE_LENGTH_LIMIT. */
3728 static int inline_limit;
3729 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing, deep inline chains, noinline/synchronized/
 * marshalbyref methods, bodies over the size limit, classes whose cctor
 * cannot be run eagerly, methods with declarative security, and (under
 * soft-float) R4 parameters/returns.
 * NOTE(review): interior lines (returns, braces, #else branches) are missing
 * from this excerpt; comments describe only the visible checks.
 */
3732 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3734 MonoMethodHeaderSummary header;
3736 #ifdef MONO_ARCH_SOFT_FLOAT
3737 MonoMethodSignature *sig = mono_method_signature (method);
3741 if (cfg->generic_sharing_context)
/* Bound the inline recursion depth. */
3744 if (cfg->inline_depth > 10)
3747 #ifdef MONO_ARCH_HAVE_LMF_OPS
3748 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3749 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3750 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3755 if (!mono_method_get_header_summary (method, &header))
3758 /*runtime, icall and pinvoke are checked by summary call*/
3759 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3760 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3761 (method->klass->marshalbyref) ||
3765 /* also consider num_locals? */
3766 /* Do the size check early to avoid creating vtables */
3767 if (!inline_limit_inited) {
3768 if (getenv ("MONO_INLINELIMIT"))
3769 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3771 inline_limit = INLINE_LENGTH_LIMIT;
3772 inline_limit_inited = TRUE;
3774 if (header.code_size >= inline_limit)
3778 * if we can initialize the class of the method right away, we do,
3779 * otherwise we don't allow inlining if the class needs initialization,
3780 * since it would mean inserting a call to mono_runtime_class_init()
3781 * inside the inlined code
3783 if (!(cfg->opt & MONO_OPT_SHARED)) {
3784 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3785 if (cfg->run_cctors && method->klass->has_cctor) {
3786 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3787 if (!method->klass->runtime_info)
3788 /* No vtable created yet */
3790 vtable = mono_class_vtable (cfg->domain, method->klass);
3793 /* This makes so that inline cannot trigger */
3794 /* .cctors: too many apps depend on them */
3795 /* running with a specific order... */
3796 if (! vtable->initialized)
3798 mono_runtime_class_init (vtable);
3800 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3801 if (!method->klass->runtime_info)
3802 /* No vtable created yet */
3804 vtable = mono_class_vtable (cfg->domain, method->klass);
3807 if (!vtable->initialized)
3812 * If we're compiling for shared code
3813 * the cctor will need to be run at aot method load time, for example,
3814 * or at the end of the compilation of the inlining method.
3816 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3821 * CAS - do not inline methods with declarative security
3822 * Note: this has to be before any possible return TRUE;
3824 if (mono_method_has_declsec (method))
3827 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline methods taking or returning R4 values. */
3829 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3831 for (i = 0; i < sig->param_count; ++i)
3832 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Whether a static-field access in METHOD requires emitting a class
 * initialization check for VTABLE's class.  Already-initialized classes
 * (non-AOT), beforefieldinit classes, classes with no pending cctor, and
 * instance methods of the class itself (init runs before the call) do not.
 * NOTE(review): the return statements are not visible in this excerpt.
 */
3840 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3842 if (vtable->initialized && !cfg->compile_aot)
3845 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3848 if (!mono_class_needs_cctor_run (vtable->klass, method))
3851 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3852 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of one-dimensional array ARR with
 * element class KLASS, with an optional bounds check (BCHECK).  Uses an
 * x86/amd64 LEA fast path for power-of-two element sizes, otherwise a
 * multiply-and-add sequence into MonoArray.vector.
 */
3859 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3863 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3865 mono_class_init (klass);
3866 size = mono_class_array_element_size (klass);
3868 mult_reg = alloc_preg (cfg);
3869 array_reg = arr->dreg;
3870 index_reg = index->dreg;
3872 #if SIZEOF_REGISTER == 8
3873 /* The array reg is 64 bits but the index reg is only 32 */
3874 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
3876 index2_reg = index_reg;
3878 index2_reg = alloc_preg (cfg);
3879 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3882 if (index->type == STACK_I8) {
3883 index2_reg = alloc_preg (cfg);
3884 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3886 index2_reg = index_reg;
3891 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3893 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3894 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 [size] gives the LEA shift for power-of-two element sizes. */
3895 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3897 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3898 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3904 add_reg = alloc_preg (cfg);
3906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3907 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3908 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3909 ins->type = STACK_PTR;
3910 MONO_ADD_INS (cfg->cbb, ins);
3915 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [index1, index2] of a two-dimensional
 * array: range-check both indices against the MonoArrayBounds pairs, then
 * compute (realidx1 * length2 + realidx2) * element_size into the vector.
 * Only compiled when the architecture has real multiply ops.
 */
3917 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3919 int bounds_reg = alloc_preg (cfg);
3920 int add_reg = alloc_preg (cfg);
3921 int mult_reg = alloc_preg (cfg);
3922 int mult2_reg = alloc_preg (cfg);
3923 int low1_reg = alloc_preg (cfg);
3924 int low2_reg = alloc_preg (cfg);
3925 int high1_reg = alloc_preg (cfg);
3926 int high2_reg = alloc_preg (cfg);
3927 int realidx1_reg = alloc_preg (cfg);
3928 int realidx2_reg = alloc_preg (cfg);
3929 int sum_reg = alloc_preg (cfg);
3934 mono_class_init (klass);
3935 size = mono_class_array_element_size (klass);
3937 index1 = index_ins1->dreg;
3938 index2 = index_ins2->dreg;
3940 /* range checking */
3941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3942 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; must be < length. */
3944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3945 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3946 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3948 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3949 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3950 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry at bounds_reg + sizeof (MonoArrayBounds). */
3952 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3953 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3954 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3956 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3957 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3958 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + vector offset. */
3960 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3961 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3963 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3964 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3966 ins->type = STACK_MP;
3968 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch an Address () call on an array: rank-1 arrays use the inline
 * one-dimensional path, rank-2 uses the inline two-dimensional path (when
 * intrinsics are enabled and the arch has real mul), anything else calls
 * the marshalled array-address helper method.
 */
3975 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3979 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index. */
3982 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3985 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3987 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3988 /* emit_ldelema_2 depends on OP_LMUL */
3989 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3990 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3994 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3995 addr_method = mono_marshal_get_array_address (rank, element_size);
3996 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoint instructions. */
4001 static MonoBreakPolicy
4002 always_insert_breakpoint (MonoMethod *method)
4004 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4007 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4010 * mono_set_break_policy:
4011 * policy_callback: the new callback function
4013 * Allow embedders to decide whether to actually obey breakpoint instructions
4014 * (both break IL instructions and Debugger.Break () method calls), for example
4015 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4016 * untrusted or semi-trusted code.
4018 * @policy_callback will be called every time a break point instruction needs to
4019 * be inserted with the method argument being the method that calls Debugger.Break()
4020 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4021 * if it wants the breakpoint to not be effective in the given method.
4022 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy:
 * @policy_callback: the new callback, or NULL to restore the default
 *
 *   Install POLICY_CALLBACK as the break-policy hook; a NULL argument
 * resets to always_insert_breakpoint.
 */
4025 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4027 if (policy_callback)
4028 break_policy_func = policy_callback;
4030 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical misspelling kept, callers use it)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * instruction should actually be emitted for METHOD.  Warns on an invalid
 * policy value.
 */
4034 should_insert_brekpoint (MonoMethod *method) {
4035 switch (break_policy_func (method)) {
4036 case MONO_BREAK_POLICY_ALWAYS:
4038 case MONO_BREAK_POLICY_NEVER:
4040 case MONO_BREAK_POLICY_ON_DBG:
4041 return mono_debug_using_mono_debugger ();
4043 g_warning ("Incorrect value returned from break policy callback");
4048 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an unchecked load/store between args [2] and array element
 * (args [0], args [1]) typed by fsig->params [2]; IS_SET selects the
 * store direction.  The callers have already performed the bounds check.
 */
4050 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4052 MonoInst *addr, *store, *load;
4053 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4055 /* the bounds check is already done by the callers */
4056 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> element; otherwise element -> value location. */
4058 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4059 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4061 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4062 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction;
 * currently only SIMD constructors are handled (when MONO_OPT_SIMD is on).
 */
4068 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4070 MonoInst *ins = NULL;
4071 #ifdef MONO_ARCH_SIMD_INTRINSICS
4072 if (cfg->opt & MONO_OPT_SIMD) {
4073 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: try to replace a call to CMETHOD with inline IR.
 * Handles String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger/Environment and Math methods, then falls back to
 * the architecture-specific hook.
 * NOTE(review): interior lines (returns, braces, #else/#endif partners) are
 * missing from this excerpt; comments describe only the visible statements.
 */
4083 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4085 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
4087 static MonoClass *runtime_helpers_class = NULL;
4088 if (! runtime_helpers_class)
4089 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4090 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4092 if (cmethod->klass == mono_defaults.string_class) {
4093 if (strcmp (cmethod->name, "get_Chars") == 0) {
4094 int dreg = alloc_ireg (cfg);
4095 int index_reg = alloc_preg (cfg);
4096 int mult_reg = alloc_preg (cfg);
4097 int add_reg = alloc_preg (cfg);
4099 #if SIZEOF_REGISTER == 8
4100 /* The array reg is 64 bits but the index reg is only 32 */
4101 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4103 index_reg = args [1]->dreg;
4105 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4107 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4108 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4109 add_reg = ins->dreg;
4110 /* Avoid a warning */
4112 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Non-x86 path: addr = str + index * 2 + chars offset. */
4115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4116 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4117 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4118 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4120 type_from_op (ins, NULL, NULL);
4122 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4123 int dreg = alloc_ireg (cfg);
4124 /* Decompose later to allow more optimizations */
4125 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4126 ins->type = STACK_I4;
4127 ins->flags |= MONO_INST_FAULT;
4128 cfg->cbb->has_array_access = TRUE;
4129 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4132 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4133 int mult_reg = alloc_preg (cfg);
4134 int add_reg = alloc_preg (cfg);
4136 /* The corlib functions check for oob already. */
4137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4138 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4140 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4143 } else if (cmethod->klass == mono_defaults.object_class) {
4145 if (strcmp (cmethod->name, "GetType") == 0) {
4146 int dreg = alloc_preg (cfg);
4147 int vt_reg = alloc_preg (cfg);
4148 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4149 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4150 type_from_op (ins, NULL, NULL);
4153 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Only valid with a non-moving GC: hashes the object address. */
4154 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4155 int dreg = alloc_ireg (cfg);
4156 int t1 = alloc_ireg (cfg);
4158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4159 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4160 ins->type = STACK_I4;
4164 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4165 MONO_INST_NEW (cfg, ins, OP_NOP);
4166 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4170 } else if (cmethod->klass == mono_defaults.array_class) {
4171 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4172 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4174 #ifndef MONO_BIG_ARRAYS
4176 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4179 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4180 int dreg = alloc_ireg (cfg);
4181 int bounds_reg = alloc_ireg (cfg);
4182 MonoBasicBlock *end_bb, *szarray_bb;
4183 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4185 NEW_BBLOCK (cfg, end_bb);
4186 NEW_BBLOCK (cfg, szarray_bb);
4188 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4189 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* A NULL bounds pointer marks an szarray (single-dim, zero-based). */
4190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4192 /* Non-szarray case */
4194 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4195 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4197 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4198 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4199 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4200 MONO_START_BB (cfg, szarray_bb);
4203 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4204 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
/* szarray lower bound is always 0. */
4206 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4207 MONO_START_BB (cfg, end_bb);
4209 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4210 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
4216 if (cmethod->name [0] != 'g')
4219 if (strcmp (cmethod->name, "get_Rank") == 0) {
4220 int dreg = alloc_ireg (cfg);
4221 int vtable_reg = alloc_preg (cfg);
4222 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4223 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4224 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4225 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4226 type_from_op (ins, NULL, NULL);
4229 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4230 int dreg = alloc_ireg (cfg);
4232 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4233 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4234 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4239 } else if (cmethod->klass == runtime_helpers_class) {
4241 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4242 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4246 } else if (cmethod->klass == mono_defaults.thread_class) {
4247 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4248 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4249 MONO_ADD_INS (cfg->cbb, ins);
4251 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4252 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4253 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths --- */
4256 } else if (cmethod->klass == mono_defaults.monitor_class) {
4257 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4258 /* The trampolines don't work under SGEN */
4259 gboolean is_moving_gc = mono_gc_is_moving ();
4261 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4264 if (COMPILE_LLVM (cfg)) {
4266 * Pass the argument normally, the LLVM backend will handle the
4267 * calling convention problems.
4269 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the object is passed in a fixed register to the trampoline. */
4271 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4272 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4273 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4274 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4277 return (MonoInst*)call;
4278 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4281 if (COMPILE_LLVM (cfg)) {
4282 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4284 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4285 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4286 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4287 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4290 return (MonoInst*)call;
4292 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4293 MonoMethod *fast_method = NULL;
4295 /* Avoid infinite recursion */
4296 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4297 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4298 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4301 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4302 strcmp (cmethod->name, "Exit") == 0)
4303 fast_method = mono_monitor_get_fast_path (cmethod);
4307 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4309 } else if (cmethod->klass->image == mono_defaults.corlib &&
4310 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4311 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4314 #if SIZEOF_REGISTER == 8
4315 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4316 /* 64 bit reads are already atomic */
4317 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4318 ins->dreg = mono_alloc_preg (cfg);
4319 ins->inst_basereg = args [0]->dreg;
4320 ins->inst_offset = 0;
4321 MONO_ADD_INS (cfg->cbb, ins);
4325 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic add of +1/-1. */
4326 if (strcmp (cmethod->name, "Increment") == 0) {
4327 MonoInst *ins_iconst;
4330 if (fsig->params [0]->type == MONO_TYPE_I4)
4331 opcode = OP_ATOMIC_ADD_NEW_I4;
4332 #if SIZEOF_REGISTER == 8
4333 else if (fsig->params [0]->type == MONO_TYPE_I8)
4334 opcode = OP_ATOMIC_ADD_NEW_I8;
4337 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4338 ins_iconst->inst_c0 = 1;
4339 ins_iconst->dreg = mono_alloc_ireg (cfg);
4340 MONO_ADD_INS (cfg->cbb, ins_iconst);
4342 MONO_INST_NEW (cfg, ins, opcode);
4343 ins->dreg = mono_alloc_ireg (cfg);
4344 ins->inst_basereg = args [0]->dreg;
4345 ins->inst_offset = 0;
4346 ins->sreg2 = ins_iconst->dreg;
4347 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4348 MONO_ADD_INS (cfg->cbb, ins);
4350 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4351 MonoInst *ins_iconst;
4354 if (fsig->params [0]->type == MONO_TYPE_I4)
4355 opcode = OP_ATOMIC_ADD_NEW_I4;
4356 #if SIZEOF_REGISTER == 8
4357 else if (fsig->params [0]->type == MONO_TYPE_I8)
4358 opcode = OP_ATOMIC_ADD_NEW_I8;
4361 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4362 ins_iconst->inst_c0 = -1;
4363 ins_iconst->dreg = mono_alloc_ireg (cfg);
4364 MONO_ADD_INS (cfg->cbb, ins_iconst);
4366 MONO_INST_NEW (cfg, ins, opcode);
4367 ins->dreg = mono_alloc_ireg (cfg);
4368 ins->inst_basereg = args [0]->dreg;
4369 ins->inst_offset = 0;
4370 ins->sreg2 = ins_iconst->dreg;
4371 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4372 MONO_ADD_INS (cfg->cbb, ins);
4374 } else if (strcmp (cmethod->name, "Add") == 0) {
4377 if (fsig->params [0]->type == MONO_TYPE_I4)
4378 opcode = OP_ATOMIC_ADD_NEW_I4;
4379 #if SIZEOF_REGISTER == 8
4380 else if (fsig->params [0]->type == MONO_TYPE_I8)
4381 opcode = OP_ATOMIC_ADD_NEW_I8;
4385 MONO_INST_NEW (cfg, ins, opcode);
4386 ins->dreg = mono_alloc_ireg (cfg);
4387 ins->inst_basereg = args [0]->dreg;
4388 ins->inst_offset = 0;
4389 ins->sreg2 = args [1]->dreg;
4390 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4391 MONO_ADD_INS (cfg->cbb, ins);
4394 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4396 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4397 if (strcmp (cmethod->name, "Exchange") == 0) {
4399 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4401 if (fsig->params [0]->type == MONO_TYPE_I4)
4402 opcode = OP_ATOMIC_EXCHANGE_I4;
4403 #if SIZEOF_REGISTER == 8
4404 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4405 (fsig->params [0]->type == MONO_TYPE_I))
4406 opcode = OP_ATOMIC_EXCHANGE_I8;
4408 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4409 opcode = OP_ATOMIC_EXCHANGE_I4;
4414 MONO_INST_NEW (cfg, ins, opcode);
4415 ins->dreg = mono_alloc_ireg (cfg);
4416 ins->inst_basereg = args [0]->dreg;
4417 ins->inst_offset = 0;
4418 ins->sreg2 = args [1]->dreg;
4419 MONO_ADD_INS (cfg->cbb, ins);
4421 switch (fsig->params [0]->type) {
4423 ins->type = STACK_I4;
4427 ins->type = STACK_I8;
4429 case MONO_TYPE_OBJECT:
4430 ins->type = STACK_OBJ;
4433 g_assert_not_reached ();
/* A reference store bypassed the write barrier; emit it explicitly. */
4436 if (cfg->gen_write_barriers && is_ref) {
4437 MonoInst *dummy_use;
4438 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4439 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4440 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4443 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4445 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4446 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4448 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4449 if (fsig->params [1]->type == MONO_TYPE_I4)
4451 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4452 size = sizeof (gpointer);
4453 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4456 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4457 ins->dreg = alloc_ireg (cfg);
4458 ins->sreg1 = args [0]->dreg;
4459 ins->sreg2 = args [1]->dreg;
4460 ins->sreg3 = args [2]->dreg;
4461 ins->type = STACK_I4;
4462 MONO_ADD_INS (cfg->cbb, ins);
4463 } else if (size == 8) {
4464 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4465 ins->dreg = alloc_ireg (cfg);
4466 ins->sreg1 = args [0]->dreg;
4467 ins->sreg2 = args [1]->dreg;
4468 ins->sreg3 = args [2]->dreg;
4469 ins->type = STACK_I8;
4470 MONO_ADD_INS (cfg->cbb, ins);
4472 /* g_assert_not_reached (); */
4474 if (cfg->gen_write_barriers && is_ref) {
4475 MonoInst *dummy_use;
4476 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4477 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4478 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4481 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- misc corlib: Debugger.Break, Environment --- */
4485 } else if (cmethod->klass->image == mono_defaults.corlib) {
4486 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4487 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4488 if (should_insert_brekpoint (cfg->method))
4489 MONO_INST_NEW (cfg, ins, OP_BREAK);
4491 MONO_INST_NEW (cfg, ins, OP_NOP);
4492 MONO_ADD_INS (cfg->cbb, ins);
4495 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4496 && strcmp (cmethod->klass->name, "Environment") == 0) {
4498 EMIT_NEW_ICONST (cfg, ins, 1);
4500 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math --- */
4504 } else if (cmethod->klass == mono_defaults.math_class) {
4506 * There is general branches code for Min/Max, but it does not work for
4508 * http://everything2.com/?node_id=1051618
4512 #ifdef MONO_ARCH_SIMD_INTRINSICS
4513 if (cfg->opt & MONO_OPT_SIMD) {
4514 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally give the architecture backend a chance. */
4520 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4524 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to selected runtime-internal methods to a faster
 * JIT-emitted equivalent.  The only case visible here is managed string
 * allocation: a call to String::InternalAllocateStr is rewritten into a
 * direct call to the GC's managed allocator, passing the String vtable
 * and the requested length.  The redirection is suppressed when
 * allocation profiling is active or shared (domain-neutral) code is
 * being generated, since both require the generic allocation path.
 * NOTE(review): the non-redirected fall-through path is not visible in
 * this view — presumably returns NULL so the caller emits a normal call.
 */
4527 inline static MonoInst*
4528 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4529 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4531 if (method->klass == mono_defaults.string_class) {
4532 /* managed string allocation support */
4533 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4534 MonoInst *iargs [2];
4535 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4536 MonoMethod *managed_alloc = NULL;
4538 g_assert (vtable); /* Should not fail since it is System.String */
4539 #ifndef MONO_CROSS_COMPILE
4540 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* iargs [0] = vtable of the String class, iargs [1] = requested length */
4544 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4545 iargs [1] = args [0];
4546 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Set up the argument variables for an inlined method: for each formal
 * argument (including an implicit 'this' at index 0 when sig->hasthis),
 * create an OP_LOCAL variable, install it in cfg->args [i], and emit a
 * store of the corresponding caller stack slot into it.
 */
4553 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4555 MonoInst *store, *temp;
4558 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static signature has no entry; derive the type from the stack slot. */
4559 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4562 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4563 * would be different than the MonoInst's used to represent arguments, and
4564 * the ldelema implementation can't deal with that.
4565 * Solution: When ldelema is used on an inline argument, create a var for
4566 * it, emit ldelema on that var, and emit the saving code below in
4567 * inline_method () if needed.
4569 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4570 cfg->args [i] = temp;
4571 /* This uses cfg->args [i] which is set by the preceding line */
4572 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Keep the IL position of the incoming value for debugging/diagnostics. */
4573 store->cil_code = sp [0]->cil_code;
4578 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4579 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4581 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of methods whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The variable is read once and cached; an empty
 * or unset limit allows everything.
 */
4583 check_inline_called_method_name_limit (MonoMethod *called_method)
4586 static char *limit = NULL;
4588 if (limit == NULL) {
4589 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4591 if (limit_string != NULL)
4592 limit = limit_string;
/* No env var set: cache an empty limit so getenv () is not called again. */
4594 limit = (char *) "";
4597 if (limit [0] != '\0') {
4598 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match against the configured limit. */
4600 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4601 g_free (called_method_name);
4603 //return (strncmp_result <= 0);
4604 return (strncmp_result == 0);
4611 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit ():
 * only allow inlining into callers whose full name starts with the
 * prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable.  Read once and cached; empty/unset allows everything.
 */
4613 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4616 static char *limit = NULL;
4618 if (limit == NULL) {
4619 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4620 if (limit_string != NULL) {
4621 limit = limit_string;
/* No env var set: cache an empty limit so getenv () is not called again. */
4623 limit = (char *) "";
4627 if (limit [0] != '\0') {
4628 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit. */
4630 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4631 g_free (caller_method_name);
4633 //return (strncmp_result <= 0);
4634 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts
 * of CFG that describe the method being compiled (args, locals, bblock
 * maps, generic context, ...), recursively invokes mono_method_to_ir ()
 * on the callee between freshly allocated start/end bblocks, then
 * restores the saved state.  On success the callee's bblocks are linked
 * into the caller's graph and merged where possible; on failure the new
 * bblocks are discarded by resetting cfg->cbb.
 * RVAR, when the callee is non-void, is the local receiving the return
 * value.  INLINE_ALLWAYS forces inlining regardless of cost.
 * NOTE(review): the return value convention (costs on success, 0 on
 * failure) is not fully visible in this view — confirm against callers.
 */
4642 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4643 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4645 MonoInst *ins, *rvar = NULL;
4646 MonoMethodHeader *cheader;
4647 MonoBasicBlock *ebblock, *sbblock;
4649 MonoMethod *prev_inlined_method;
4650 MonoInst **prev_locals, **prev_args;
4651 MonoType **prev_arg_types;
4652 guint prev_real_offset;
4653 GHashTable *prev_cbb_hash;
4654 MonoBasicBlock **prev_cil_offset_to_bb;
4655 MonoBasicBlock *prev_cbb;
4656 unsigned char* prev_cil_start;
4657 guint32 prev_cil_offset_to_bb_len;
4658 MonoMethod *prev_current_method;
4659 MonoGenericContext *prev_generic_context;
4660 gboolean ret_var_set, prev_ret_var_set;
4662 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven filters restricting what may be inlined (debug aid). */
4664 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4665 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4668 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4669 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4673 if (cfg->verbose_level > 2)
4674 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4676 if (!cmethod->inline_info) {
4677 mono_jit_stats.inlineable_methods++;
4678 cmethod->inline_info = 1;
4681 /* allocate local variables */
4682 cheader = mono_method_get_header (cmethod);
4684 if (cheader == NULL || mono_loader_get_last_error ()) {
/* Header could not be loaded: free whatever was allocated and bail out. */
4686 mono_metadata_free_mh (cheader);
4687 mono_loader_clear_error ();
4691 /* allocate space to store the return value */
4692 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4693 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee; the caller's is restored below. */
4697 prev_locals = cfg->locals;
4698 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4699 for (i = 0; i < cheader->num_locals; ++i)
4700 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4702 /* allocate start and end blocks */
4703 /* This is needed so if the inline is aborted, we can clean up */
4704 NEW_BBLOCK (cfg, sbblock);
4705 sbblock->real_offset = real_offset;
4707 NEW_BBLOCK (cfg, ebblock);
4708 ebblock->block_num = cfg->num_bblocks++;
4709 ebblock->real_offset = real_offset;
/* Save every piece of per-method state that mono_method_to_ir () will clobber. */
4711 prev_args = cfg->args;
4712 prev_arg_types = cfg->arg_types;
4713 prev_inlined_method = cfg->inlined_method;
4714 cfg->inlined_method = cmethod;
4715 cfg->ret_var_set = FALSE;
4716 cfg->inline_depth ++;
4717 prev_real_offset = cfg->real_offset;
4718 prev_cbb_hash = cfg->cbb_hash;
4719 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4720 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4721 prev_cil_start = cfg->cil_start;
4722 prev_cbb = cfg->cbb;
4723 prev_current_method = cfg->current_method;
4724 prev_generic_context = cfg->generic_context;
4725 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee's IL into IR between sbblock and ebblock. */
4727 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4729 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state saved above (mirror order of the saves). */
4731 cfg->inlined_method = prev_inlined_method;
4732 cfg->real_offset = prev_real_offset;
4733 cfg->cbb_hash = prev_cbb_hash;
4734 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4735 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4736 cfg->cil_start = prev_cil_start;
4737 cfg->locals = prev_locals;
4738 cfg->args = prev_args;
4739 cfg->arg_types = prev_arg_types;
4740 cfg->current_method = prev_current_method;
4741 cfg->generic_context = prev_generic_context;
4742 cfg->ret_var_set = prev_ret_var_set;
4743 cfg->inline_depth --;
/* Accept the inline when the cost is small enough (or when forced). */
4745 if ((costs >= 0 && costs < 60) || inline_allways) {
4746 if (cfg->verbose_level > 2)
4747 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4749 mono_jit_stats.inlined_methods++;
4751 /* always add some code to avoid block split failures */
4752 MONO_INST_NEW (cfg, ins, OP_NOP);
4753 MONO_ADD_INS (prev_cbb, ins);
4755 prev_cbb->next_bb = sbblock;
4756 link_bblock (cfg, prev_cbb, sbblock);
4759 * Get rid of the begin and end bblocks if possible to aid local
4762 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4764 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4765 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4767 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4768 MonoBasicBlock *prev = ebblock->in_bb [0];
4769 mono_merge_basic_blocks (cfg, prev, ebblock);
4771 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4772 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4773 cfg->cbb = prev_cbb;
4781 * If the inlined method contains only a throw, then the ret var is not
4782 * set, so set it to a dummy value.
/* Emit a type-appropriate zero/default constant into the unset return var. */
4785 static double r8_0 = 0.0;
4787 switch (rvar->type) {
4789 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4792 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4797 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4800 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4801 ins->type = STACK_R8;
4802 ins->inst_p0 = (void*)&r8_0;
4803 ins->dreg = rvar->dreg;
4804 MONO_ADD_INS (cfg->cbb, ins);
4807 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4810 g_assert_not_reached ();
/* Push the return value back onto the caller's evaluation stack. */
4814 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4817 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected or failed: report, clear any pending error state ... */
4820 if (cfg->verbose_level > 2)
4821 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4822 cfg->exception_type = MONO_EXCEPTION_NONE;
4823 mono_loader_clear_error ();
4825 /* This gets rid of the newly added bblocks */
4826 cfg->cbb = prev_cbb;
4828 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4833 * Some of these comments may well be out-of-date.
4834 * Design decisions: we do a single pass over the IL code (and we do bblock
4835 * splitting/merging in the few cases when it's required: a back jump to an IL
4836 * address that was not already seen as bblock starting point).
4837 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4838 * Complex operations are decomposed in simpler ones right away. We need to let the
4839 * arch-specific code peek and poke inside this process somehow (except when the
4840 * optimizations can take advantage of the full semantic info of coarse opcodes).
4841 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4842 * MonoInst->opcode initially is the IL opcode or some simplification of that
4843 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4844 * opcode with value bigger than OP_LAST.
4845 * At this point the IR can be handed over to an interpreter, a dumb code generator
4846 * or to the optimizing code generator that will translate it to SSA form.
4848 * Profiling directed optimizations.
4849 * We may compile by default with few or no optimizations and instrument the code
4850 * or the user may indicate what methods to optimize the most either in a config file
4851 * or through repeated runs where the compiler applies offline the optimizations to
4852 * each method and then decides if it was worth it.
4855 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4856 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4857 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4858 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4859 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4860 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4861 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4862 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4864 /* offset from br.s -> br like opcodes */
4865 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB: TRUE when
 * no other bblock starts at that CIL offset (NULL entry in the
 * offset->bblock map) or when the registered bblock is BB itself.
 */
4868 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4870 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4872 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan the IL between START and END, decoding each opcode with
 * mono_opcode_value () and creating basic blocks (via GET_BBLOCK) at
 * every branch target, at every instruction following a branch, and at
 * every switch case target.  Additionally marks the bblock containing a
 * CEE_THROW as out-of-line so later passes can move cold code away.
 * NOTE(review): error paths (e.g. for an invalid opcode) and the use of
 * *pos are elided from this view.
 */
4876 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4878 unsigned char *ip = start;
4879 unsigned char *target;
4882 MonoBasicBlock *bblock;
4883 const MonoOpcode *opcode;
4886 cli_addr = ip - start;
4887 i = mono_opcode_value ((const guint8 **)&ip, end);
4890 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding; branches get bblocks. */
4891 switch (opcode->argument) {
4892 case MonoInlineNone:
4895 case MonoInlineString:
4896 case MonoInlineType:
4897 case MonoInlineField:
4898 case MonoInlineMethod:
4901 case MonoShortInlineR:
4908 case MonoShortInlineVar:
4909 case MonoShortInlineI:
4912 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
4913 target = start + cli_addr + 2 + (signed char)ip [1];
4914 GET_BBLOCK (cfg, bblock, target);
/* The fall-through successor also starts a bblock. */
4917 GET_BBLOCK (cfg, bblock, ip);
4919 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
4920 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4921 GET_BBLOCK (cfg, bblock, target);
4924 GET_BBLOCK (cfg, bblock, ip);
4926 case MonoInlineSwitch: {
4927 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
4930 cli_addr += 5 + 4 * n;
4931 target = start + cli_addr;
4932 GET_BBLOCK (cfg, bblock, target);
4934 for (j = 0; j < n; ++j) {
4935 target = start + cli_addr + (gint32)read32 (ip);
4936 GET_BBLOCK (cfg, bblock, target);
4946 g_assert_not_reached ();
4949 if (i == CEE_THROW) {
4950 unsigned char *bb_start = ip - 1;
4952 /* Find the start of the bblock containing the throw */
4954 while ((bb_start >= start) && !bblock) {
4955 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: allow the backend to move them out of line. */
4959 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN in the context of method M.  For wrapper
 * methods the token indexes the wrapper's own data table instead of the
 * metadata; otherwise the token is looked up in M's image.  "allow open"
 * means the result may still contain unbound generic parameters.
 */
4968 static inline MonoMethod *
4969 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4973 if (m->wrapper_type != MONO_WRAPPER_NONE)
4974 return mono_method_get_wrapper_data (m, token);
4976 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * generic code an open constructed type in the result is rejected
 * (handling elided from this view) — non-shared code must only see
 * fully instantiated methods.
 */
4981 static inline MonoMethod *
4982 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4984 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4986 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN in the context of METHOD: wrapper methods read
 * the class from the wrapper data table, normal methods look it up in
 * their image with the given generic CONTEXT.  The class is initialized
 * before being returned.
 */
4992 static inline MonoClass*
4993 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4997 if (method->wrapper_type != MONO_WRAPPER_NONE)
4998 klass = mono_method_get_wrapper_data (method, token);
5000 klass = mono_class_get_full (method->klass->image, token, context);
5002 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS LinkDemand).  When the
 * declarative security check fails, either code throwing a
 * SecurityException is emitted inline (ECMA link demands) or the
 * failure is recorded on CFG for later reporting.
 */
5011 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only relevant when inlining (cfg->method != caller) into a secured callee. */
5015 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5019 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5020 if (result == MONO_JIT_SECURITY_OK)
5023 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5024 /* Generate code to throw a SecurityException before the actual call/link */
5025 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* NOTE(review): the constant 4 is an argument to LinkDemandSecurityException;
 * its meaning is defined by the managed SecurityManager API — confirm there. */
5028 NEW_ICONST (cfg, args [0], 4);
5029 NEW_METHODCONST (cfg, args [1], caller);
5030 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5031 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5032 /* don't hide previous results */
5033 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5034 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (exception) helper
 * method, looking it up once and caching it in a static for later calls.
 */
5042 throw_exception (void)
5044 static MonoMethod *method = NULL;
5047 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5048 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-built exception object EX at runtime by
 * calling the managed SecurityManager.ThrowException helper with EX as
 * a pointer constant.
 */
5055 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5057 MonoMethod *thrower = throw_exception ();
5060 EMIT_NEW_PCONST (cfg, args [0], ex);
5061 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
5069 get_original_method (MonoMethod *method)
/* Not a wrapper: the method itself carries its attributes. */
5071 if (method->wrapper_type == MONO_WRAPPER_NONE)
5074 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5075 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5078 /* in other cases we need to find the original method */
5079 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: verify that CALLER (unwrapped to the original
 * method, since wrappers carry no attributes) may access FIELD; on
 * violation, emit code that throws the returned exception at runtime.
 */
5083 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5084 MonoBasicBlock *bblock, unsigned char *ip)
5086 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5087 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5089 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, companion of the field variant above: verify
 * that CALLER (unwrapped to the original method) may call CALLEE; on
 * violation, emit code that throws the returned exception at runtime.
 */
5093 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5094 MonoBasicBlock *bblock, unsigned char *ip)
5096 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5097 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5099 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * Recognizes the dup / ldtoken <field> / call RuntimeHelpers::InitializeArray
 * pattern emitted by compilers for static array initializers, so the JIT
 * can fold it into a direct memory copy.  For AOT (non-dynamic images)
 * the field's RVA is returned instead of a pointer, to be resolved at
 * load time.  Returns NULL when the pattern does not match or the
 * element type cannot be handled (e.g. needs byte swapping).
 */
5107 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5110 * newarr[System.Int32]
5112 * ldtoken field valuetype ...
5113 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand is a Field token (table 0x04). */
5115 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5116 guint32 token = read32 (ip + 7);
5117 guint32 field_token = read32 (ip + 2);
5118 guint32 field_index = field_token & 0xffffff;
5120 const char *data_ptr;
5122 MonoMethod *cmethod;
5123 MonoClass *dummy_class;
5124 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5130 *out_field_token = field_token;
5132 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must be exactly corlib's RuntimeHelpers.InitializeArray. */
5135 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Determine the element size; multi-byte types are little-endian only. */
5137 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5138 case MONO_TYPE_BOOLEAN:
5142 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5143 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5144 case MONO_TYPE_CHAR:
5154 return NULL; /* stupid ARM FP swapped format */
/* The initializer blob must be at least as large as the array contents. */
5164 if (size > mono_type_size (field->type, &dummy_align))
5167 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5168 if (!method->klass->image->dynamic) {
5169 field_index = read32 (ip + 2) & 0xffffff;
5170 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5171 data_ptr = mono_image_rva_map (method->klass->image, rva);
5172 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5173 /* for aot code we do the lookup on load */
5174 if (aot && data_ptr)
5175 return GUINT_TO_POINTER (rva);
5177 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) image: data lives on the field itself. */
5179 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the malformed IL at IP
 * in METHOD, with a message containing the method's full name and a
 * disassembly of the offending instruction (or a note when the body is
 * empty).  The method header is queued on headers_to_free.
 */
5187 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5189 char *method_fname = mono_method_full_name (method, TRUE);
5191 MonoMethodHeader *header = mono_method_get_header (method);
5193 if (header->code_size == 0)
5194 method_code = g_strdup ("method body is empty.");
5196 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5197 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5198 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5199 g_free (method_fname);
5200 g_free (method_code);
5201 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort compilation with a pre-constructed managed EXCEPTION object:
 * register exception_ptr as a GC root (it now holds a managed reference)
 * and store the exception there.
 */
5205 set_exception_object (MonoCompile *cfg, MonoException *exception)
5207 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5208 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5209 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type
 * variables through the generic-sharing context first when compiling
 * shared code (a type variable may stand for either kind).
 */
5213 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5217 if (cfg->generic_sharing_context)
5218 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5220 type = &klass->byval_arg;
5221 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N.  When the
 * store would be a plain register move and the value on the stack is the
 * constant just emitted as the last instruction of the current bblock,
 * redirect that instruction's destination register to the local instead
 * of emitting a separate move.
 */
5225 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5228 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5229 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5230 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5231 /* Optimize reg-reg moves away */
5233 * Can't optimize other opcodes, since sp[0] might point to
5234 * the last ins of a decomposed opcode.
5236 sp [0]->dreg = (cfg)->locals [n]->dreg;
5238 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.  Visible here: the ldloca + initobj pattern is rewritten into a
 * direct initialization of the local (NULL constant for reference types,
 * VZERO for value types), skipping the address-taking entirely.
 * Returns the new IL position on success (remaining paths elided).
 */
5246 static inline unsigned char *
5247 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5256 local = read16 (ip + 2);
/* Next instruction is "initobj <type>" within the same bblock? */
5260 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5261 gboolean skip = FALSE;
5263 /* From the INITOBJ case */
5264 token = read32 (ip + 2);
5265 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5266 CHECK_TYPELOAD (klass);
5267 if (generic_class_is_reference_type (cfg, klass)) {
5268 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5269 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5270 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5271 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5272 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS derives from System.Exception, by walking the
 * parent chain until mono_defaults.exception_class is found (loop
 * continuation elided from this view).
 */
5285 is_exception_class (MonoClass *class)
5288 if (class == mono_defaults.exception_class)
5290 class = class->parent;
5296 * mono_method_to_ir:
5298 * Translate the .net IL into linear IR.
5301 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5302 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5303 guint inline_offset, gboolean is_virtual_call)
5306 MonoInst *ins, **sp, **stack_start;
5307 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5308 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5309 MonoMethod *cmethod, *method_definition;
5310 MonoInst **arg_array;
5311 MonoMethodHeader *header;
5313 guint32 token, ins_flag;
5315 MonoClass *constrained_call = NULL;
5316 unsigned char *ip, *end, *target, *err_pos;
5317 static double r8_0 = 0.0;
5318 MonoMethodSignature *sig;
5319 MonoGenericContext *generic_context = NULL;
5320 MonoGenericContainer *generic_container = NULL;
5321 MonoType **param_types;
5322 int i, n, start_new_bblock, dreg;
5323 int num_calls = 0, inline_costs = 0;
5324 int breakpoint_id = 0;
5326 MonoBoolean security, pinvoke;
5327 MonoSecurityManager* secman = NULL;
5328 MonoDeclSecurityActions actions;
5329 GSList *class_inits = NULL;
5330 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5332 gboolean init_locals, seq_points, skip_dead_blocks;
5334 /* serialization and xdomain stuff may need access to private fields and methods */
5335 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5336 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5337 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5338 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5339 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5340 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5342 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5344 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5345 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5346 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5347 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5349 image = method->klass->image;
5350 header = mono_method_get_header (method);
5352 MonoLoaderError *error;
5354 if ((error = mono_loader_get_last_error ())) {
5355 cfg->exception_type = error->exception_type;
5357 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5358 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5360 goto exception_exit;
5362 generic_container = mono_method_get_generic_container (method);
5363 sig = mono_method_signature (method);
5364 num_args = sig->hasthis + sig->param_count;
5365 ip = (unsigned char*)header->code;
5366 cfg->cil_start = ip;
5367 end = ip + header->code_size;
5368 mono_jit_stats.cil_code_size += header->code_size;
5369 init_locals = header->init_locals;
5371 seq_points = cfg->gen_seq_points && cfg->method == method;
5374 * Methods without init_locals set could cause asserts in various passes
5379 method_definition = method;
5380 while (method_definition->is_inflated) {
5381 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5382 method_definition = imethod->declaring;
5385 /* SkipVerification is not allowed if core-clr is enabled */
5386 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5388 dont_verify_stloc = TRUE;
5391 if (!dont_verify && mini_method_verify (cfg, method_definition))
5392 goto exception_exit;
5394 if (mono_debug_using_mono_debugger ())
5395 cfg->keep_cil_nops = TRUE;
5397 if (sig->is_inflated)
5398 generic_context = mono_method_get_context (method);
5399 else if (generic_container)
5400 generic_context = &generic_container->context;
5401 cfg->generic_context = generic_context;
5403 if (!cfg->generic_sharing_context)
5404 g_assert (!sig->has_type_parameters);
5406 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5407 g_assert (method->is_inflated);
5408 g_assert (mono_method_get_context (method)->method_inst);
5410 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5411 g_assert (sig->generic_param_count);
5413 if (cfg->method == method) {
5414 cfg->real_offset = 0;
5416 cfg->real_offset = inline_offset;
5419 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5420 cfg->cil_offset_to_bb_len = header->code_size;
5422 cfg->current_method = method;
5424 if (cfg->verbose_level > 2)
5425 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5427 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5429 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5430 for (n = 0; n < sig->param_count; ++n)
5431 param_types [n + sig->hasthis] = sig->params [n];
5432 cfg->arg_types = param_types;
5434 dont_inline = g_list_prepend (dont_inline, method);
5435 if (cfg->method == method) {
5437 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5438 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5441 NEW_BBLOCK (cfg, start_bblock);
5442 cfg->bb_entry = start_bblock;
5443 start_bblock->cil_code = NULL;
5444 start_bblock->cil_length = 0;
5447 NEW_BBLOCK (cfg, end_bblock);
5448 cfg->bb_exit = end_bblock;
5449 end_bblock->cil_code = NULL;
5450 end_bblock->cil_length = 0;
5451 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5452 g_assert (cfg->num_bblocks == 2);
5454 arg_array = cfg->args;
5456 if (header->num_clauses) {
5457 cfg->spvars = g_hash_table_new (NULL, NULL);
5458 cfg->exvars = g_hash_table_new (NULL, NULL);
5460 /* handle exception clauses */
5461 for (i = 0; i < header->num_clauses; ++i) {
5462 MonoBasicBlock *try_bb;
5463 MonoExceptionClause *clause = &header->clauses [i];
5464 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5465 try_bb->real_offset = clause->try_offset;
5466 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5467 tblock->real_offset = clause->handler_offset;
5468 tblock->flags |= BB_EXCEPTION_HANDLER;
5470 link_bblock (cfg, try_bb, tblock);
5472 if (*(ip + clause->handler_offset) == CEE_POP)
5473 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5475 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5476 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5477 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5478 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5479 MONO_ADD_INS (tblock, ins);
5481 /* todo: is a fault block unsafe to optimize? */
5482 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5483 tblock->flags |= BB_EXCEPTION_UNSAFE;
5487 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5489 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5491 /* catch and filter blocks get the exception object on the stack */
5492 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5493 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5494 MonoInst *dummy_use;
5496 /* mostly like handle_stack_args (), but just sets the input args */
5497 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5498 tblock->in_scount = 1;
5499 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5500 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5503 * Add a dummy use for the exvar so its liveness info will be
5507 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5509 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5510 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5511 tblock->flags |= BB_EXCEPTION_HANDLER;
5512 tblock->real_offset = clause->data.filter_offset;
5513 tblock->in_scount = 1;
5514 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5515 /* The filter block shares the exvar with the handler block */
5516 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5517 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5518 MONO_ADD_INS (tblock, ins);
5522 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5523 clause->data.catch_class &&
5524 cfg->generic_sharing_context &&
5525 mono_class_check_context_used (clause->data.catch_class)) {
5527 * In shared generic code with catch
5528 * clauses containing type variables
5529 * the exception handling code has to
5530 * be able to get to the rgctx.
5531 * Therefore we have to make sure that
5532 * the vtable/mrgctx argument (for
5533 * static or generic methods) or the
5534 * "this" argument (for non-static
5535 * methods) are live.
5537 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5538 mini_method_get_context (method)->method_inst ||
5539 method->klass->valuetype) {
5540 mono_get_vtable_var (cfg);
5542 MonoInst *dummy_use;
5544 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5549 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5550 cfg->cbb = start_bblock;
5551 cfg->args = arg_array;
5552 mono_save_args (cfg, sig, inline_args);
5555 /* FIRST CODE BLOCK */
5556 NEW_BBLOCK (cfg, bblock);
5557 bblock->cil_code = ip;
5561 ADD_BBLOCK (cfg, bblock);
5563 if (cfg->method == method) {
5564 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5565 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5566 MONO_INST_NEW (cfg, ins, OP_BREAK);
5567 MONO_ADD_INS (bblock, ins);
5571 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5572 secman = mono_security_manager_get_methods ();
5574 security = (secman && mono_method_has_declsec (method));
5575 /* at this point having security doesn't mean we have any code to generate */
5576 if (security && (cfg->method == method)) {
5577 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5578 * And we do not want to enter the next section (with allocation) if we
5579 * have nothing to generate */
5580 security = mono_declsec_get_demands (method, &actions);
5583 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5584 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5586 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5587 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5588 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5590			/* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5591 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5595 mono_custom_attrs_free (custom);
5598 custom = mono_custom_attrs_from_class (wrapped->klass);
5599 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5603 mono_custom_attrs_free (custom);
5606 /* not a P/Invoke after all */
5611 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5612 /* we use a separate basic block for the initialization code */
5613 NEW_BBLOCK (cfg, init_localsbb);
5614 cfg->bb_init = init_localsbb;
5615 init_localsbb->real_offset = cfg->real_offset;
5616 start_bblock->next_bb = init_localsbb;
5617 init_localsbb->next_bb = bblock;
5618 link_bblock (cfg, start_bblock, init_localsbb);
5619 link_bblock (cfg, init_localsbb, bblock);
5621 cfg->cbb = init_localsbb;
5623 start_bblock->next_bb = bblock;
5624 link_bblock (cfg, start_bblock, bblock);
5627 /* at this point we know, if security is TRUE, that some code needs to be generated */
5628 if (security && (cfg->method == method)) {
5631 mono_jit_stats.cas_demand_generation++;
5633 if (actions.demand.blob) {
5634 /* Add code for SecurityAction.Demand */
5635 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5636 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5637 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5638 mono_emit_method_call (cfg, secman->demand, args, NULL);
5640 if (actions.noncasdemand.blob) {
5641 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5642 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5643 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5644 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5645 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5646 mono_emit_method_call (cfg, secman->demand, args, NULL);
5648 if (actions.demandchoice.blob) {
5649 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5650 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5651 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5652 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5653 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5657 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5659 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5662 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5663 /* check if this is native code, e.g. an icall or a p/invoke */
5664 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5665 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5667 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5668 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5670			/* if this is a native call then it can only be JITted from platform code */
5671 if ((icall || pinvk) && method->klass && method->klass->image) {
5672 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5673 MonoException *ex = icall ? mono_get_exception_security () :
5674 mono_get_exception_method_access ();
5675 emit_throw_exception (cfg, ex);
5682 if (header->code_size == 0)
5685 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5690 if (cfg->method == method)
5691 mono_debug_init_method (cfg, bblock, breakpoint_id);
5693 for (n = 0; n < header->num_locals; ++n) {
5694 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5699 /* We force the vtable variable here for all shared methods
5700 for the possibility that they might show up in a stack
5701 trace where their exact instantiation is needed. */
5702 if (cfg->generic_sharing_context && method == cfg->method) {
5703 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5704 mini_method_get_context (method)->method_inst ||
5705 method->klass->valuetype) {
5706 mono_get_vtable_var (cfg);
5708 /* FIXME: Is there a better way to do this?
5709 We need the variable live for the duration
5710 of the whole method. */
5711 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5715 /* add a check for this != NULL to inlined methods */
5716 if (is_virtual_call) {
5719 NEW_ARGLOAD (cfg, arg_ins, 0);
5720 MONO_ADD_INS (cfg->cbb, arg_ins);
5721 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5724 skip_dead_blocks = !dont_verify;
5725 if (skip_dead_blocks) {
5726 original_bb = bb = mono_basic_block_split (method, &error);
5727 if (!mono_error_ok (&error)) {
5728 mono_error_cleanup (&error);
5734 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5735 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5738 start_new_bblock = 0;
5741 if (cfg->method == method)
5742 cfg->real_offset = ip - header->code;
5744 cfg->real_offset = inline_offset;
5749 if (start_new_bblock) {
5750 bblock->cil_length = ip - bblock->cil_code;
5751 if (start_new_bblock == 2) {
5752 g_assert (ip == tblock->cil_code);
5754 GET_BBLOCK (cfg, tblock, ip);
5756 bblock->next_bb = tblock;
5759 start_new_bblock = 0;
5760 for (i = 0; i < bblock->in_scount; ++i) {
5761 if (cfg->verbose_level > 3)
5762 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5763 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5767 g_slist_free (class_inits);
5770 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5771 link_bblock (cfg, bblock, tblock);
5772 if (sp != stack_start) {
5773 handle_stack_args (cfg, stack_start, sp - stack_start);
5775 CHECK_UNVERIFIABLE (cfg);
5777 bblock->next_bb = tblock;
5780 for (i = 0; i < bblock->in_scount; ++i) {
5781 if (cfg->verbose_level > 3)
5782 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5783 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5786 g_slist_free (class_inits);
5791 if (skip_dead_blocks) {
5792 int ip_offset = ip - header->code;
5794 if (ip_offset == bb->end)
5798 int op_size = mono_opcode_size (ip, end);
5799 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5801 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5803 if (ip_offset + op_size == bb->end) {
5804 MONO_INST_NEW (cfg, ins, OP_NOP);
5805 MONO_ADD_INS (bblock, ins);
5806 start_new_bblock = 1;
5814 * Sequence points are points where the debugger can place a breakpoint.
5815 * Currently, we generate these automatically at points where the IL
5818 if (seq_points && sp == stack_start) {
5819 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5820 MONO_ADD_INS (cfg->cbb, ins);
5823 bblock->real_offset = cfg->real_offset;
5825 if ((cfg->method == method) && cfg->coverage_info) {
5826 guint32 cil_offset = ip - header->code;
5827 cfg->coverage_info->data [cil_offset].cil_code = ip;
5829 /* TODO: Use an increment here */
5830 #if defined(TARGET_X86)
5831 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5832 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5834 MONO_ADD_INS (cfg->cbb, ins);
5836 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5841 if (cfg->verbose_level > 3)
5842 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5846 if (cfg->keep_cil_nops)
5847 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5849 MONO_INST_NEW (cfg, ins, OP_NOP);
5851 MONO_ADD_INS (bblock, ins);
5854 if (should_insert_brekpoint (cfg->method))
5855 MONO_INST_NEW (cfg, ins, OP_BREAK);
5857 MONO_INST_NEW (cfg, ins, OP_NOP);
5859 MONO_ADD_INS (bblock, ins);
5865 CHECK_STACK_OVF (1);
5866 n = (*ip)-CEE_LDARG_0;
5868 EMIT_NEW_ARGLOAD (cfg, ins, n);
5876 CHECK_STACK_OVF (1);
5877 n = (*ip)-CEE_LDLOC_0;
5879 EMIT_NEW_LOCLOAD (cfg, ins, n);
5888 n = (*ip)-CEE_STLOC_0;
5891 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5893 emit_stloc_ir (cfg, sp, header, n);
5900 CHECK_STACK_OVF (1);
5903 EMIT_NEW_ARGLOAD (cfg, ins, n);
5909 CHECK_STACK_OVF (1);
5912 NEW_ARGLOADA (cfg, ins, n);
5913 MONO_ADD_INS (cfg->cbb, ins);
5923 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5925 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5930 CHECK_STACK_OVF (1);
5933 EMIT_NEW_LOCLOAD (cfg, ins, n);
5937 case CEE_LDLOCA_S: {
5938 unsigned char *tmp_ip;
5940 CHECK_STACK_OVF (1);
5941 CHECK_LOCAL (ip [1]);
5943 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5949 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5958 CHECK_LOCAL (ip [1]);
5959 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5961 emit_stloc_ir (cfg, sp, header, ip [1]);
5966 CHECK_STACK_OVF (1);
5967 EMIT_NEW_PCONST (cfg, ins, NULL);
5968 ins->type = STACK_OBJ;
5973 CHECK_STACK_OVF (1);
5974 EMIT_NEW_ICONST (cfg, ins, -1);
5987 CHECK_STACK_OVF (1);
5988 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5994 CHECK_STACK_OVF (1);
5996 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6002 CHECK_STACK_OVF (1);
6003 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6009 CHECK_STACK_OVF (1);
6010 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6011 ins->type = STACK_I8;
6012 ins->dreg = alloc_dreg (cfg, STACK_I8);
6014 ins->inst_l = (gint64)read64 (ip);
6015 MONO_ADD_INS (bblock, ins);
6021 gboolean use_aotconst = FALSE;
6023 #ifdef TARGET_POWERPC
6024 /* FIXME: Clean this up */
6025 if (cfg->compile_aot)
6026 use_aotconst = TRUE;
6029 /* FIXME: we should really allocate this only late in the compilation process */
6030 f = mono_domain_alloc (cfg->domain, sizeof (float));
6032 CHECK_STACK_OVF (1);
6038 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6040 dreg = alloc_freg (cfg);
6041 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6042 ins->type = STACK_R8;
6044 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6045 ins->type = STACK_R8;
6046 ins->dreg = alloc_dreg (cfg, STACK_R8);
6048 MONO_ADD_INS (bblock, ins);
6058 gboolean use_aotconst = FALSE;
6060 #ifdef TARGET_POWERPC
6061 /* FIXME: Clean this up */
6062 if (cfg->compile_aot)
6063 use_aotconst = TRUE;
6066 /* FIXME: we should really allocate this only late in the compilation process */
6067 d = mono_domain_alloc (cfg->domain, sizeof (double));
6069 CHECK_STACK_OVF (1);
6075 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6077 dreg = alloc_freg (cfg);
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6079 ins->type = STACK_R8;
6081 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6082 ins->type = STACK_R8;
6083 ins->dreg = alloc_dreg (cfg, STACK_R8);
6085 MONO_ADD_INS (bblock, ins);
6094 MonoInst *temp, *store;
6096 CHECK_STACK_OVF (1);
6100 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6101 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6103 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6106 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6119 if (sp [0]->type == STACK_R8)
6120 /* we need to pop the value from the x86 FP stack */
6121 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6130 if (stack_start != sp)
6132 token = read32 (ip + 1);
6133 /* FIXME: check the signature matches */
6134 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6139 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6140 GENERIC_SHARING_FAILURE (CEE_JMP);
6142 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6143 CHECK_CFG_EXCEPTION;
6145 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6147 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6150 /* Handle tail calls similarly to calls */
6151 n = fsig->param_count + fsig->hasthis;
6153 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6154 call->method = cmethod;
6155 call->tail_call = TRUE;
6156 call->signature = mono_method_signature (cmethod);
6157 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6158 call->inst.inst_p0 = cmethod;
6159 for (i = 0; i < n; ++i)
6160 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6162 mono_arch_emit_call (cfg, call);
6163 MONO_ADD_INS (bblock, (MonoInst*)call);
6166 for (i = 0; i < num_args; ++i)
6167 /* Prevent arguments from being optimized away */
6168 arg_array [i]->flags |= MONO_INST_VOLATILE;
6170 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6171 ins = (MonoInst*)call;
6172 ins->inst_p0 = cmethod;
6173 MONO_ADD_INS (bblock, ins);
6177 start_new_bblock = 1;
6182 case CEE_CALLVIRT: {
6183 MonoInst *addr = NULL;
6184 MonoMethodSignature *fsig = NULL;
6186 int virtual = *ip == CEE_CALLVIRT;
6187 int calli = *ip == CEE_CALLI;
6188 gboolean pass_imt_from_rgctx = FALSE;
6189 MonoInst *imt_arg = NULL;
6190 gboolean pass_vtable = FALSE;
6191 gboolean pass_mrgctx = FALSE;
6192 MonoInst *vtable_arg = NULL;
6193 gboolean check_this = FALSE;
6194 gboolean supported_tail_call = FALSE;
6197 token = read32 (ip + 1);
6204 if (method->wrapper_type != MONO_WRAPPER_NONE)
6205 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6207 fsig = mono_metadata_parse_signature (image, token);
6209 n = fsig->param_count + fsig->hasthis;
6211 if (method->dynamic && fsig->pinvoke) {
6215 * This is a call through a function pointer using a pinvoke
6216 * signature. Have to create a wrapper and call that instead.
6217 * FIXME: This is very slow, need to create a wrapper at JIT time
6218 * instead based on the signature.
6220 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6221 EMIT_NEW_PCONST (cfg, args [1], fsig);
6223 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6226 MonoMethod *cil_method;
6228 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6229 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6230 cil_method = cmethod;
6231 } else if (constrained_call) {
6232 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6234 * This is needed since get_method_constrained can't find
6235 * the method in klass representing a type var.
6236 * The type var is guaranteed to be a reference type in this
6239 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6240 cil_method = cmethod;
6241 g_assert (!cmethod->klass->valuetype);
6243 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6246 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6247 cil_method = cmethod;
6252 if (!dont_verify && !cfg->skip_visibility) {
6253 MonoMethod *target_method = cil_method;
6254 if (method->is_inflated) {
6255 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6257 if (!mono_method_can_access_method (method_definition, target_method) &&
6258 !mono_method_can_access_method (method, cil_method))
6259 METHOD_ACCESS_FAILURE;
6262 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6263 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6265 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6266 /* MS.NET seems to silently convert this to a callvirt */
6271 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6272 * converts to a callvirt.
6274 * tests/bug-515884.il is an example of this behavior
6276 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6277 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6278 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6282 if (!cmethod->klass->inited)
6283 if (!mono_class_init (cmethod->klass))
6286 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6287 mini_class_is_system_array (cmethod->klass)) {
6288 array_rank = cmethod->klass->rank;
6289 fsig = mono_method_signature (cmethod);
6291 fsig = mono_method_signature (cmethod);
6296 if (fsig->pinvoke) {
6297 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6298 check_for_pending_exc, FALSE);
6299 fsig = mono_method_signature (wrapper);
6300 } else if (constrained_call) {
6301 fsig = mono_method_signature (cmethod);
6303 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6307 mono_save_token_info (cfg, image, token, cil_method);
6309 n = fsig->param_count + fsig->hasthis;
6311 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6312 if (check_linkdemand (cfg, method, cmethod))
6314 CHECK_CFG_EXCEPTION;
6317 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6318 g_assert_not_reached ();
6321 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6324 if (!cfg->generic_sharing_context && cmethod)
6325 g_assert (!mono_method_check_context_used (cmethod));
6329 //g_assert (!virtual || fsig->hasthis);
6333 if (constrained_call) {
6335 * We have the `constrained.' prefix opcode.
6337 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6339 * The type parameter is instantiated as a valuetype,
6340 * but that type doesn't override the method we're
6341 * calling, so we need to box `this'.
6343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6344 ins->klass = constrained_call;
6345 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6346 CHECK_CFG_EXCEPTION;
6347 } else if (!constrained_call->valuetype) {
6348 int dreg = alloc_preg (cfg);
6351 * The type parameter is instantiated as a reference
6352 * type. We have a managed pointer on the stack, so
6353 * we need to dereference it here.
6355 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6356 ins->type = STACK_OBJ;
6358 } else if (cmethod->klass->valuetype)
6360 constrained_call = NULL;
6363 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6367 * If the callee is a shared method, then its static cctor
6368 * might not get called after the call was patched.
6370 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6371 emit_generic_class_init (cfg, cmethod->klass);
6372 CHECK_TYPELOAD (cmethod->klass);
6375 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6376 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6377 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6378 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6379 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6382 * Pass vtable iff target method might
6383 * be shared, which means that sharing
6384 * is enabled for its class and its
6385 * context is sharable (and it's not a
6388 if (sharing_enabled && context_sharable &&
6389 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6393 if (cmethod && mini_method_get_context (cmethod) &&
6394 mini_method_get_context (cmethod)->method_inst) {
6395 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6396 MonoGenericContext *context = mini_method_get_context (cmethod);
6397 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6399 g_assert (!pass_vtable);
6401 if (sharing_enabled && context_sharable)
6405 if (cfg->generic_sharing_context && cmethod) {
6406 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6408 context_used = mono_method_check_context_used (cmethod);
6410 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6411 /* Generic method interface
6412 calls are resolved via a
6413 helper function and don't
6415 if (!cmethod_context || !cmethod_context->method_inst)
6416 pass_imt_from_rgctx = TRUE;
6420 * If a shared method calls another
6421 * shared method then the caller must
6422 * have a generic sharing context
6423 * because the magic trampoline
6424 * requires it. FIXME: We shouldn't
6425 * have to force the vtable/mrgctx
6426 * variable here. Instead there
6427 * should be a flag in the cfg to
6428 * request a generic sharing context.
6431 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6432 mono_get_vtable_var (cfg);
6437 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6439 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6441 CHECK_TYPELOAD (cmethod->klass);
6442 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6447 g_assert (!vtable_arg);
6449 if (!cfg->compile_aot) {
6451 * emit_get_rgctx_method () calls mono_class_vtable () so check
6452 * for type load errors before.
6454 mono_class_setup_vtable (cmethod->klass);
6455 CHECK_TYPELOAD (cmethod->klass);
6458 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6460 /* !marshalbyref is needed to properly handle generic methods + remoting */
6461 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6462 MONO_METHOD_IS_FINAL (cmethod)) &&
6463 !cmethod->klass->marshalbyref) {
6470 if (pass_imt_from_rgctx) {
6471 g_assert (!pass_vtable);
6474 imt_arg = emit_get_rgctx_method (cfg, context_used,
6475 cmethod, MONO_RGCTX_INFO_METHOD);
6479 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6481 /* Calling virtual generic methods */
6482 if (cmethod && virtual &&
6483 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6484 !(MONO_METHOD_IS_FINAL (cmethod) &&
6485 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6486 mono_method_signature (cmethod)->generic_param_count) {
6487 MonoInst *this_temp, *this_arg_temp, *store;
6488 MonoInst *iargs [4];
6490 g_assert (mono_method_signature (cmethod)->is_inflated);
6492 /* Prevent inlining of methods that contain indirect calls */
6495 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6496 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6497 g_assert (!imt_arg);
6499 g_assert (cmethod->is_inflated);
6500 imt_arg = emit_get_rgctx_method (cfg, context_used,
6501 cmethod, MONO_RGCTX_INFO_METHOD);
6502 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6506 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6507 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6508 MONO_ADD_INS (bblock, store);
6510 /* FIXME: This should be a managed pointer */
6511 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6513 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6514 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6515 cmethod, MONO_RGCTX_INFO_METHOD);
6516 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6517 addr = mono_emit_jit_icall (cfg,
6518 mono_helper_compile_generic_method, iargs);
6520 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6522 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6525 if (!MONO_TYPE_IS_VOID (fsig->ret))
6526 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6528 CHECK_CFG_EXCEPTION;
6535 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6536 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6538 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6542 /* FIXME: runtime generic context pointer for jumps? */
6543 /* FIXME: handle this for generic sharing eventually */
6544 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6547 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6550 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6551 /* Handle tail calls similarly to calls */
6552 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6554 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6555 call->tail_call = TRUE;
6556 call->method = cmethod;
6557 call->signature = mono_method_signature (cmethod);
6560 * We implement tail calls by storing the actual arguments into the
6561 * argument variables, then emitting a CEE_JMP.
6563 for (i = 0; i < n; ++i) {
6564 /* Prevent argument from being register allocated */
6565 arg_array [i]->flags |= MONO_INST_VOLATILE;
6566 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6570 ins = (MonoInst*)call;
6571 ins->inst_p0 = cmethod;
6572 ins->inst_p1 = arg_array [0];
6573 MONO_ADD_INS (bblock, ins);
6574 link_bblock (cfg, bblock, end_bblock);
6575 start_new_bblock = 1;
6577 CHECK_CFG_EXCEPTION;
6579 /* skip CEE_RET as well */
6585 /* Conversion to a JIT intrinsic */
6586 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6588 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6589 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6594 CHECK_CFG_EXCEPTION;
6602 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6603 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6604 mono_method_check_inlining (cfg, cmethod) &&
6605 !g_list_find (dont_inline, cmethod)) {
6607 gboolean allways = FALSE;
6609 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6610 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6611 /* Prevent inlining of methods that call wrappers */
6613 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6617 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6619 cfg->real_offset += 5;
6622 if (!MONO_TYPE_IS_VOID (fsig->ret))
6623 /* *sp is already set by inline_method */
6626 inline_costs += costs;
6632 inline_costs += 10 * num_calls++;
6634 /* Tail recursion elimination */
6635 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6636 gboolean has_vtargs = FALSE;
6639 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6642 /* keep it simple */
6643 for (i = fsig->param_count - 1; i >= 0; i--) {
6644 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6649 for (i = 0; i < n; ++i)
6650 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6651 MONO_INST_NEW (cfg, ins, OP_BR);
6652 MONO_ADD_INS (bblock, ins);
6653 tblock = start_bblock->out_bb [0];
6654 link_bblock (cfg, bblock, tblock);
6655 ins->inst_target_bb = tblock;
6656 start_new_bblock = 1;
6658 /* skip the CEE_RET, too */
6659 if (ip_in_bb (cfg, bblock, ip + 5))
6669 /* Generic sharing */
6670 /* FIXME: only do this for generic methods if
6671 they are not shared! */
6672 if (context_used && !imt_arg && !array_rank &&
6673 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6674 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6675 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6676 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6679 g_assert (cfg->generic_sharing_context && cmethod);
6683 * We are compiling a call to a
6684 * generic method from shared code,
6685 * which means that we have to look up
6686 * the method in the rgctx and do an
6689 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6692 /* Indirect calls */
6694 g_assert (!imt_arg);
6696 if (*ip == CEE_CALL)
6697 g_assert (context_used);
6698 else if (*ip == CEE_CALLI)
6699 g_assert (!vtable_arg);
6701 /* FIXME: what the hell is this??? */
6702 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6703 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6705 /* Prevent inlining of methods with indirect calls */
6710 int rgctx_reg = mono_alloc_preg (cfg);
6712 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6713 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6714 call = (MonoCallInst*)ins;
6715 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6717 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6719 * Instead of emitting an indirect call, emit a direct call
6720 * with the contents of the aotconst as the patch info.
6722 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6724 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6725 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6728 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6731 if (!MONO_TYPE_IS_VOID (fsig->ret))
6732 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6734 CHECK_CFG_EXCEPTION;
6745 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6746 if (sp [fsig->param_count]->type == STACK_OBJ) {
6747 MonoInst *iargs [2];
6750 iargs [1] = sp [fsig->param_count];
6752 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6755 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6756 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6757 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6758 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6760 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6763 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6764 if (!cmethod->klass->element_class->valuetype && !readonly)
6765 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6766 CHECK_TYPELOAD (cmethod->klass);
6769 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6772 g_assert_not_reached ();
6775 CHECK_CFG_EXCEPTION;
6782 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6784 if (!MONO_TYPE_IS_VOID (fsig->ret))
6785 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6787 CHECK_CFG_EXCEPTION;
6797 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6799 } else if (imt_arg) {
6800 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6802 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6805 if (!MONO_TYPE_IS_VOID (fsig->ret))
6806 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6808 CHECK_CFG_EXCEPTION;
6815 if (cfg->method != method) {
6816 /* return from inlined method */
6818 * If in_count == 0, that means the ret is unreachable due to
6819			 * being preceded by a throw. In that case, inline_method () will
6820 * handle setting the return value
6821 * (test case: test_0_inline_throw ()).
6823 if (return_var && cfg->cbb->in_count) {
6827 //g_assert (returnvar != -1);
6828 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6829 cfg->ret_var_set = TRUE;
6833 MonoType *ret_type = mono_method_signature (method)->ret;
6837 * Place a seq point here too even through the IL stack is not
6838 * empty, so a step over on
6841 * will work correctly.
6843 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6844 MONO_ADD_INS (cfg->cbb, ins);
6847 g_assert (!return_var);
6850 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6853 if (!cfg->vret_addr) {
6856 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6858 EMIT_NEW_RETLOADA (cfg, ret_addr);
6860 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6861 ins->klass = mono_class_from_mono_type (ret_type);
6864 #ifdef MONO_ARCH_SOFT_FLOAT
6865 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6866 MonoInst *iargs [1];
6870 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6871 mono_arch_emit_setret (cfg, method, conv);
6873 mono_arch_emit_setret (cfg, method, *sp);
6876 mono_arch_emit_setret (cfg, method, *sp);
6881 if (sp != stack_start)
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6885 ins->inst_target_bb = end_bblock;
6886 MONO_ADD_INS (bblock, ins);
6887 link_bblock (cfg, bblock, end_bblock);
6888 start_new_bblock = 1;
6892 MONO_INST_NEW (cfg, ins, OP_BR);
6894 target = ip + 1 + (signed char)(*ip);
6896 GET_BBLOCK (cfg, tblock, target);
6897 link_bblock (cfg, bblock, tblock);
6898 ins->inst_target_bb = tblock;
6899 if (sp != stack_start) {
6900 handle_stack_args (cfg, stack_start, sp - stack_start);
6902 CHECK_UNVERIFIABLE (cfg);
6904 MONO_ADD_INS (bblock, ins);
6905 start_new_bblock = 1;
6906 inline_costs += BRANCH_COST;
6920 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6922 target = ip + 1 + *(signed char*)ip;
6928 inline_costs += BRANCH_COST;
6932 MONO_INST_NEW (cfg, ins, OP_BR);
6935 target = ip + 4 + (gint32)read32(ip);
6937 GET_BBLOCK (cfg, tblock, target);
6938 link_bblock (cfg, bblock, tblock);
6939 ins->inst_target_bb = tblock;
6940 if (sp != stack_start) {
6941 handle_stack_args (cfg, stack_start, sp - stack_start);
6943 CHECK_UNVERIFIABLE (cfg);
6946 MONO_ADD_INS (bblock, ins);
6948 start_new_bblock = 1;
6949 inline_costs += BRANCH_COST;
6956 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6957 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6958 guint32 opsize = is_short ? 1 : 4;
6960 CHECK_OPSIZE (opsize);
6962 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6965 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6970 GET_BBLOCK (cfg, tblock, target);
6971 link_bblock (cfg, bblock, tblock);
6972 GET_BBLOCK (cfg, tblock, ip);
6973 link_bblock (cfg, bblock, tblock);
6975 if (sp != stack_start) {
6976 handle_stack_args (cfg, stack_start, sp - stack_start);
6977 CHECK_UNVERIFIABLE (cfg);
6980 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6981 cmp->sreg1 = sp [0]->dreg;
6982 type_from_op (cmp, sp [0], NULL);
6985 #if SIZEOF_REGISTER == 4
6986 if (cmp->opcode == OP_LCOMPARE_IMM) {
6987 /* Convert it to OP_LCOMPARE */
6988 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6989 ins->type = STACK_I8;
6990 ins->dreg = alloc_dreg (cfg, STACK_I8);
6992 MONO_ADD_INS (bblock, ins);
6993 cmp->opcode = OP_LCOMPARE;
6994 cmp->sreg2 = ins->dreg;
6997 MONO_ADD_INS (bblock, cmp);
6999 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7000 type_from_op (ins, sp [0], NULL);
7001 MONO_ADD_INS (bblock, ins);
7002 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7003 GET_BBLOCK (cfg, tblock, target);
7004 ins->inst_true_bb = tblock;
7005 GET_BBLOCK (cfg, tblock, ip);
7006 ins->inst_false_bb = tblock;
7007 start_new_bblock = 2;
7010 inline_costs += BRANCH_COST;
7025 MONO_INST_NEW (cfg, ins, *ip);
7027 target = ip + 4 + (gint32)read32(ip);
7033 inline_costs += BRANCH_COST;
7037 MonoBasicBlock **targets;
7038 MonoBasicBlock *default_bblock;
7039 MonoJumpInfoBBTable *table;
7040 int offset_reg = alloc_preg (cfg);
7041 int target_reg = alloc_preg (cfg);
7042 int table_reg = alloc_preg (cfg);
7043 int sum_reg = alloc_preg (cfg);
7044 gboolean use_op_switch;
7048 n = read32 (ip + 1);
7051 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7055 CHECK_OPSIZE (n * sizeof (guint32));
7056 target = ip + n * sizeof (guint32);
7058 GET_BBLOCK (cfg, default_bblock, target);
7059 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7061 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7062 for (i = 0; i < n; ++i) {
7063 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7064 targets [i] = tblock;
7065 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7069 if (sp != stack_start) {
7071 * Link the current bb with the targets as well, so handle_stack_args
7072 * will set their in_stack correctly.
7074 link_bblock (cfg, bblock, default_bblock);
7075 for (i = 0; i < n; ++i)
7076 link_bblock (cfg, bblock, targets [i]);
7078 handle_stack_args (cfg, stack_start, sp - stack_start);
7080 CHECK_UNVERIFIABLE (cfg);
7083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7087 for (i = 0; i < n; ++i)
7088 link_bblock (cfg, bblock, targets [i]);
7090 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7091 table->table = targets;
7092 table->table_size = n;
7094 use_op_switch = FALSE;
7096 /* ARM implements SWITCH statements differently */
7097 /* FIXME: Make it use the generic implementation */
7098 if (!cfg->compile_aot)
7099 use_op_switch = TRUE;
7102 if (COMPILE_LLVM (cfg))
7103 use_op_switch = TRUE;
7105 cfg->cbb->has_jump_table = 1;
7107 if (use_op_switch) {
7108 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7109 ins->sreg1 = src1->dreg;
7110 ins->inst_p0 = table;
7111 ins->inst_many_bb = targets;
7112 ins->klass = GUINT_TO_POINTER (n);
7113 MONO_ADD_INS (cfg->cbb, ins);
7115 if (sizeof (gpointer) == 8)
7116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7120 #if SIZEOF_REGISTER == 8
7121 /* The upper word might not be zero, and we add it to a 64 bit address later */
7122 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7125 if (cfg->compile_aot) {
7126 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7128 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7129 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7130 ins->inst_p0 = table;
7131 ins->dreg = table_reg;
7132 MONO_ADD_INS (cfg->cbb, ins);
7135 /* FIXME: Use load_memindex */
7136 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7138 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7140 start_new_bblock = 1;
7141 inline_costs += (BRANCH_COST * 2);
7161 dreg = alloc_freg (cfg);
7164 dreg = alloc_lreg (cfg);
7167 dreg = alloc_preg (cfg);
7170 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7171 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7172 ins->flags |= ins_flag;
7174 MONO_ADD_INS (bblock, ins);
7189 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7190 ins->flags |= ins_flag;
7192 MONO_ADD_INS (bblock, ins);
7194 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7195 MonoInst *dummy_use;
7196 /* insert call to write barrier */
7197 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7198 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7199 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7209 MONO_INST_NEW (cfg, ins, (*ip));
7211 ins->sreg1 = sp [0]->dreg;
7212 ins->sreg2 = sp [1]->dreg;
7213 type_from_op (ins, sp [0], sp [1]);
7215 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7217 /* Use the immediate opcodes if possible */
7218 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7219 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7220 if (imm_opcode != -1) {
7221 ins->opcode = imm_opcode;
7222 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7225 sp [1]->opcode = OP_NOP;
7229 MONO_ADD_INS ((cfg)->cbb, (ins));
7231 *sp++ = mono_decompose_opcode (cfg, ins);
7248 MONO_INST_NEW (cfg, ins, (*ip));
7250 ins->sreg1 = sp [0]->dreg;
7251 ins->sreg2 = sp [1]->dreg;
7252 type_from_op (ins, sp [0], sp [1]);
7254 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7255 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7257 /* FIXME: Pass opcode to is_inst_imm */
7259 /* Use the immediate opcodes if possible */
7260 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7263 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7264 if (imm_opcode != -1) {
7265 ins->opcode = imm_opcode;
7266 if (sp [1]->opcode == OP_I8CONST) {
7267 #if SIZEOF_REGISTER == 8
7268 ins->inst_imm = sp [1]->inst_l;
7270 ins->inst_ls_word = sp [1]->inst_ls_word;
7271 ins->inst_ms_word = sp [1]->inst_ms_word;
7275 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7278 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7279 if (sp [1]->next == NULL)
7280 sp [1]->opcode = OP_NOP;
7283 MONO_ADD_INS ((cfg)->cbb, (ins));
7285 *sp++ = mono_decompose_opcode (cfg, ins);
7298 case CEE_CONV_OVF_I8:
7299 case CEE_CONV_OVF_U8:
7303 /* Special case this earlier so we have long constants in the IR */
7304 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7305 int data = sp [-1]->inst_c0;
7306 sp [-1]->opcode = OP_I8CONST;
7307 sp [-1]->type = STACK_I8;
7308 #if SIZEOF_REGISTER == 8
7309 if ((*ip) == CEE_CONV_U8)
7310 sp [-1]->inst_c0 = (guint32)data;
7312 sp [-1]->inst_c0 = data;
7314 sp [-1]->inst_ls_word = data;
7315 if ((*ip) == CEE_CONV_U8)
7316 sp [-1]->inst_ms_word = 0;
7318 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7320 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7327 case CEE_CONV_OVF_I4:
7328 case CEE_CONV_OVF_I1:
7329 case CEE_CONV_OVF_I2:
7330 case CEE_CONV_OVF_I:
7331 case CEE_CONV_OVF_U:
7334 if (sp [-1]->type == STACK_R8) {
7335 ADD_UNOP (CEE_CONV_OVF_I8);
7342 case CEE_CONV_OVF_U1:
7343 case CEE_CONV_OVF_U2:
7344 case CEE_CONV_OVF_U4:
7347 if (sp [-1]->type == STACK_R8) {
7348 ADD_UNOP (CEE_CONV_OVF_U8);
7355 case CEE_CONV_OVF_I1_UN:
7356 case CEE_CONV_OVF_I2_UN:
7357 case CEE_CONV_OVF_I4_UN:
7358 case CEE_CONV_OVF_I8_UN:
7359 case CEE_CONV_OVF_U1_UN:
7360 case CEE_CONV_OVF_U2_UN:
7361 case CEE_CONV_OVF_U4_UN:
7362 case CEE_CONV_OVF_U8_UN:
7363 case CEE_CONV_OVF_I_UN:
7364 case CEE_CONV_OVF_U_UN:
7371 CHECK_CFG_EXCEPTION;
7375 case CEE_ADD_OVF_UN:
7377 case CEE_MUL_OVF_UN:
7379 case CEE_SUB_OVF_UN:
7387 token = read32 (ip + 1);
7388 klass = mini_get_class (method, token, generic_context);
7389 CHECK_TYPELOAD (klass);
7391 if (generic_class_is_reference_type (cfg, klass)) {
7392 MonoInst *store, *load;
7393 int dreg = alloc_preg (cfg);
7395 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7396 load->flags |= ins_flag;
7397 MONO_ADD_INS (cfg->cbb, load);
7399 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7400 store->flags |= ins_flag;
7401 MONO_ADD_INS (cfg->cbb, store);
7403 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7404 MonoInst *dummy_use;
7405 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7406 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7407 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7410 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7422 token = read32 (ip + 1);
7423 klass = mini_get_class (method, token, generic_context);
7424 CHECK_TYPELOAD (klass);
7426 /* Optimize the common ldobj+stloc combination */
7436 loc_index = ip [5] - CEE_STLOC_0;
7443 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7444 CHECK_LOCAL (loc_index);
7446 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7447 ins->dreg = cfg->locals [loc_index]->dreg;
7453 /* Optimize the ldobj+stobj combination */
7454 /* The reference case ends up being a load+store anyway */
7455 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7460 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7467 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7476 CHECK_STACK_OVF (1);
7478 n = read32 (ip + 1);
7480 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7481 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7482 ins->type = STACK_OBJ;
7485 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7486 MonoInst *iargs [1];
7488 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7489 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7491 if (cfg->opt & MONO_OPT_SHARED) {
7492 MonoInst *iargs [3];
7494 if (cfg->compile_aot) {
7495 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7497 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7498 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7499 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7500 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7501 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7503 if (bblock->out_of_line) {
7504 MonoInst *iargs [2];
7506 if (image == mono_defaults.corlib) {
7508 * Avoid relocations in AOT and save some space by using a
7509 * version of helper_ldstr specialized to mscorlib.
7511 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7512 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7514 /* Avoid creating the string object */
7515 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7516 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7517 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7521 if (cfg->compile_aot) {
7522 NEW_LDSTRCONST (cfg, ins, image, n);
7524 MONO_ADD_INS (bblock, ins);
7527 NEW_PCONST (cfg, ins, NULL);
7528 ins->type = STACK_OBJ;
7529 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7531 MONO_ADD_INS (bblock, ins);
7540 MonoInst *iargs [2];
7541 MonoMethodSignature *fsig;
7544 MonoInst *vtable_arg = NULL;
7547 token = read32 (ip + 1);
7548 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7551 fsig = mono_method_get_signature (cmethod, image, token);
7555 mono_save_token_info (cfg, image, token, cmethod);
7557 if (!mono_class_init (cmethod->klass))
7560 if (cfg->generic_sharing_context)
7561 context_used = mono_method_check_context_used (cmethod);
7563 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7564 if (check_linkdemand (cfg, method, cmethod))
7566 CHECK_CFG_EXCEPTION;
7567 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7568 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7571 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7572 emit_generic_class_init (cfg, cmethod->klass);
7573 CHECK_TYPELOAD (cmethod->klass);
7576 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7577 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7578 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7579 mono_class_vtable (cfg->domain, cmethod->klass);
7580 CHECK_TYPELOAD (cmethod->klass);
7582 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7583 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7586 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7587 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7589 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7591 CHECK_TYPELOAD (cmethod->klass);
7592 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7597 n = fsig->param_count;
7601 * Generate smaller code for the common newobj <exception> instruction in
7602 * argument checking code.
7604 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7605 is_exception_class (cmethod->klass) && n <= 2 &&
7606 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7607 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7608 MonoInst *iargs [3];
7610 g_assert (!vtable_arg);
7614 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7617 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7621 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7626 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7629 g_assert_not_reached ();
7637 /* move the args to allow room for 'this' in the first position */
7643 /* check_call_signature () requires sp[0] to be set */
7644 this_ins.type = STACK_OBJ;
7646 if (check_call_signature (cfg, fsig, sp))
7651 if (mini_class_is_system_array (cmethod->klass)) {
7652 g_assert (!vtable_arg);
7654 *sp = emit_get_rgctx_method (cfg, context_used,
7655 cmethod, MONO_RGCTX_INFO_METHOD);
7657 /* Avoid varargs in the common case */
7658 if (fsig->param_count == 1)
7659 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7660 else if (fsig->param_count == 2)
7661 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7662 else if (fsig->param_count == 3)
7663 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7665 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7666 } else if (cmethod->string_ctor) {
7667 g_assert (!context_used);
7668 g_assert (!vtable_arg);
7669 /* we simply pass a null pointer */
7670 EMIT_NEW_PCONST (cfg, *sp, NULL);
7671 /* now call the string ctor */
7672 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7674 MonoInst* callvirt_this_arg = NULL;
7676 if (cmethod->klass->valuetype) {
7677 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7678 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7679 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7684 * The code generated by mini_emit_virtual_call () expects
7685 * iargs [0] to be a boxed instance, but luckily the vcall
7686 * will be transformed into a normal call there.
7688 } else if (context_used) {
7689 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7692 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7694 CHECK_TYPELOAD (cmethod->klass);
7697 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7698 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7699 * As a workaround, we call class cctors before allocating objects.
7701 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7702 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7703 if (cfg->verbose_level > 2)
7704 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7705 class_inits = g_slist_prepend (class_inits, vtable);
7708 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7711 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7714 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7716 /* Now call the actual ctor */
7717 /* Avoid virtual calls to ctors if possible */
7718 if (cmethod->klass->marshalbyref)
7719 callvirt_this_arg = sp [0];
7722 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7723 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7724 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7729 CHECK_CFG_EXCEPTION;
7734 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7735 mono_method_check_inlining (cfg, cmethod) &&
7736 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7737 !g_list_find (dont_inline, cmethod)) {
7740 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7741 cfg->real_offset += 5;
7744 inline_costs += costs - 5;
7747 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7749 } else if (context_used &&
7750 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7751 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7752 MonoInst *cmethod_addr;
7754 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7755 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7757 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7760 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7761 callvirt_this_arg, NULL, vtable_arg);
7765 if (alloc == NULL) {
7767 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7768 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7782 token = read32 (ip + 1);
7783 klass = mini_get_class (method, token, generic_context);
7784 CHECK_TYPELOAD (klass);
7785 if (sp [0]->type != STACK_OBJ)
7788 if (cfg->generic_sharing_context)
7789 context_used = mono_class_check_context_used (klass);
7791 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7798 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7800 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7804 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7805 MonoMethod *mono_castclass;
7806 MonoInst *iargs [1];
7809 mono_castclass = mono_marshal_get_castclass (klass);
7812 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7813 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7814 g_assert (costs > 0);
7817 cfg->real_offset += 5;
7822 inline_costs += costs;
7825 ins = handle_castclass (cfg, klass, *sp, context_used);
7826 CHECK_CFG_EXCEPTION;
7836 token = read32 (ip + 1);
7837 klass = mini_get_class (method, token, generic_context);
7838 CHECK_TYPELOAD (klass);
7839 if (sp [0]->type != STACK_OBJ)
7842 if (cfg->generic_sharing_context)
7843 context_used = mono_class_check_context_used (klass);
7845 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7852 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7854 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7858 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7859 MonoMethod *mono_isinst;
7860 MonoInst *iargs [1];
7863 mono_isinst = mono_marshal_get_isinst (klass);
7866 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7867 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7868 g_assert (costs > 0);
7871 cfg->real_offset += 5;
7876 inline_costs += costs;
7879 ins = handle_isinst (cfg, klass, *sp, context_used);
7880 CHECK_CFG_EXCEPTION;
7887 case CEE_UNBOX_ANY: {
7891 token = read32 (ip + 1);
7892 klass = mini_get_class (method, token, generic_context);
7893 CHECK_TYPELOAD (klass);
7895 mono_save_token_info (cfg, image, token, klass);
7897 if (cfg->generic_sharing_context)
7898 context_used = mono_class_check_context_used (klass);
7900 if (generic_class_is_reference_type (cfg, klass)) {
7901 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7902 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7903 MonoMethod *mono_castclass;
7904 MonoInst *iargs [1];
7907 mono_castclass = mono_marshal_get_castclass (klass);
7910 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7911 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7913 g_assert (costs > 0);
7916 cfg->real_offset += 5;
7920 inline_costs += costs;
7922 ins = handle_castclass (cfg, klass, *sp, context_used);
7923 CHECK_CFG_EXCEPTION;
7931 if (mono_class_is_nullable (klass)) {
7932 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7939 ins = handle_unbox (cfg, klass, sp, context_used);
7945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7958 token = read32 (ip + 1);
7959 klass = mini_get_class (method, token, generic_context);
7960 CHECK_TYPELOAD (klass);
7962 mono_save_token_info (cfg, image, token, klass);
7964 if (cfg->generic_sharing_context)
7965 context_used = mono_class_check_context_used (klass);
7967 if (generic_class_is_reference_type (cfg, klass)) {
7973 if (klass == mono_defaults.void_class)
7975 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7977 /* frequent check in generic code: box (struct), brtrue */
7979 // FIXME: LLVM can't handle the inconsistent bb linking
7980 if (!mono_class_is_nullable (klass) &&
7981 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
7982 (ip [5] == CEE_BRTRUE ||
7983 ip [5] == CEE_BRTRUE_S ||
7984 ip [5] == CEE_BRFALSE ||
7985 ip [5] == CEE_BRFALSE_S)) {
7986 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
7988 MonoBasicBlock *true_bb, *false_bb;
7992 if (cfg->verbose_level > 3) {
7993 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7994 printf ("<box+brtrue opt>\n");
8002 target = ip + 1 + (signed char)(*ip);
8009 target = ip + 4 + (gint)(read32 (ip));
8013 g_assert_not_reached ();
8017 * We need to link both bblocks, since it is needed for handling stack
8018 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8019 * Branching to only one of them would lead to inconsistencies, so
8020 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8022 GET_BBLOCK (cfg, true_bb, target);
8023 GET_BBLOCK (cfg, false_bb, ip);
8025 mono_link_bblock (cfg, cfg->cbb, true_bb);
8026 mono_link_bblock (cfg, cfg->cbb, false_bb);
8028 if (sp != stack_start) {
8029 handle_stack_args (cfg, stack_start, sp - stack_start);
8031 CHECK_UNVERIFIABLE (cfg);
8034 if (COMPILE_LLVM (cfg)) {
8035 dreg = alloc_ireg (cfg);
8036 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8039 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8041 /* The JIT can't eliminate the iconst+compare */
8042 MONO_INST_NEW (cfg, ins, OP_BR);
8043 ins->inst_target_bb = is_true ? true_bb : false_bb;
8044 MONO_ADD_INS (cfg->cbb, ins);
8047 start_new_bblock = 1;
8051 *sp++ = handle_box (cfg, val, klass, context_used);
8053 CHECK_CFG_EXCEPTION;
8062 token = read32 (ip + 1);
8063 klass = mini_get_class (method, token, generic_context);
8064 CHECK_TYPELOAD (klass);
8066 mono_save_token_info (cfg, image, token, klass);
8068 if (cfg->generic_sharing_context)
8069 context_used = mono_class_check_context_used (klass);
8071 if (mono_class_is_nullable (klass)) {
8074 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8075 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8079 ins = handle_unbox (cfg, klass, sp, context_used);
8089 MonoClassField *field;
8093 if (*ip == CEE_STFLD) {
8100 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8102 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8105 token = read32 (ip + 1);
8106 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8107 field = mono_method_get_wrapper_data (method, token);
8108 klass = field->parent;
8111 field = mono_field_from_token (image, token, &klass, generic_context);
8115 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8116 FIELD_ACCESS_FAILURE;
8117 mono_class_init (klass);
8119 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8120 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8121 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8122 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8125 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8126 if (*ip == CEE_STFLD) {
8127 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8129 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8130 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8131 MonoInst *iargs [5];
8134 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8135 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8136 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8140 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8141 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8142 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8143 g_assert (costs > 0);
8145 cfg->real_offset += 5;
8148 inline_costs += costs;
8150 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8155 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8157 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8158 if (sp [0]->opcode != OP_LDADDR)
8159 store->flags |= MONO_INST_FAULT;
8161 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8162 /* insert call to write barrier */
8163 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8164 MonoInst *iargs [2], *dummy_use;
8167 dreg = alloc_preg (cfg);
8168 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8170 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8172 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8175 store->flags |= ins_flag;
8182 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8183 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8184 MonoInst *iargs [4];
8187 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8188 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8189 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8190 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8191 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8192 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8194 g_assert (costs > 0);
8196 cfg->real_offset += 5;
8200 inline_costs += costs;
8202 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8206 if (sp [0]->type == STACK_VTYPE) {
8209 /* Have to compute the address of the variable */
8211 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8213 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8215 g_assert (var->klass == klass);
8217 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8221 if (*ip == CEE_LDFLDA) {
8222 if (sp [0]->type == STACK_OBJ) {
8223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8224 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8227 dreg = alloc_preg (cfg);
8229 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8230 ins->klass = mono_class_from_mono_type (field->type);
8231 ins->type = STACK_MP;
8236 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8238 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8239 load->flags |= ins_flag;
8240 if (sp [0]->opcode != OP_LDADDR)
8241 load->flags |= MONO_INST_FAULT;
8252 MonoClassField *field;
8253 gpointer addr = NULL;
8254 gboolean is_special_static;
8257 token = read32 (ip + 1);
8259 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8260 field = mono_method_get_wrapper_data (method, token);
8261 klass = field->parent;
8264 field = mono_field_from_token (image, token, &klass, generic_context);
8267 mono_class_init (klass);
8268 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8269 FIELD_ACCESS_FAILURE;
8271 /* if the class is Critical then transparent code cannot access it's fields */
8272 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8273 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8276 * We can only support shared generic static
8277 * field access on architectures where the
8278 * trampoline code has been extended to handle
8279 * the generic class init.
8281 #ifndef MONO_ARCH_VTABLE_REG
8282 GENERIC_SHARING_FAILURE (*ip);
8285 if (cfg->generic_sharing_context)
8286 context_used = mono_class_check_context_used (klass);
8288 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8290 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8291 * to be called here.
8293 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8294 mono_class_vtable (cfg->domain, klass);
8295 CHECK_TYPELOAD (klass);
8297 mono_domain_lock (cfg->domain);
8298 if (cfg->domain->special_static_fields)
8299 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8300 mono_domain_unlock (cfg->domain);
8302 is_special_static = mono_class_field_is_special_static (field);
8304 /* Generate IR to compute the field address */
8305 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8307 * Fast access to TLS data
8308 * Inline version of get_thread_static_data () in
8312 int idx, static_data_reg, array_reg, dreg;
8313 MonoInst *thread_ins;
8315 // offset &= 0x7fffffff;
8316 // idx = (offset >> 24) - 1;
8317 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8319 thread_ins = mono_get_thread_intrinsic (cfg);
8320 MONO_ADD_INS (cfg->cbb, thread_ins);
8321 static_data_reg = alloc_ireg (cfg);
8322 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8324 if (cfg->compile_aot) {
8325 int offset_reg, offset2_reg, idx_reg;
8327 /* For TLS variables, this will return the TLS offset */
8328 EMIT_NEW_SFLDACONST (cfg, ins, field);
8329 offset_reg = ins->dreg;
8330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8331 idx_reg = alloc_ireg (cfg);
8332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8334 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8335 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8336 array_reg = alloc_ireg (cfg);
8337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8338 offset2_reg = alloc_ireg (cfg);
8339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8340 dreg = alloc_ireg (cfg);
8341 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8343 offset = (gsize)addr & 0x7fffffff;
8344 idx = (offset >> 24) - 1;
8346 array_reg = alloc_ireg (cfg);
8347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8348 dreg = alloc_ireg (cfg);
8349 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8351 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8352 (cfg->compile_aot && is_special_static) ||
8353 (context_used && is_special_static)) {
8354 MonoInst *iargs [2];
8356 g_assert (field->parent);
8357 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8359 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8360 field, MONO_RGCTX_INFO_CLASS_FIELD);
8362 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8364 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8365 } else if (context_used) {
8366 MonoInst *static_data;
8369 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8370 method->klass->name_space, method->klass->name, method->name,
8371 depth, field->offset);
8374 if (mono_class_needs_cctor_run (klass, method))
8375 emit_generic_class_init (cfg, klass);
8378 * The pointer we're computing here is
8380 * super_info.static_data + field->offset
8382 static_data = emit_get_rgctx_klass (cfg, context_used,
8383 klass, MONO_RGCTX_INFO_STATIC_DATA);
8385 if (field->offset == 0) {
8388 int addr_reg = mono_alloc_preg (cfg);
8389 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8391 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8392 MonoInst *iargs [2];
8394 g_assert (field->parent);
8395 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8396 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8397 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8399 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8401 CHECK_TYPELOAD (klass);
8403 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8404 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8405 if (cfg->verbose_level > 2)
8406 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8407 class_inits = g_slist_prepend (class_inits, vtable);
8409 if (cfg->run_cctors) {
8411 /* This makes it so that inlining cannot trigger */
8412 /* .cctors: too many apps depend on them */
8413 /* running with a specific order... */
8414 if (! vtable->initialized)
8416 ex = mono_runtime_class_init_full (vtable, FALSE);
8418 set_exception_object (cfg, ex);
8419 goto exception_exit;
8423 addr = (char*)vtable->data + field->offset;
8425 if (cfg->compile_aot)
8426 EMIT_NEW_SFLDACONST (cfg, ins, field);
8428 EMIT_NEW_PCONST (cfg, ins, addr);
8430 MonoInst *iargs [1];
8431 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8432 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8436 /* Generate IR to do the actual load/store operation */
8438 if (*ip == CEE_LDSFLDA) {
8439 ins->klass = mono_class_from_mono_type (field->type);
8440 ins->type = STACK_PTR;
8442 } else if (*ip == CEE_STSFLD) {
8447 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8448 store->flags |= ins_flag;
8450 gboolean is_const = FALSE;
8451 MonoVTable *vtable = NULL;
8453 if (!context_used) {
8454 vtable = mono_class_vtable (cfg->domain, klass);
8455 CHECK_TYPELOAD (klass);
8457 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8458 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8459 gpointer addr = (char*)vtable->data + field->offset;
8460 int ro_type = field->type->type;
8461 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8462 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8464 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8467 case MONO_TYPE_BOOLEAN:
8469 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8473 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8476 case MONO_TYPE_CHAR:
8478 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8482 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8487 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8491 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8497 case MONO_TYPE_FNPTR:
8498 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8499 type_to_eval_stack_type ((cfg), field->type, *sp);
8502 case MONO_TYPE_STRING:
8503 case MONO_TYPE_OBJECT:
8504 case MONO_TYPE_CLASS:
8505 case MONO_TYPE_SZARRAY:
8506 case MONO_TYPE_ARRAY:
8507 if (!mono_gc_is_moving ()) {
8508 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8509 type_to_eval_stack_type ((cfg), field->type, *sp);
8517 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8522 case MONO_TYPE_VALUETYPE:
8532 CHECK_STACK_OVF (1);
8534 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8535 load->flags |= ins_flag;
8548 token = read32 (ip + 1);
8549 klass = mini_get_class (method, token, generic_context);
8550 CHECK_TYPELOAD (klass);
8551 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8552 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8553 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8554 generic_class_is_reference_type (cfg, klass)) {
8555 MonoInst *dummy_use;
8556 /* insert call to write barrier */
8557 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8558 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8559 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8571 const char *data_ptr;
8573 guint32 field_token;
8579 token = read32 (ip + 1);
8581 klass = mini_get_class (method, token, generic_context);
8582 CHECK_TYPELOAD (klass);
8584 if (cfg->generic_sharing_context)
8585 context_used = mono_class_check_context_used (klass);
8587 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8588 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8589 ins->sreg1 = sp [0]->dreg;
8590 ins->type = STACK_I4;
8591 ins->dreg = alloc_ireg (cfg);
8592 MONO_ADD_INS (cfg->cbb, ins);
8593 *sp = mono_decompose_opcode (cfg, ins);
8598 MonoClass *array_class = mono_array_class_get (klass, 1);
8599 /* FIXME: we cannot get a managed
8600 allocator because we can't get the
8601 open generic class's vtable. We
8602 have the same problem in
8603 handle_alloc(). This
8604 needs to be solved so that we can
8605 have managed allocs of shared
8608 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8609 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8611 MonoMethod *managed_alloc = NULL;
8613 /* FIXME: Decompose later to help abcrem */
8616 args [0] = emit_get_rgctx_klass (cfg, context_used,
8617 array_class, MONO_RGCTX_INFO_VTABLE);
8622 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8624 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8626 if (cfg->opt & MONO_OPT_SHARED) {
8627 /* Decompose now to avoid problems with references to the domainvar */
8628 MonoInst *iargs [3];
8630 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8631 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8634 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8636 /* Decompose later since it is needed by abcrem */
8637 MonoClass *array_type = mono_array_class_get (klass, 1);
8638 mono_class_vtable (cfg->domain, array_type);
8639 CHECK_TYPELOAD (array_type);
8641 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8642 ins->dreg = alloc_preg (cfg);
8643 ins->sreg1 = sp [0]->dreg;
8644 ins->inst_newa_class = klass;
8645 ins->type = STACK_OBJ;
8647 MONO_ADD_INS (cfg->cbb, ins);
8648 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8649 cfg->cbb->has_array_access = TRUE;
8651 /* Needed so mono_emit_load_get_addr () gets called */
8652 mono_get_got_var (cfg);
8662 * we inline/optimize the initialization sequence if possible.
8663 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8664 * for small sizes open code the memcpy
8665 * ensure the rva field is big enough
8667 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8668 MonoMethod *memcpy_method = get_memcpy_method ();
8669 MonoInst *iargs [3];
8670 int add_reg = alloc_preg (cfg);
8672 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8673 if (cfg->compile_aot) {
8674 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8676 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8678 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8679 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8688 if (sp [0]->type != STACK_OBJ)
8691 dreg = alloc_preg (cfg);
8692 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8693 ins->dreg = alloc_preg (cfg);
8694 ins->sreg1 = sp [0]->dreg;
8695 ins->type = STACK_I4;
8696 /* This flag will be inherited by the decomposition */
8697 ins->flags |= MONO_INST_FAULT;
8698 MONO_ADD_INS (cfg->cbb, ins);
8699 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8700 cfg->cbb->has_array_access = TRUE;
8708 if (sp [0]->type != STACK_OBJ)
8711 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8713 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8714 CHECK_TYPELOAD (klass);
8715 /* we need to make sure that this array is exactly the type it needs
8716 * to be for correctness. the wrappers are lax with their usage
8717 * so we need to ignore them here
8719 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8720 MonoClass *array_class = mono_array_class_get (klass, 1);
8721 mini_emit_check_array_type (cfg, sp [0], array_class);
8722 CHECK_TYPELOAD (array_class);
8726 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8741 case CEE_LDELEM_REF: {
8747 if (*ip == CEE_LDELEM) {
8749 token = read32 (ip + 1);
8750 klass = mini_get_class (method, token, generic_context);
8751 CHECK_TYPELOAD (klass);
8752 mono_class_init (klass);
8755 klass = array_access_to_klass (*ip);
8757 if (sp [0]->type != STACK_OBJ)
8760 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8762 if (sp [1]->opcode == OP_ICONST) {
8763 int array_reg = sp [0]->dreg;
8764 int index_reg = sp [1]->dreg;
8765 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8767 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8768 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8770 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8771 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8774 if (*ip == CEE_LDELEM)
8787 case CEE_STELEM_REF:
8794 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8796 if (*ip == CEE_STELEM) {
8798 token = read32 (ip + 1);
8799 klass = mini_get_class (method, token, generic_context);
8800 CHECK_TYPELOAD (klass);
8801 mono_class_init (klass);
8804 klass = array_access_to_klass (*ip);
8806 if (sp [0]->type != STACK_OBJ)
8809 /* storing a NULL doesn't need any of the complex checks in stelemref */
8810 if (generic_class_is_reference_type (cfg, klass) &&
8811 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8812 MonoMethod* helper = mono_marshal_get_stelemref ();
8813 MonoInst *iargs [3];
8815 if (sp [0]->type != STACK_OBJ)
8817 if (sp [2]->type != STACK_OBJ)
8824 mono_emit_method_call (cfg, helper, iargs, NULL);
8826 if (sp [1]->opcode == OP_ICONST) {
8827 int array_reg = sp [0]->dreg;
8828 int index_reg = sp [1]->dreg;
8829 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8831 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8832 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8834 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8839 if (*ip == CEE_STELEM)
8846 case CEE_CKFINITE: {
8850 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8851 ins->sreg1 = sp [0]->dreg;
8852 ins->dreg = alloc_freg (cfg);
8853 ins->type = STACK_R8;
8854 MONO_ADD_INS (bblock, ins);
8856 *sp++ = mono_decompose_opcode (cfg, ins);
8861 case CEE_REFANYVAL: {
8862 MonoInst *src_var, *src;
8864 int klass_reg = alloc_preg (cfg);
8865 int dreg = alloc_preg (cfg);
8868 MONO_INST_NEW (cfg, ins, *ip);
8871 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8872 CHECK_TYPELOAD (klass);
8873 mono_class_init (klass);
8875 if (cfg->generic_sharing_context)
8876 context_used = mono_class_check_context_used (klass);
8879 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8881 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8882 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8886 MonoInst *klass_ins;
8888 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8889 klass, MONO_RGCTX_INFO_KLASS);
8892 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8893 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8895 mini_emit_class_check (cfg, klass_reg, klass);
8897 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8898 ins->type = STACK_MP;
8903 case CEE_MKREFANY: {
8904 MonoInst *loc, *addr;
8907 MONO_INST_NEW (cfg, ins, *ip);
8910 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8911 CHECK_TYPELOAD (klass);
8912 mono_class_init (klass);
8914 if (cfg->generic_sharing_context)
8915 context_used = mono_class_check_context_used (klass);
8917 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8918 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8921 MonoInst *const_ins;
8922 int type_reg = alloc_preg (cfg);
8924 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8928 } else if (cfg->compile_aot) {
8929 int const_reg = alloc_preg (cfg);
8930 int type_reg = alloc_preg (cfg);
8932 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8935 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8937 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8938 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8942 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8943 ins->type = STACK_VTYPE;
8944 ins->klass = mono_defaults.typed_reference_class;
8951 MonoClass *handle_class;
8953 CHECK_STACK_OVF (1);
8956 n = read32 (ip + 1);
8958 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8959 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8960 handle = mono_method_get_wrapper_data (method, n);
8961 handle_class = mono_method_get_wrapper_data (method, n + 1);
8962 if (handle_class == mono_defaults.typehandle_class)
8963 handle = &((MonoClass*)handle)->byval_arg;
8966 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8970 mono_class_init (handle_class);
8971 if (cfg->generic_sharing_context) {
8972 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8973 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8974 /* This case handles ldtoken
8975 of an open type, like for
8978 } else if (handle_class == mono_defaults.typehandle_class) {
8979 /* If we get a MONO_TYPE_CLASS
8980 then we need to provide the
8982 instantiation of it. */
8983 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8986 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8987 } else if (handle_class == mono_defaults.fieldhandle_class)
8988 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8989 else if (handle_class == mono_defaults.methodhandle_class)
8990 context_used = mono_method_check_context_used (handle);
8992 g_assert_not_reached ();
8995 if ((cfg->opt & MONO_OPT_SHARED) &&
8996 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8997 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8998 MonoInst *addr, *vtvar, *iargs [3];
8999 int method_context_used;
9001 if (cfg->generic_sharing_context)
9002 method_context_used = mono_method_check_context_used (method);
9004 method_context_used = 0;
9006 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9008 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9009 EMIT_NEW_ICONST (cfg, iargs [1], n);
9010 if (method_context_used) {
9011 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9012 method, MONO_RGCTX_INFO_METHOD);
9013 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9015 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9016 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9018 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9020 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9022 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9024 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9025 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9026 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9027 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9028 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9029 MonoClass *tclass = mono_class_from_mono_type (handle);
9031 mono_class_init (tclass);
9033 ins = emit_get_rgctx_klass (cfg, context_used,
9034 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9035 } else if (cfg->compile_aot) {
9036 if (method->wrapper_type) {
9037 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9038 /* Special case for static synchronized wrappers */
9039 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9041 /* FIXME: n is not a normal token */
9042 cfg->disable_aot = TRUE;
9043 EMIT_NEW_PCONST (cfg, ins, NULL);
9046 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9049 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9051 ins->type = STACK_OBJ;
9052 ins->klass = cmethod->klass;
9055 MonoInst *addr, *vtvar;
9057 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9060 if (handle_class == mono_defaults.typehandle_class) {
9061 ins = emit_get_rgctx_klass (cfg, context_used,
9062 mono_class_from_mono_type (handle),
9063 MONO_RGCTX_INFO_TYPE);
9064 } else if (handle_class == mono_defaults.methodhandle_class) {
9065 ins = emit_get_rgctx_method (cfg, context_used,
9066 handle, MONO_RGCTX_INFO_METHOD);
9067 } else if (handle_class == mono_defaults.fieldhandle_class) {
9068 ins = emit_get_rgctx_field (cfg, context_used,
9069 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9071 g_assert_not_reached ();
9073 } else if (cfg->compile_aot) {
9074 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9076 EMIT_NEW_PCONST (cfg, ins, handle);
9078 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9080 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9090 MONO_INST_NEW (cfg, ins, OP_THROW);
9092 ins->sreg1 = sp [0]->dreg;
9094 bblock->out_of_line = TRUE;
9095 MONO_ADD_INS (bblock, ins);
9096 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9097 MONO_ADD_INS (bblock, ins);
9100 link_bblock (cfg, bblock, end_bblock);
9101 start_new_bblock = 1;
9103 case CEE_ENDFINALLY:
9104 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9105 MONO_ADD_INS (bblock, ins);
9107 start_new_bblock = 1;
9110 * Control will leave the method so empty the stack, otherwise
9111 * the next basic block will start with a nonempty stack.
9113 while (sp != stack_start) {
9121 if (*ip == CEE_LEAVE) {
9123 target = ip + 5 + (gint32)read32(ip + 1);
9126 target = ip + 2 + (signed char)(ip [1]);
9129 /* empty the stack */
9130 while (sp != stack_start) {
9135 * If this leave statement is in a catch block, check for a
9136 * pending exception, and rethrow it if necessary.
9137 * We avoid doing this in runtime invoke wrappers, since those are called
9138 * by native code which expects the wrapper to catch all exceptions.
9140 for (i = 0; i < header->num_clauses; ++i) {
9141 MonoExceptionClause *clause = &header->clauses [i];
9144 * Use <= in the final comparison to handle clauses with multiple
9145 * leave statements, like in bug #78024.
9146 * The ordering of the exception clauses guarantees that we find the
9149 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9151 MonoBasicBlock *dont_throw;
9156 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9159 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9161 NEW_BBLOCK (cfg, dont_throw);
9164 * Currently, we always rethrow the abort exception, despite the
9165 * fact that this is not correct. See thread6.cs for an example.
9166 * But propagating the abort exception is more important than
9167 * getting the semantics right.
9169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9171 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9173 MONO_START_BB (cfg, dont_throw);
9178 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9180 MonoExceptionClause *clause;
9182 for (tmp = handlers; tmp; tmp = tmp->next) {
9184 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9186 link_bblock (cfg, bblock, tblock);
9187 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9188 ins->inst_target_bb = tblock;
9189 ins->inst_eh_block = clause;
9190 MONO_ADD_INS (bblock, ins);
9191 bblock->has_call_handler = 1;
9192 if (COMPILE_LLVM (cfg)) {
9193 MonoBasicBlock *target_bb;
9196 * Link the finally bblock with the target, since it will
9197 * conceptually branch there.
9198 * FIXME: Have to link the bblock containing the endfinally.
9200 GET_BBLOCK (cfg, target_bb, target);
9201 link_bblock (cfg, tblock, target_bb);
9204 g_list_free (handlers);
9207 MONO_INST_NEW (cfg, ins, OP_BR);
9208 MONO_ADD_INS (bblock, ins);
9209 GET_BBLOCK (cfg, tblock, target);
9210 link_bblock (cfg, bblock, tblock);
9211 ins->inst_target_bb = tblock;
9212 start_new_bblock = 1;
9214 if (*ip == CEE_LEAVE)
9223 * Mono specific opcodes
9225 case MONO_CUSTOM_PREFIX: {
9227 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9231 case CEE_MONO_ICALL: {
9233 MonoJitICallInfo *info;
9235 token = read32 (ip + 2);
9236 func = mono_method_get_wrapper_data (method, token);
9237 info = mono_find_jit_icall_by_addr (func);
9240 CHECK_STACK (info->sig->param_count);
9241 sp -= info->sig->param_count;
9243 ins = mono_emit_jit_icall (cfg, info->func, sp);
9244 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9248 inline_costs += 10 * num_calls++;
9252 case CEE_MONO_LDPTR: {
9255 CHECK_STACK_OVF (1);
9257 token = read32 (ip + 2);
9259 ptr = mono_method_get_wrapper_data (method, token);
9260 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9261 MonoJitICallInfo *callinfo;
9262 const char *icall_name;
9264 icall_name = method->name + strlen ("__icall_wrapper_");
9265 g_assert (icall_name);
9266 callinfo = mono_find_jit_icall_by_name (icall_name);
9267 g_assert (callinfo);
9269 if (ptr == callinfo->func) {
9270 /* Will be transformed into an AOTCONST later */
9271 EMIT_NEW_PCONST (cfg, ins, ptr);
9277 /* FIXME: Generalize this */
9278 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9279 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9284 EMIT_NEW_PCONST (cfg, ins, ptr);
9287 inline_costs += 10 * num_calls++;
9288 /* Can't embed random pointers into AOT code */
9289 cfg->disable_aot = 1;
9292 case CEE_MONO_ICALL_ADDR: {
9293 MonoMethod *cmethod;
9296 CHECK_STACK_OVF (1);
9298 token = read32 (ip + 2);
9300 cmethod = mono_method_get_wrapper_data (method, token);
9302 if (cfg->compile_aot) {
9303 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9305 ptr = mono_lookup_internal_call (cmethod);
9307 EMIT_NEW_PCONST (cfg, ins, ptr);
9313 case CEE_MONO_VTADDR: {
9314 MonoInst *src_var, *src;
9320 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9321 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9326 case CEE_MONO_NEWOBJ: {
9327 MonoInst *iargs [2];
9329 CHECK_STACK_OVF (1);
9331 token = read32 (ip + 2);
9332 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9333 mono_class_init (klass);
9334 NEW_DOMAINCONST (cfg, iargs [0]);
9335 MONO_ADD_INS (cfg->cbb, iargs [0]);
9336 NEW_CLASSCONST (cfg, iargs [1], klass);
9337 MONO_ADD_INS (cfg->cbb, iargs [1]);
9338 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9340 inline_costs += 10 * num_calls++;
9343 case CEE_MONO_OBJADDR:
9346 MONO_INST_NEW (cfg, ins, OP_MOVE);
9347 ins->dreg = alloc_preg (cfg);
9348 ins->sreg1 = sp [0]->dreg;
9349 ins->type = STACK_MP;
9350 MONO_ADD_INS (cfg->cbb, ins);
9354 case CEE_MONO_LDNATIVEOBJ:
9356 * Similar to LDOBJ, but instead load the unmanaged
9357 * representation of the vtype to the stack.
9362 token = read32 (ip + 2);
9363 klass = mono_method_get_wrapper_data (method, token);
9364 g_assert (klass->valuetype);
9365 mono_class_init (klass);
9368 MonoInst *src, *dest, *temp;
9371 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9372 temp->backend.is_pinvoke = 1;
9373 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9374 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9376 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9377 dest->type = STACK_VTYPE;
9378 dest->klass = klass;
9384 case CEE_MONO_RETOBJ: {
9386 * Same as RET, but return the native representation of a vtype
9389 g_assert (cfg->ret);
9390 g_assert (mono_method_signature (method)->pinvoke);
9395 token = read32 (ip + 2);
9396 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9398 if (!cfg->vret_addr) {
9399 g_assert (cfg->ret_var_is_local);
9401 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9403 EMIT_NEW_RETLOADA (cfg, ins);
9405 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9407 if (sp != stack_start)
9410 MONO_INST_NEW (cfg, ins, OP_BR);
9411 ins->inst_target_bb = end_bblock;
9412 MONO_ADD_INS (bblock, ins);
9413 link_bblock (cfg, bblock, end_bblock);
9414 start_new_bblock = 1;
9418 case CEE_MONO_CISINST:
9419 case CEE_MONO_CCASTCLASS: {
9424 token = read32 (ip + 2);
9425 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9426 if (ip [1] == CEE_MONO_CISINST)
9427 ins = handle_cisinst (cfg, klass, sp [0]);
9429 ins = handle_ccastclass (cfg, klass, sp [0]);
9435 case CEE_MONO_SAVE_LMF:
9436 case CEE_MONO_RESTORE_LMF:
9437 #ifdef MONO_ARCH_HAVE_LMF_OPS
9438 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9439 MONO_ADD_INS (bblock, ins);
9440 cfg->need_lmf_area = TRUE;
9444 case CEE_MONO_CLASSCONST:
9445 CHECK_STACK_OVF (1);
9447 token = read32 (ip + 2);
9448 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9451 inline_costs += 10 * num_calls++;
9453 case CEE_MONO_NOT_TAKEN:
9454 bblock->out_of_line = TRUE;
9458 CHECK_STACK_OVF (1);
9460 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9461 ins->dreg = alloc_preg (cfg);
9462 ins->inst_offset = (gint32)read32 (ip + 2);
9463 ins->type = STACK_PTR;
9464 MONO_ADD_INS (bblock, ins);
9468 case CEE_MONO_DYN_CALL: {
9471 /* It would be easier to call a trampoline, but that would put an
9472 * extra frame on the stack, confusing exception handling. So
9473 * implement it inline using an opcode for now.
9476 if (!cfg->dyn_call_var) {
9477 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9478 /* prevent it from being register allocated */
9479 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9482 /* Has to use a call inst since it local regalloc expects it */
9483 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9484 ins = (MonoInst*)call;
9486 ins->sreg1 = sp [0]->dreg;
9487 ins->sreg2 = sp [1]->dreg;
9488 MONO_ADD_INS (bblock, ins);
9490 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9491 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9495 inline_costs += 10 * num_calls++;
9500 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9510 /* somewhat similar to LDTOKEN */
9511 MonoInst *addr, *vtvar;
9512 CHECK_STACK_OVF (1);
9513 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9515 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9516 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9518 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9519 ins->type = STACK_VTYPE;
9520 ins->klass = mono_defaults.argumenthandle_class;
9533 * The following transforms:
9534 * CEE_CEQ into OP_CEQ
9535 * CEE_CGT into OP_CGT
9536 * CEE_CGT_UN into OP_CGT_UN
9537 * CEE_CLT into OP_CLT
9538 * CEE_CLT_UN into OP_CLT_UN
9540 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9542 MONO_INST_NEW (cfg, ins, cmp->opcode);
9544 cmp->sreg1 = sp [0]->dreg;
9545 cmp->sreg2 = sp [1]->dreg;
9546 type_from_op (cmp, sp [0], sp [1]);
9548 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9549 cmp->opcode = OP_LCOMPARE;
9550 else if (sp [0]->type == STACK_R8)
9551 cmp->opcode = OP_FCOMPARE;
9553 cmp->opcode = OP_ICOMPARE;
9554 MONO_ADD_INS (bblock, cmp);
9555 ins->type = STACK_I4;
9556 ins->dreg = alloc_dreg (cfg, ins->type);
9557 type_from_op (ins, sp [0], sp [1]);
9559 if (cmp->opcode == OP_FCOMPARE) {
9561 * The backends expect the fceq opcodes to do the
9564 cmp->opcode = OP_NOP;
9565 ins->sreg1 = cmp->sreg1;
9566 ins->sreg2 = cmp->sreg2;
9568 MONO_ADD_INS (bblock, ins);
9575 MonoMethod *cil_method;
9576 gboolean needs_static_rgctx_invoke;
9578 CHECK_STACK_OVF (1);
9580 n = read32 (ip + 2);
9581 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9584 mono_class_init (cmethod->klass);
9586 mono_save_token_info (cfg, image, n, cmethod);
9588 if (cfg->generic_sharing_context)
9589 context_used = mono_method_check_context_used (cmethod);
9591 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9593 cil_method = cmethod;
9594 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9595 METHOD_ACCESS_FAILURE;
9597 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9598 if (check_linkdemand (cfg, method, cmethod))
9600 CHECK_CFG_EXCEPTION;
9601 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9602 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9606 * Optimize the common case of ldftn+delegate creation
9608 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9609 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9610 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9612 int invoke_context_used = 0;
9614 invoke = mono_get_delegate_invoke (ctor_method->klass);
9615 if (!invoke || !mono_method_signature (invoke))
9618 if (cfg->generic_sharing_context)
9619 invoke_context_used = mono_method_check_context_used (invoke);
9621 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9622 /* FIXME: SGEN support */
9623 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9624 MonoInst *target_ins;
9627 if (cfg->verbose_level > 3)
9628 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9629 target_ins = sp [-1];
9631 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9632 CHECK_CFG_EXCEPTION;
9641 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9642 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9646 inline_costs += 10 * num_calls++;
9649 case CEE_LDVIRTFTN: {
9654 n = read32 (ip + 2);
9655 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9658 mono_class_init (cmethod->klass);
9660 if (cfg->generic_sharing_context)
9661 context_used = mono_method_check_context_used (cmethod);
9663 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9664 if (check_linkdemand (cfg, method, cmethod))
9666 CHECK_CFG_EXCEPTION;
9667 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9668 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9674 args [1] = emit_get_rgctx_method (cfg, context_used,
9675 cmethod, MONO_RGCTX_INFO_METHOD);
9678 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9680 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9683 inline_costs += 10 * num_calls++;
9687 CHECK_STACK_OVF (1);
9689 n = read16 (ip + 2);
9691 EMIT_NEW_ARGLOAD (cfg, ins, n);
9696 CHECK_STACK_OVF (1);
9698 n = read16 (ip + 2);
9700 NEW_ARGLOADA (cfg, ins, n);
9701 MONO_ADD_INS (cfg->cbb, ins);
9709 n = read16 (ip + 2);
9711 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9713 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9717 CHECK_STACK_OVF (1);
9719 n = read16 (ip + 2);
9721 EMIT_NEW_LOCLOAD (cfg, ins, n);
9726 unsigned char *tmp_ip;
9727 CHECK_STACK_OVF (1);
9729 n = read16 (ip + 2);
9732 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9738 EMIT_NEW_LOCLOADA (cfg, ins, n);
9747 n = read16 (ip + 2);
9749 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9751 emit_stloc_ir (cfg, sp, header, n);
9758 if (sp != stack_start)
9760 if (cfg->method != method)
9762 * Inlining this into a loop in a parent could lead to
9763 * stack overflows which is different behavior than the
9764 * non-inlined case, thus disable inlining in this case.
9766 goto inline_failure;
9768 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9769 ins->dreg = alloc_preg (cfg);
9770 ins->sreg1 = sp [0]->dreg;
9771 ins->type = STACK_PTR;
9772 MONO_ADD_INS (cfg->cbb, ins);
9774 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9776 ins->flags |= MONO_INST_INIT;
9781 case CEE_ENDFILTER: {
9782 MonoExceptionClause *clause, *nearest;
9783 int cc, nearest_num;
9787 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9789 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9790 ins->sreg1 = (*sp)->dreg;
9791 MONO_ADD_INS (bblock, ins);
9792 start_new_bblock = 1;
9797 for (cc = 0; cc < header->num_clauses; ++cc) {
9798 clause = &header->clauses [cc];
9799 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9800 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9801 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9807 if ((ip - header->code) != nearest->handler_offset)
9812 case CEE_UNALIGNED_:
9813 ins_flag |= MONO_INST_UNALIGNED;
9814 /* FIXME: record alignment? we can assume 1 for now */
9819 ins_flag |= MONO_INST_VOLATILE;
9823 ins_flag |= MONO_INST_TAILCALL;
9824 cfg->flags |= MONO_CFG_HAS_TAIL;
9825 /* Can't inline tail calls at this time */
9826 inline_costs += 100000;
9833 token = read32 (ip + 2);
9834 klass = mini_get_class (method, token, generic_context);
9835 CHECK_TYPELOAD (klass);
9836 if (generic_class_is_reference_type (cfg, klass))
9837 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9839 mini_emit_initobj (cfg, *sp, NULL, klass);
9843 case CEE_CONSTRAINED_:
9845 token = read32 (ip + 2);
9846 if (method->wrapper_type != MONO_WRAPPER_NONE)
9847 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9849 constrained_call = mono_class_get_full (image, token, generic_context);
9850 CHECK_TYPELOAD (constrained_call);
9855 MonoInst *iargs [3];
9859 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9860 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9861 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9862 /* emit_memset only works when val == 0 */
9863 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9868 if (ip [1] == CEE_CPBLK) {
9869 MonoMethod *memcpy_method = get_memcpy_method ();
9870 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9872 MonoMethod *memset_method = get_memset_method ();
9873 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9883 ins_flag |= MONO_INST_NOTYPECHECK;
9885 ins_flag |= MONO_INST_NORANGECHECK;
9886 /* we ignore the no-nullcheck for now since we
9887 * really do it explicitly only when doing callvirt->call
9893 int handler_offset = -1;
9895 for (i = 0; i < header->num_clauses; ++i) {
9896 MonoExceptionClause *clause = &header->clauses [i];
9897 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9898 handler_offset = clause->handler_offset;
9903 bblock->flags |= BB_EXCEPTION_UNSAFE;
9905 g_assert (handler_offset != -1);
9907 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9908 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9909 ins->sreg1 = load->dreg;
9910 MONO_ADD_INS (bblock, ins);
9912 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9913 MONO_ADD_INS (bblock, ins);
9916 link_bblock (cfg, bblock, end_bblock);
9917 start_new_bblock = 1;
9925 CHECK_STACK_OVF (1);
9927 token = read32 (ip + 2);
9928 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9929 MonoType *type = mono_type_create_from_typespec (image, token);
9930 token = mono_type_size (type, &ialign);
9932 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9933 CHECK_TYPELOAD (klass);
9934 mono_class_init (klass);
9935 token = mono_class_value_size (klass, &align);
9937 EMIT_NEW_ICONST (cfg, ins, token);
9942 case CEE_REFANYTYPE: {
9943 MonoInst *src_var, *src;
9949 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9951 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9952 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9953 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9971 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9981 g_warning ("opcode 0x%02x not handled", *ip);
9985 if (start_new_bblock != 1)
9988 bblock->cil_length = ip - bblock->cil_code;
9989 bblock->next_bb = end_bblock;
9991 if (cfg->method == method && cfg->domainvar) {
9993 MonoInst *get_domain;
9995 cfg->cbb = init_localsbb;
9997 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9998 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10001 get_domain->dreg = alloc_preg (cfg);
10002 MONO_ADD_INS (cfg->cbb, get_domain);
10004 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10005 MONO_ADD_INS (cfg->cbb, store);
10008 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10009 if (cfg->compile_aot)
10010 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10011 mono_get_got_var (cfg);
10014 if (cfg->method == method && cfg->got_var)
10015 mono_emit_load_got_addr (cfg);
10020 cfg->cbb = init_localsbb;
10022 for (i = 0; i < header->num_locals; ++i) {
10023 MonoType *ptype = header->locals [i];
10024 int t = ptype->type;
10025 dreg = cfg->locals [i]->dreg;
10027 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10028 t = mono_class_enum_basetype (ptype->data.klass)->type;
10029 if (ptype->byref) {
10030 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10031 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10032 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10033 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10034 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10035 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10036 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10037 ins->type = STACK_R8;
10038 ins->inst_p0 = (void*)&r8_0;
10039 ins->dreg = alloc_dreg (cfg, STACK_R8);
10040 MONO_ADD_INS (init_localsbb, ins);
10041 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10042 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10043 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10044 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10046 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10051 if (cfg->init_ref_vars && cfg->method == method) {
10052 /* Emit initialization for ref vars */
10053 // FIXME: Avoid duplication initialization for IL locals.
10054 for (i = 0; i < cfg->num_varinfo; ++i) {
10055 MonoInst *ins = cfg->varinfo [i];
10057 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10058 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10062 /* Add a sequence point for method entry/exit events */
10064 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10065 MONO_ADD_INS (init_localsbb, ins);
10066 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10067 MONO_ADD_INS (cfg->bb_exit, ins);
10072 if (cfg->method == method) {
10073 MonoBasicBlock *bb;
10074 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10075 bb->region = mono_find_block_region (cfg, bb->real_offset);
10077 mono_create_spvar_for_region (cfg, bb->region);
10078 if (cfg->verbose_level > 2)
10079 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10083 g_slist_free (class_inits);
10084 dont_inline = g_list_remove (dont_inline, method);
10086 if (inline_costs < 0) {
10089 /* Method is too large */
10090 mname = mono_method_full_name (method, TRUE);
10091 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10092 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10094 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10095 mono_basic_block_free (original_bb);
10099 if ((cfg->verbose_level > 2) && (cfg->method == method))
10100 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10102 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10103 mono_basic_block_free (original_bb);
10104 return inline_costs;
10107 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10114 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10118 set_exception_type_from_invalid_il (cfg, method, ip);
10122 g_slist_free (class_inits);
10123 mono_basic_block_free (original_bb);
10124 dont_inline = g_list_remove (dont_inline, method);
10125 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * Map a store-from-register membase opcode to its store-immediate counterpart
 * (OP_*_MEMBASE_REG -> OP_*_MEMBASE_IMM); asserts on any other opcode.
 * NOTE(review): this excerpt is non-contiguous — the return type, switch
 * statement and braces fall in lines missing from this view.
 */
10130 store_membase_reg_to_store_membase_imm (int opcode)
10133 case OP_STORE_MEMBASE_REG:
10134 return OP_STORE_MEMBASE_IMM;
10135 case OP_STOREI1_MEMBASE_REG:
10136 return OP_STOREI1_MEMBASE_IMM;
10137 case OP_STOREI2_MEMBASE_REG:
10138 return OP_STOREI2_MEMBASE_IMM;
10139 case OP_STOREI4_MEMBASE_REG:
10140 return OP_STOREI4_MEMBASE_IMM;
10141 case OP_STOREI8_MEMBASE_REG:
10142 return OP_STOREI8_MEMBASE_IMM;
/* unknown store opcode: hard failure rather than silent miscompile */
10144 g_assert_not_reached ();
10150 #endif /* DISABLE_JIT */
/*
 * Map a reg-reg opcode to its reg-immediate variant (e.g. OP_IADD ->
 * OP_IADD_IMM), covering integer/long ALU ops, compares, membase stores and
 * some x86/amd64-specific opcodes behind target #ifdefs.
 * NOTE(review): most `case` labels are missing from this excerpt (only the
 * `return` lines survive); pairings below are inferred from the visible
 * returns — confirm against the full source.
 */
10153 mono_op_to_op_imm (int opcode)
10157 return OP_IADD_IMM;
10159 return OP_ISUB_IMM;
10161 return OP_IDIV_IMM;
10163 return OP_IDIV_UN_IMM;
10165 return OP_IREM_IMM;
10167 return OP_IREM_UN_IMM;
10169 return OP_IMUL_IMM;
10171 return OP_IAND_IMM;
10175 return OP_IXOR_IMM;
10177 return OP_ISHL_IMM;
10179 return OP_ISHR_IMM;
10181 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU immediates */
10184 return OP_LADD_IMM;
10186 return OP_LSUB_IMM;
10188 return OP_LAND_IMM;
10192 return OP_LXOR_IMM;
10194 return OP_LSHL_IMM;
10196 return OP_LSHR_IMM;
10198 return OP_LSHR_UN_IMM;
/* compares */
10201 return OP_COMPARE_IMM;
10203 return OP_ICOMPARE_IMM;
10205 return OP_LCOMPARE_IMM;
/* membase stores: value operand becomes an immediate */
10207 case OP_STORE_MEMBASE_REG:
10208 return OP_STORE_MEMBASE_IMM;
10209 case OP_STOREI1_MEMBASE_REG:
10210 return OP_STOREI1_MEMBASE_IMM;
10211 case OP_STOREI2_MEMBASE_REG:
10212 return OP_STOREI2_MEMBASE_IMM;
10213 case OP_STOREI4_MEMBASE_REG:
10214 return OP_STOREI4_MEMBASE_IMM;
/* target-specific opcodes */
10216 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10218 return OP_X86_PUSH_IMM;
10219 case OP_X86_COMPARE_MEMBASE_REG:
10220 return OP_X86_COMPARE_MEMBASE_IMM;
10222 #if defined(TARGET_AMD64)
10223 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10224 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10226 case OP_VOIDCALL_REG:
10227 return OP_VOIDCALL;
10235 return OP_LOCALLOC_IMM;
/*
 * Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode; asserts on anything else. Both CEE_LDIND_I
 * (native int) and CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): most `case` labels are missing from this excerpt; only
 * CEE_LDIND_REF's label survived.
 */
10242 ldind_to_load_membase (int opcode)
10246 return OP_LOADI1_MEMBASE;
10248 return OP_LOADU1_MEMBASE;
10250 return OP_LOADI2_MEMBASE;
10252 return OP_LOADU2_MEMBASE;
10254 return OP_LOADI4_MEMBASE;
10256 return OP_LOADU4_MEMBASE;
10258 return OP_LOAD_MEMBASE;
10259 case CEE_LDIND_REF:
10260 return OP_LOAD_MEMBASE;
10262 return OP_LOADI8_MEMBASE;
10264 return OP_LOADR4_MEMBASE;
10266 return OP_LOADR8_MEMBASE;
10268 g_assert_not_reached ();
/*
 * Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; asserts on anything else. CEE_STIND_REF
 * maps to the pointer-sized OP_STORE_MEMBASE_REG.
 * NOTE(review): most `case` labels are missing from this excerpt; only
 * CEE_STIND_REF's label survived.
 */
10275 stind_to_store_membase (int opcode)
10279 return OP_STOREI1_MEMBASE_REG;
10281 return OP_STOREI2_MEMBASE_REG;
10283 return OP_STOREI4_MEMBASE_REG;
10285 case CEE_STIND_REF:
10286 return OP_STORE_MEMBASE_REG;
10288 return OP_STOREI8_MEMBASE_REG;
10290 return OP_STORER4_MEMBASE_REG;
10292 return OP_STORER8_MEMBASE_REG;
10294 g_assert_not_reached ();
/*
 * Map a base+offset load (OP_LOAD*_MEMBASE) to an absolute-address load
 * (OP_LOAD*_MEM) where the target supports it — currently only x86/amd64,
 * with the 8-byte variant gated on a 64-bit register size.
 * NOTE(review): the non-x86 fallthrough/default return falls in lines
 * missing from this excerpt.
 */
10301 mono_load_membase_to_load_mem (int opcode)
10303 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10304 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10306 case OP_LOAD_MEMBASE:
10307 return OP_LOAD_MEM;
10308 case OP_LOADU1_MEMBASE:
10309 return OP_LOADU1_MEM;
10310 case OP_LOADU2_MEMBASE:
10311 return OP_LOADU2_MEM;
10312 case OP_LOADI4_MEMBASE:
10313 return OP_LOADI4_MEM;
10314 case OP_LOADU4_MEMBASE:
10315 return OP_LOADU4_MEM;
10316 #if SIZEOF_REGISTER == 8
10317 case OP_LOADI8_MEMBASE:
10318 return OP_LOADI8_MEM;
/*
 * For the local peephole pass: fuse an ALU op whose result is stored via
 * `store_opcode` into a single read-modify-write membase opcode
 * (e.g. ADD + STORE_MEMBASE_REG -> OP_X86_ADD_MEMBASE_REG). Only implemented
 * for x86 (32-bit stores) and amd64 (32- and 64-bit stores); each section
 * first rejects store opcodes it cannot fuse.
 * NOTE(review): the `case` labels and the early `return -1`s are missing
 * from this excerpt; the op->opcode pairings are inferred from the visible
 * returns — confirm against the full source.
 */
10327 op_to_op_dest_membase (int store_opcode, int opcode)
10329 #if defined(TARGET_X86)
/* x86: only pointer-sized / 32-bit stores can be fused */
10330 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10335 return OP_X86_ADD_MEMBASE_REG;
10337 return OP_X86_SUB_MEMBASE_REG;
10339 return OP_X86_AND_MEMBASE_REG;
10341 return OP_X86_OR_MEMBASE_REG;
10343 return OP_X86_XOR_MEMBASE_REG;
/* immediate variants */
10346 return OP_X86_ADD_MEMBASE_IMM;
10349 return OP_X86_SUB_MEMBASE_IMM;
10352 return OP_X86_AND_MEMBASE_IMM;
10355 return OP_X86_OR_MEMBASE_IMM;
10358 return OP_X86_XOR_MEMBASE_IMM;
10364 #if defined(TARGET_AMD64)
/* amd64: pointer-sized, 32-bit and 64-bit stores can be fused */
10365 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10370 return OP_X86_ADD_MEMBASE_REG;
10372 return OP_X86_SUB_MEMBASE_REG;
10374 return OP_X86_AND_MEMBASE_REG;
10376 return OP_X86_OR_MEMBASE_REG;
10378 return OP_X86_XOR_MEMBASE_REG;
10380 return OP_X86_ADD_MEMBASE_IMM;
10382 return OP_X86_SUB_MEMBASE_IMM;
10384 return OP_X86_AND_MEMBASE_IMM;
10386 return OP_X86_OR_MEMBASE_IMM;
10388 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
10390 return OP_AMD64_ADD_MEMBASE_REG;
10392 return OP_AMD64_SUB_MEMBASE_REG;
10394 return OP_AMD64_AND_MEMBASE_REG;
10396 return OP_AMD64_OR_MEMBASE_REG;
10398 return OP_AMD64_XOR_MEMBASE_REG;
10401 return OP_AMD64_ADD_MEMBASE_IMM;
10404 return OP_AMD64_SUB_MEMBASE_IMM;
10407 return OP_AMD64_AND_MEMBASE_IMM;
10410 return OP_AMD64_OR_MEMBASE_IMM;
10413 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * Fuse a condition-set opcode with a following 1-byte store into a single
 * x86/amd64 SETcc-to-memory opcode (SETEQ/SETNE directly into the membase
 * destination). Only valid when the store is OP_STOREI1_MEMBASE_REG, since
 * SETcc writes a single byte.
 * NOTE(review): the `case` labels for the two conditions and the default
 * return are missing from this excerpt.
 */
10423 op_to_op_store_membase (int store_opcode, int opcode)
10425 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10428 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10429 return OP_X86_SETEQ_MEMBASE;
10431 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10432 return OP_X86_SETNE_MEMBASE;
/*
 * Fuse a load feeding an instruction's FIRST source operand into a single
 * memory-operand opcode (e.g. LOAD_MEMBASE + COMPARE ->
 * OP_X86_COMPARE_MEMBASE_REG), per x86/amd64 addressing-mode support.
 * Each target section first checks that `load_opcode` has a compatible width.
 * NOTE(review): several `case` labels and the `return -1` fallbacks are
 * missing from this excerpt; note that part of the amd64 immediate-compare
 * handling is commented out in the original ("only works for 32 bit
 * immediates").
 */
10440 op_to_op_src1_membase (int load_opcode, int opcode)
10443 /* FIXME: This has sign extension issues */
/* special-case: unsigned byte load + 32-bit immediate compare */
10445 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10446 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/32-bit loads can be folded */
10449 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10454 return OP_X86_PUSH_MEMBASE;
10455 case OP_COMPARE_IMM:
10456 case OP_ICOMPARE_IMM:
10457 return OP_X86_COMPARE_MEMBASE_IMM;
10460 return OP_X86_COMPARE_MEMBASE_REG;
10464 #ifdef TARGET_AMD64
10465 /* FIXME: This has sign extension issues */
10467 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10468 return OP_X86_COMPARE_MEMBASE8_IMM;
/* 64-bit push needs a pointer-sized or 8-byte load */
10473 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10474 return OP_X86_PUSH_MEMBASE;
10476 /* FIXME: This only works for 32 bit immediates
10477 case OP_COMPARE_IMM:
10478 case OP_LCOMPARE_IMM:
10479 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10480 return OP_AMD64_COMPARE_MEMBASE_IMM;
10482 case OP_ICOMPARE_IMM:
10483 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10484 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick 64- or 32-bit form by load width */
10488 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10489 return OP_AMD64_COMPARE_MEMBASE_REG;
10492 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10493 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * Fuse a load feeding an instruction's SECOND source operand into a single
 * reg-with-memory-operand opcode (e.g. ADD + LOADI4_MEMBASE ->
 * OP_X86_ADD_REG_MEMBASE). On amd64 the 32-bit forms reuse the OP_X86_*
 * opcodes while 64-bit loads get OP_AMD64_* variants.
 * NOTE(review): the `case` labels and `return -1` fallbacks are missing
 * from this excerpt; pairings are inferred from the visible returns.
 */
10502 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/32-bit loads can be folded */
10505 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10511 return OP_X86_COMPARE_REG_MEMBASE;
10513 return OP_X86_ADD_REG_MEMBASE;
10515 return OP_X86_SUB_REG_MEMBASE;
10517 return OP_X86_AND_REG_MEMBASE;
10519 return OP_X86_OR_REG_MEMBASE;
10521 return OP_X86_XOR_REG_MEMBASE;
10525 #ifdef TARGET_AMD64
/* amd64, 32-bit loads: x86-style opcodes */
10526 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10529 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10531 return OP_X86_ADD_REG_MEMBASE;
10533 return OP_X86_SUB_REG_MEMBASE;
10535 return OP_X86_AND_REG_MEMBASE;
10537 return OP_X86_OR_REG_MEMBASE;
10539 return OP_X86_XOR_REG_MEMBASE;
/* amd64, 64-bit / pointer-sized loads: AMD64-specific opcodes */
10541 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10545 return OP_AMD64_COMPARE_REG_MEMBASE;
10547 return OP_AMD64_ADD_REG_MEMBASE;
10549 return OP_AMD64_SUB_REG_MEMBASE;
10551 return OP_AMD64_AND_REG_MEMBASE;
10553 return OP_AMD64_OR_REG_MEMBASE;
10555 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * Like mono_op_to_op_imm (), but refuses to convert opcodes that would be
 * software-emulated on this target (long shifts on 32-bit registers without
 * native long-shift support; mul/div/rem when the arch emulates them) —
 * converting those to _IMM forms would defeat the emulation path.
 * NOTE(review): the switch, the opcode `case` labels inside the #if blocks
 * and their early returns are missing from this excerpt.
 */
10564 mono_op_to_op_imm_noemul (int opcode)
10567 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10573 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10581 return mono_op_to_op_imm (opcode);
10585 #ifndef DISABLE_JIT
10588 * mono_handle_global_vregs:
10590 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass 1 scans every instruction of every bblock, tracking for each vreg the
 * bblock it was last seen in (vreg_to_bb, block_num + 1; -1 once seen in two
 * different bblocks) and creating a MonoInst variable for cross-bblock vregs.
 * Pass 2 demotes variables used in only one bblock back to local vregs, and
 * finally compacts cfg->varinfo / cfg->vars over the dead entries.
 */
10594 mono_handle_global_vregs (MonoCompile *cfg)
10596 gint32 *vreg_to_bb;
10597 MonoBasicBlock *bb;
/*
 * NOTE(review): element size looks wrong — the array holds gint32, but this
 * allocates sizeof (gint32*) per entry, and the "+ 1" adds one *byte* (it
 * binds outside the multiplication), not one element. Over-allocates on
 * LP64 so it is not unsafe there, but confirm the intent was
 * sizeof (gint32) * (cfg->next_vreg + 1).
 */
10600 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10602 #ifdef MONO_ARCH_SIMD_INTRINSICS
10603 if (cfg->uses_simd_intrinsics)
10604 mono_simd_simplify_indirection (cfg);
10607 /* Find local vregs used in more than one bb */
10608 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10609 MonoInst *ins = bb->code;
10610 int block_num = bb->block_num;
10612 if (cfg->verbose_level > 2)
10613 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10616 for (; ins; ins = ins->next) {
10617 const char *spec = INS_INFO (ins->opcode);
10618 int regtype = 0, regindex;
10621 if (G_UNLIKELY (cfg->verbose_level > 2))
10622 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
10624 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = source operands; blank spec char = unused */
10626 for (regindex = 0; regindex < 4; regindex ++) {
10629 if (regindex == 0) {
10630 regtype = spec [MONO_INST_DEST];
10631 if (regtype == ' ')
10634 } else if (regindex == 1) {
10635 regtype = spec [MONO_INST_SRC1];
10636 if (regtype == ' ')
10639 } else if (regindex == 2) {
10640 regtype = spec [MONO_INST_SRC2];
10641 if (regtype == ' ')
10644 } else if (regindex == 3) {
10645 regtype = spec [MONO_INST_SRC3];
10646 if (regtype == ' ')
10651 #if SIZEOF_REGISTER == 4
10652 /* In the LLVM case, the long opcodes are not decomposed */
10653 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10655 * Since some instructions reference the original long vreg,
10656 * and some reference the two component vregs, it is quite hard
10657 * to determine when it needs to be global. So be conservative.
10659 if (!get_vreg_to_inst (cfg, vreg)) {
10660 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10662 if (cfg->verbose_level > 2)
10663 printf ("LONG VREG R%d made global.\n", vreg);
10667 * Make the component vregs volatile since the optimizations can
10668 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high word components of the long */
10670 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10671 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10675 g_assert (vreg != -1);
10677 prev_bb = vreg_to_bb [vreg];
10678 if (prev_bb == 0) {
10679 /* 0 is a valid block num */
10680 vreg_to_bb [vreg] = block_num + 1;
10681 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are handled by the regalloc, skip them */
10682 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10685 if (!get_vreg_to_inst (cfg, vreg)) {
10686 if (G_UNLIKELY (cfg->verbose_level > 2))
10687 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable whose type matches the spec regtype */
10691 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10694 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10697 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10700 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10703 g_assert_not_reached ();
10707 /* Flag as having been used in more than one bb */
10708 vreg_to_bb [vreg] = -1;
10714 /* If a variable is used in only one bblock, convert it into a local vreg */
10715 for (i = 0; i < cfg->num_varinfo; i++) {
10716 MonoInst *var = cfg->varinfo [i];
10717 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10719 switch (var->type) {
10725 #if SIZEOF_REGISTER == 8
10728 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10729 /* Enabling this screws up the fp stack on x86 */
10732 /* Arguments are implicitly global */
10733 /* Putting R4 vars into registers doesn't work currently */
10734 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10736 * Make that the variable's liveness interval doesn't contain a call, since
10737 * that would cause the lvreg to be spilled, making the whole optimization
10740 /* This is too slow for JIT compilation */
10742 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10744 int def_index, call_index, ins_index;
10745 gboolean spilled = FALSE;
10750 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10751 const char *spec = INS_INFO (ins->opcode);
10753 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10754 def_index = ins_index;
/*
 * NOTE(review): both legs of this || are identical (SRC1/sreg1 twice) —
 * the second leg almost certainly should test spec [MONO_INST_SRC2] /
 * ins->sreg2, so uses via the second source operand are currently missed.
 * Confirm against upstream before changing.
 */
10756 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10757 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10758 if (call_index > def_index) {
10764 if (MONO_IS_CALL (ins))
10765 call_index = ins_index;
10775 if (G_UNLIKELY (cfg->verbose_level > 2))
10776 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* mark dead so the compaction below drops it */
10777 var->flags |= MONO_INST_IS_DEAD;
10778 cfg->vreg_to_inst [var->dreg] = NULL;
10785 * Compress the varinfo and vars tables so the liveness computation is faster and
10786 * takes up less space.
10789 for (i = 0; i < cfg->num_varinfo; ++i) {
10790 MonoInst *var = cfg->varinfo [i];
10791 if (pos < i && cfg->locals_start == i)
10792 cfg->locals_start = pos;
10793 if (!(var->flags & MONO_INST_IS_DEAD)) {
10795 cfg->varinfo [pos] = cfg->varinfo [i];
10796 cfg->varinfo [pos]->inst_c0 = pos;
10797 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10798 cfg->vars [pos].idx = pos;
10799 #if SIZEOF_REGISTER == 4
10800 if (cfg->varinfo [pos]->type == STACK_I8) {
10801 /* Modify the two component vars too */
10804 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10805 var1->inst_c0 = pos;
10806 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10807 var1->inst_c0 = pos;
10814 cfg->num_varinfo = pos;
10815 if (cfg->locals_start > cfg->num_varinfo)
10816 cfg->locals_start = cfg->num_varinfo;
10820 * mono_spill_global_vars:
10822 * Generate spill code for variables which are not allocated to registers,
10823 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10824 * code is generated which could be optimized by the local optimization passes.
10827 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10829 MonoBasicBlock *bb;
10831 int orig_next_vreg;
10832 guint32 *vreg_to_lvreg;
10834 guint32 i, lvregs_len;
10835 gboolean dest_has_lvreg = FALSE;
10836 guint32 stacktypes [128];
10837 MonoInst **live_range_start, **live_range_end;
10838 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10840 *need_local_opts = FALSE;
10842 memset (spec2, 0, sizeof (spec2));
10844 /* FIXME: Move this function to mini.c */
10845 stacktypes ['i'] = STACK_PTR;
10846 stacktypes ['l'] = STACK_I8;
10847 stacktypes ['f'] = STACK_R8;
10848 #ifdef MONO_ARCH_SIMD_INTRINSICS
10849 stacktypes ['x'] = STACK_VTYPE;
10852 #if SIZEOF_REGISTER == 4
10853 /* Create MonoInsts for longs */
10854 for (i = 0; i < cfg->num_varinfo; i++) {
10855 MonoInst *ins = cfg->varinfo [i];
10857 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10858 switch (ins->type) {
10863 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10866 g_assert (ins->opcode == OP_REGOFFSET);
10868 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10870 tree->opcode = OP_REGOFFSET;
10871 tree->inst_basereg = ins->inst_basereg;
10872 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10874 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10876 tree->opcode = OP_REGOFFSET;
10877 tree->inst_basereg = ins->inst_basereg;
10878 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10888 /* FIXME: widening and truncation */
10891 * As an optimization, when a variable allocated to the stack is first loaded into
10892 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10893 * the variable again.
10895 orig_next_vreg = cfg->next_vreg;
10896 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10897 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10901 * These arrays contain the first and last instructions accessing a given
10903 * Since we emit bblocks in the same order we process them here, and we
10904 * don't split live ranges, these will precisely describe the live range of
10905 * the variable, i.e. the instruction range where a valid value can be found
10906 * in the variables location.
10907 * The live range is computed using the liveness info computed by the liveness pass.
10908 * We can't use vmv->range, since that is an abstract live range, and we need
10909 * one which is instruction precise.
10910 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10912 /* FIXME: Only do this if debugging info is requested */
10913 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10914 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10915 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10916 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10918 /* Add spill loads/stores */
10919 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10922 if (cfg->verbose_level > 2)
10923 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10925 /* Clear vreg_to_lvreg array */
10926 for (i = 0; i < lvregs_len; i++)
10927 vreg_to_lvreg [lvregs [i]] = 0;
10931 MONO_BB_FOR_EACH_INS (bb, ins) {
10932 const char *spec = INS_INFO (ins->opcode);
10933 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10934 gboolean store, no_lvreg;
10935 int sregs [MONO_MAX_SRC_REGS];
10937 if (G_UNLIKELY (cfg->verbose_level > 2))
10938 mono_print_ins (ins);
10940 if (ins->opcode == OP_NOP)
10944 * We handle LDADDR here as well, since it can only be decomposed
10945 * when variable addresses are known.
10947 if (ins->opcode == OP_LDADDR) {
10948 MonoInst *var = ins->inst_p0;
10950 if (var->opcode == OP_VTARG_ADDR) {
10951 /* Happens on SPARC/S390 where vtypes are passed by reference */
10952 MonoInst *vtaddr = var->inst_left;
10953 if (vtaddr->opcode == OP_REGVAR) {
10954 ins->opcode = OP_MOVE;
10955 ins->sreg1 = vtaddr->dreg;
10957 else if (var->inst_left->opcode == OP_REGOFFSET) {
10958 ins->opcode = OP_LOAD_MEMBASE;
10959 ins->inst_basereg = vtaddr->inst_basereg;
10960 ins->inst_offset = vtaddr->inst_offset;
10964 g_assert (var->opcode == OP_REGOFFSET);
10966 ins->opcode = OP_ADD_IMM;
10967 ins->sreg1 = var->inst_basereg;
10968 ins->inst_imm = var->inst_offset;
10971 *need_local_opts = TRUE;
10972 spec = INS_INFO (ins->opcode);
10975 if (ins->opcode < MONO_CEE_LAST) {
10976 mono_print_ins (ins);
10977 g_assert_not_reached ();
10981 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10985 if (MONO_IS_STORE_MEMBASE (ins)) {
10986 tmp_reg = ins->dreg;
10987 ins->dreg = ins->sreg2;
10988 ins->sreg2 = tmp_reg;
10991 spec2 [MONO_INST_DEST] = ' ';
10992 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10993 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10994 spec2 [MONO_INST_SRC3] = ' ';
10996 } else if (MONO_IS_STORE_MEMINDEX (ins))
10997 g_assert_not_reached ();
11002 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11003 printf ("\t %.3s %d", spec, ins->dreg);
11004 num_sregs = mono_inst_get_src_registers (ins, sregs);
11005 for (srcindex = 0; srcindex < 3; ++srcindex)
11006 printf (" %d", sregs [srcindex]);
11013 regtype = spec [MONO_INST_DEST];
11014 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11017 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11018 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11019 MonoInst *store_ins;
11021 MonoInst *def_ins = ins;
11022 int dreg = ins->dreg; /* The original vreg */
11024 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11026 if (var->opcode == OP_REGVAR) {
11027 ins->dreg = var->dreg;
11028 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11030 * Instead of emitting a load+store, use a _membase opcode.
11032 g_assert (var->opcode == OP_REGOFFSET);
11033 if (ins->opcode == OP_MOVE) {
11037 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11038 ins->inst_basereg = var->inst_basereg;
11039 ins->inst_offset = var->inst_offset;
11042 spec = INS_INFO (ins->opcode);
11046 g_assert (var->opcode == OP_REGOFFSET);
11048 prev_dreg = ins->dreg;
11050 /* Invalidate any previous lvreg for this vreg */
11051 vreg_to_lvreg [ins->dreg] = 0;
11055 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11057 store_opcode = OP_STOREI8_MEMBASE_REG;
11060 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11062 if (regtype == 'l') {
11063 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11064 mono_bblock_insert_after_ins (bb, ins, store_ins);
11065 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11066 mono_bblock_insert_after_ins (bb, ins, store_ins);
11067 def_ins = store_ins;
11070 g_assert (store_opcode != OP_STOREV_MEMBASE);
11072 /* Try to fuse the store into the instruction itself */
11073 /* FIXME: Add more instructions */
11074 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11075 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11076 ins->inst_imm = ins->inst_c0;
11077 ins->inst_destbasereg = var->inst_basereg;
11078 ins->inst_offset = var->inst_offset;
11079 spec = INS_INFO (ins->opcode);
11080 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11081 ins->opcode = store_opcode;
11082 ins->inst_destbasereg = var->inst_basereg;
11083 ins->inst_offset = var->inst_offset;
11087 tmp_reg = ins->dreg;
11088 ins->dreg = ins->sreg2;
11089 ins->sreg2 = tmp_reg;
11092 spec2 [MONO_INST_DEST] = ' ';
11093 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11094 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11095 spec2 [MONO_INST_SRC3] = ' ';
11097 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11098 // FIXME: The backends expect the base reg to be in inst_basereg
11099 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11101 ins->inst_basereg = var->inst_basereg;
11102 ins->inst_offset = var->inst_offset;
11103 spec = INS_INFO (ins->opcode);
11105 /* printf ("INS: "); mono_print_ins (ins); */
11106 /* Create a store instruction */
11107 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11109 /* Insert it after the instruction */
11110 mono_bblock_insert_after_ins (bb, ins, store_ins);
11112 def_ins = store_ins;
11115 * We can't assign ins->dreg to var->dreg here, since the
11116 * sregs could use it. So set a flag, and do it after
11119 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11120 dest_has_lvreg = TRUE;
11125 if (def_ins && !live_range_start [dreg]) {
11126 live_range_start [dreg] = def_ins;
11127 live_range_start_bb [dreg] = bb;
11134 num_sregs = mono_inst_get_src_registers (ins, sregs);
11135 for (srcindex = 0; srcindex < 3; ++srcindex) {
11136 regtype = spec [MONO_INST_SRC1 + srcindex];
11137 sreg = sregs [srcindex];
11139 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11140 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11141 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11142 MonoInst *use_ins = ins;
11143 MonoInst *load_ins;
11144 guint32 load_opcode;
11146 if (var->opcode == OP_REGVAR) {
11147 sregs [srcindex] = var->dreg;
11148 //mono_inst_set_src_registers (ins, sregs);
11149 live_range_end [sreg] = use_ins;
11150 live_range_end_bb [sreg] = bb;
11154 g_assert (var->opcode == OP_REGOFFSET);
11156 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11158 g_assert (load_opcode != OP_LOADV_MEMBASE);
11160 if (vreg_to_lvreg [sreg]) {
11161 g_assert (vreg_to_lvreg [sreg] != -1);
11163 /* The variable is already loaded to an lvreg */
11164 if (G_UNLIKELY (cfg->verbose_level > 2))
11165 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11166 sregs [srcindex] = vreg_to_lvreg [sreg];
11167 //mono_inst_set_src_registers (ins, sregs);
11171 /* Try to fuse the load into the instruction */
11172 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11173 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11174 sregs [0] = var->inst_basereg;
11175 //mono_inst_set_src_registers (ins, sregs);
11176 ins->inst_offset = var->inst_offset;
11177 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11178 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11179 sregs [1] = var->inst_basereg;
11180 //mono_inst_set_src_registers (ins, sregs);
11181 ins->inst_offset = var->inst_offset;
11183 if (MONO_IS_REAL_MOVE (ins)) {
11184 ins->opcode = OP_NOP;
11187 //printf ("%d ", srcindex); mono_print_ins (ins);
11189 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11191 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11192 if (var->dreg == prev_dreg) {
11194 * sreg refers to the value loaded by the load
11195 * emitted below, but we need to use ins->dreg
11196 * since it refers to the store emitted earlier.
11200 g_assert (sreg != -1);
11201 vreg_to_lvreg [var->dreg] = sreg;
11202 g_assert (lvregs_len < 1024);
11203 lvregs [lvregs_len ++] = var->dreg;
11207 sregs [srcindex] = sreg;
11208 //mono_inst_set_src_registers (ins, sregs);
11210 if (regtype == 'l') {
11211 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11212 mono_bblock_insert_before_ins (bb, ins, load_ins);
11213 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11214 mono_bblock_insert_before_ins (bb, ins, load_ins);
11215 use_ins = load_ins;
11218 #if SIZEOF_REGISTER == 4
11219 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11221 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11222 mono_bblock_insert_before_ins (bb, ins, load_ins);
11223 use_ins = load_ins;
11227 if (var->dreg < orig_next_vreg) {
11228 live_range_end [var->dreg] = use_ins;
11229 live_range_end_bb [var->dreg] = bb;
11233 mono_inst_set_src_registers (ins, sregs);
11235 if (dest_has_lvreg) {
11236 g_assert (ins->dreg != -1);
11237 vreg_to_lvreg [prev_dreg] = ins->dreg;
11238 g_assert (lvregs_len < 1024);
11239 lvregs [lvregs_len ++] = prev_dreg;
11240 dest_has_lvreg = FALSE;
11244 tmp_reg = ins->dreg;
11245 ins->dreg = ins->sreg2;
11246 ins->sreg2 = tmp_reg;
11249 if (MONO_IS_CALL (ins)) {
11250 /* Clear vreg_to_lvreg array */
11251 for (i = 0; i < lvregs_len; i++)
11252 vreg_to_lvreg [lvregs [i]] = 0;
11254 } else if (ins->opcode == OP_NOP) {
11256 MONO_INST_NULLIFY_SREGS (ins);
11259 if (cfg->verbose_level > 2)
11260 mono_print_ins_index (1, ins);
11263 /* Extend the live range based on the liveness info */
11264 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11265 for (i = 0; i < cfg->num_varinfo; i ++) {
11266 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11268 if (vreg_is_volatile (cfg, vi->vreg))
11269 /* The liveness info is incomplete */
11272 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11273 /* Live from at least the first ins of this bb */
11274 live_range_start [vi->vreg] = bb->code;
11275 live_range_start_bb [vi->vreg] = bb;
11278 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11279 /* Live at least until the last ins of this bb */
11280 live_range_end [vi->vreg] = bb->last_ins;
11281 live_range_end_bb [vi->vreg] = bb;
11287 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11289 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11290 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11292 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11293 for (i = 0; i < cfg->num_varinfo; ++i) {
11294 int vreg = MONO_VARINFO (cfg, i)->vreg;
11297 if (live_range_start [vreg]) {
11298 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11300 ins->inst_c1 = vreg;
11301 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11303 if (live_range_end [vreg]) {
11304 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11306 ins->inst_c1 = vreg;
11307 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11308 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11310 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11316 g_free (live_range_start);
11317 g_free (live_range_end);
11318 g_free (live_range_start_bb);
11319 g_free (live_range_end_bb);
11324 * - use 'iadd' instead of 'int_add'
11325 * - handling ovf opcodes: decompose in method_to_ir.
11326 * - unify iregs/fregs
11327 * -> partly done, the missing parts are:
11328 * - a more complete unification would involve unifying the hregs as well, so
11329 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11330 * would no longer map to the machine hregs, so the code generators would need to
11331 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11332 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11333 * fp/non-fp branches speeds it up by about 15%.
11334 * - use sext/zext opcodes instead of shifts
11336 * - get rid of TEMPLOADs if possible and use vregs instead
11337 * - clean up usage of OP_P/OP_ opcodes
11338 * - cleanup usage of DUMMY_USE
11339 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11341 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11342 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11343 * - make sure handle_stack_args () is called before the branch is emitted
11344 * - when the new IR is done, get rid of all unused stuff
11345 * - COMPARE/BEQ as separate instructions or unify them ?
11346 * - keeping them separate allows specialized compare instructions like
11347 * compare_imm, compare_membase
11348 * - most back ends unify fp compare+branch, fp compare+ceq
11349 * - integrate mono_save_args into inline_method
11350 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11351 * - handle long shift opts on 32 bit platforms somehow: they require
11352 * 3 sregs (2 for arg1 and 1 for arg2)
11353 * - make byref a 'normal' type.
11354 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11355 * variable if needed.
11356 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11357 * like inline_method.
11358 * - remove inlining restrictions
11359 * - fix LNEG and enable cfold of INEG
11360 * - generalize x86 optimizations like ldelema as a peephole optimization
11361 * - add store_mem_imm for amd64
11362 * - optimize the loading of the interruption flag in the managed->native wrappers
11363 * - avoid special handling of OP_NOP in passes
11364 * - move code inserting instructions into one function/macro.
11365 * - try a coalescing phase after liveness analysis
11366 * - add float -> vreg conversion + local optimizations on !x86
11367 * - figure out how to handle decomposed branches during optimizations, ie.
11368 * compare+branch, op_jump_table+op_br etc.
11369 * - promote RuntimeXHandles to vregs
11370 * - vtype cleanups:
11371 * - add a NEW_VARLOADA_VREG macro
11372 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11373 * accessing vtype fields.
11374 * - get rid of I8CONST on 64 bit platforms
11375 * - dealing with the increase in code size due to branches created during opcode
11377 * - use extended basic blocks
11378 * - all parts of the JIT
11379 * - handle_global_vregs () && local regalloc
11380 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11381 * - sources of increase in code size:
11384 * - isinst and castclass
11385 * - lvregs not allocated to global registers even if used multiple times
11386 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11388 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11389 * - add all micro optimizations from the old JIT
11390 * - put tree optimizations into the deadce pass
11391 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11392 * specific function.
11393 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11394 * fcompare + branchCC.
11395 * - create a helper function for allocating a stack slot, taking into account
11396 * MONO_CFG_HAS_SPILLUP.
11398 * - merge the ia64 switch changes.
11399 * - optimize mono_regstate2_alloc_int/float.
11400 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11401 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11402 * parts of the tree could be separated by other instructions, killing the tree
11403 * arguments, or stores killing loads etc. Also, should we fold loads into other
11404 * instructions if the result of the load is used multiple times ?
11405 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11406 * - LAST MERGE: 108395.
11407 * - when returning vtypes in registers, generate IR and append it to the end of the
11408 * last bb instead of doing it in the epilog.
11409 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11417 - When to decompose opcodes:
11418 - earlier: this makes some optimizations hard to implement, since the low level IR
11419 no longer contains the necessary information. But it is easier to do.
11420 - later: harder to implement, enables more optimizations.
11421 - Branches inside bblocks:
11422 - created when decomposing complex opcodes.
11423 - branches to another bblock: harmless, but not tracked by the branch
11424 optimizations, so need to branch to a label at the start of the bblock.
11425 - branches to inside the same bblock: very problematic, trips up the local
11426 reg allocator. Can be fixed by splitting the current bblock, but that is a
11427 complex operation, since some local vregs can become global vregs etc.
11428 - Local/global vregs:
11429 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11430 local register allocator.
11431 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11432 structure, created by mono_create_var (). Assigned to hregs or the stack by
11433 the global register allocator.
11434 - When to do optimizations like alu->alu_imm:
11435 - earlier -> saves work later on since the IR will be smaller/simpler
11436 - later -> can work on more instructions
11437 - Handling of valuetypes:
11438 - When a vtype is pushed on the stack, a new temporary is created, an
11439 instruction computing its address (LDADDR) is emitted and pushed on
11440 the stack. Need to optimize cases when the vtype is used immediately as in
11441 argument passing, stloc etc.
11442 - Instead of the to_end stuff in the old JIT, simply call the function handling
11443 the values on the stack before emitting the last instruction of the bb.
11446 #endif /* DISABLE_JIT */