2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 * Copies the three source virtual-register ids from REGS [0..2] into
 * INS->sreg1..sreg3.
 * NOTE(review): this chunk is a partial extraction -- the return type,
 * braces and possibly other lines are missing from view.
 */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/*
 * Thin public wrappers over the per-compile virtual-register allocators.
 * Each forwards directly to the corresponding alloc_*reg (cfg) helper:
 * ireg = integer, freg = float, preg = pointer-sized, dreg = destination
 * register chosen by eval-stack type.
 * NOTE(review): partial extraction -- return types and braces are missing
 * from view.
 */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Maps a MonoType to the move opcode used to copy a value of that type
 * between registers (presumably OP_MOVE/OP_FMOVE variants -- the return
 * statements are missing from this extraction; TODO confirm against the
 * full source). Enums are resolved to their base type and generic
 * instances to the container's byval type before re-dispatching.
 * NOTE(review): partial extraction -- many case labels, return statements
 * and braces are missing from view.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying integral type. */
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get special handling. */
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of a basic block: prints MSG, the block number, the list of
 * incoming and outgoing blocks (as "BB<num>(<dfn>)"), then every
 * instruction in the block via mono_print_ins_index ().
 * NOTE(review): partial extraction -- local declarations, separators and
 * braces are missing from view.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/*
 * link_bblock:
 * Adds the edge FROM -> TO to the CFG, keeping both adjacency lists
 * consistent: TO is appended to FROM's out_bb array and FROM to TO's
 * in_bb array. Each direction first scans for an existing edge so the
 * link is idempotent, then grows the array by one slot from the
 * compile mempool (mempool arrays are never freed individually, so the
 * old array is simply abandoned).
 * The printf calls appear to be verbose-mode edge tracing -- the guard
 * condition is missing from this extraction; TODO confirm.
 * NOTE(review): partial extraction -- braces, the copy-back of 'newa'
 * and count increments are missing from view.
 */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Bail out if the out-edge already exists. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in-edge) direction. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/*
 * mono_link_bblock:
 * Public (non-static) wrapper around link_bblock () above.
 * NOTE(review): partial extraction -- return type and braces are missing
 * from view.
 */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/*
 * mono_find_block_region:
 * Computes the region token for the IL offset OFFSET by scanning the
 * method's exception clauses. The token packs the 1-based clause index
 * in the high bits (<< 8) with a MONO_REGION_* kind and the clause
 * flags. Filter ranges are checked first, then handler bodies
 * (finally/fault/catch), then the protected (try) range itself.
 * The fall-through return for "no region" is missing from this
 * extraction -- per the comment block above, it is presumably -1;
 * TODO confirm.
 */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Inside a filter expression (between filter_offset and handler_offset). */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler body: finally, fault, or (default) catch. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not a handler. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collects (as a GList of MonoExceptionClause*) every clause of kind
 * TYPE whose protected range contains IP but not TARGET -- i.e. the
 * clauses whose handlers must run when control transfers from IP to
 * TARGET (e.g. finally blocks crossed by a leave).
 * NOTE(review): partial extraction -- the declaration/initialization of
 * 'res', closing braces and the return are missing from view.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* The branch leaves this clause's protected range. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Returns the stack-pointer spill variable for REGION, creating and
 * caching it in cfg->spvars on first use. The variable is an int-sized
 * OP_LOCAL marked MONO_INST_INDIRECT so the register allocator leaves
 * it on the stack.
 * NOTE(review): partial extraction -- the early-return on cache hit and
 * the function's return statement are missing from view.
 */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/*
 * mono_find_exvar_for_offset:
 * Pure lookup of the exception-object variable cached for the handler at
 * OFFSET; returns NULL (hash-table miss) if none was created yet.
 */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Get-or-create counterpart of mono_find_exvar_for_offset (): caches in
 * cfg->exvars an object-typed OP_LOCAL holding the in-flight exception
 * for the handler at OFFSET, marked MONO_INST_INDIRECT to keep it stack
 * allocated.
 * NOTE(review): partial extraction -- the early-return on cache hit and
 * the function's return statement are missing from view.
 */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 * Sets INST->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and INST->klass
 * according to the MonoType being loaded onto the evaluation stack.
 * Byref types become STACK_MP; enums recurse on their base type; generic
 * instances recurse on the container's byval type; shared type variables
 * (MVAR/VAR) are treated as STACK_OBJ.
 * NOTE(review): partial extraction -- the byref check, many case labels,
 * break statements and braces are missing from view.
 */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* Managed pointer (byref) case. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 * Given a generic (CEE_* or OP_*) opcode in INS and its stack operands
 * SRC1/SRC2, validates the operand stack types against the lookup
 * tables above (bin_num_table, bin_int_table, shift_table,
 * bin_comp_table, neg_table) and specializes INS->opcode to the
 * type-specific variant by adding the per-stack-type offset from the
 * *_op_map tables. Invalid combinations set INS->type = STACK_INV.
 * NOTE(review): partial extraction -- most case labels, break statements
 * and braces are missing from view; the comment groupings below mark
 * the recognizable clusters only.
 */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Binary arithmetic (add/sub/mul/div/rem family). */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Bitwise integer binops. */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compare: pick L/F/I variant from the first operand's width. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
/* Compare-immediate: src2 is an immediate, so validate src1 vs itself. */
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
801 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt family; the &1 variant rejects unordered/unsigned combos. */
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary neg. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
/* Unary not: only integral/pointer stack types are valid. */
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: integer -> unsigned float conversion. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
/* Unsigned-source overflow conversions to native int. */
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
/* Unsigned-source overflow conversions to 32-bit results. */
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width and
 * pointer size. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8 (plain and overflow-checked). */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
/* Conversions to I4 / native int via the ovfops map. */
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no float variants exist. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Memory loads: result stack type follows the load width. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validates that the stack values in ARGS are compatible with the
 * parameter types of SIG (and, apparently, THIS for instance calls --
 * the 'this' handling is missing from this extraction; TODO confirm).
 * Byref parameters must match byref stack entries, reference-typed
 * parameters reference values, and R8 stack entries only R4/R8 params.
 * NOTE(review): partial extraction -- case labels, the failure returns
 * and braces are missing from view; the return convention cannot be
 * determined from what is visible.
 */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
/*
 * mono_get_domainvar:
 * Lazily creates and returns the int-sized OP_LOCAL that caches the
 * MonoDomain* for this method (see the comment block above: the domain
 * is fetched once and reused).
 */
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Lazily creates and returns the rgctx variable used under generic
 * sharing (asserted: only valid when a generic_sharing_context exists).
 * The variable is forced onto the stack via MONO_INST_INDIRECT.
 * NOTE(review): partial extraction -- return type and some braces are
 * missing from view.
 */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Inverse of type_to_eval_stack_type (): maps an instruction's eval
 * stack type back to a representative MonoType* (int32 for STACK_I4,
 * int64 for STACK_I8, native int for STACK_PTR, double for STACK_R8,
 * object for STACK_OBJ, and the instruction's own klass for MP/VTYPE).
 * Unhandled stack types abort via g_error.
 * NOTE(review): partial extraction -- return type, the STACK_MP case
 * label and closing braces are missing from view.
 */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Maps a MonoType to its STACK_* eval-stack type after resolving
 * enums/underlying types via mono_type_get_underlying_type ().
 * Generic instances are split into valuetype vs reference cases.
 * Unreachable for unknown types (g_assert_not_reached).
 * NOTE(review): partial extraction -- the byref check, the switch
 * header, most return statements and braces are missing from view.
 */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Maps a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the array
 * element it accesses (byte/uint16/native int/sbyte/int16/int32/uint32/
 * int64/single/double/object). Unknown opcodes are unreachable.
 * NOTE(review): partial extraction -- the switch header and most case
 * labels are missing from view; only the CEE_*ELEM_REF labels survived.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Returns a local variable used to carry the stack value INS across a
 * basic-block boundary at stack slot SLOT, sharing variables where
 * possible via the cfg->intvars cache keyed by (stack type, slot).
 * Slots beyond the method's declared max_stack (possible with inlining)
 * always get a fresh variable. Value types and other uncacheable stack
 * types fall through to the bottom allocation (cache path is only for
 * the case labels missing from this extraction; TODO confirm which).
 * NOTE(review): partial extraction -- local declarations, the returns
 * at the end and braces are missing from view.
 */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
/* Cache hit: reuse the previously created variable. */
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Records an image+token pair for KEY in cfg->token_info_hash so the
 * AOT compiler can later resolve KEY back to a metadata token.
 * Skipped when not compiling AOT, when a generic context is active
 * (image+token alone would be ambiguous), or for wrapper references
 * (token table == 0) -- see the inline comment below.
 * NOTE(review): partial extraction -- return type and closing braces are
 * missing from view.
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spills the COUNT values left on the eval stack (SP) at the end of the
 * current bblock into local variables, so successor blocks can reload
 * them (see the long comment above this function). Allocates/reuses the
 * out_stack variable array, propagates it to successors as their
 * in_stack, emits the temp stores, and finally re-stores into any
 * successors whose in_stack was already assigned and differs. A stack
 * depth mismatch at a join point marks the cfg unverifiable.
 * NOTE(review): partial extraction -- local declarations (i, bindex),
 * break/continue statements and many braces are missing from view.
 */
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with values on the stack: pick/create the out_stack vars. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer adopting a successor's existing in_stack. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* Otherwise allocate a fresh array and fill it with shared temps. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpose, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors lacking an in_stack. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
/* Emit the actual spills and replace sp entries with the temps. */
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR that loads interface_offsets [klass->interface_id] into @intf_reg.
 * The interface-offsets array is laid out in memory immediately *before* the
 * vtable, which is why the non-AOT path uses a negative offset from @vtable_reg.
 * NOTE(review): several lines (closing braces / else) are elided in this view.
 */
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: the interface id is not known at JIT time, so materialize the
 * pre-adjusted id (MONO_PATCH_INFO_ADJUSTED_IID) via a patchable constant
 * and index off the vtable pointer with it. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a compile-time constant; load straight from the slot
 * at -(interface_id + 1) words before the vtable. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that extracts, into @intf_bit_reg, the bit of the interface bitmap
 * (found at @base_reg + @offset) corresponding to @klass's interface id.
 * A nonzero result means the bitmap has the bit set. Three strategies:
 *   - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match icall;
 *   - AOT (uncompressed): compute byte index (iid >> 3) and mask (1 << (iid & 7))
 *     at run time from a patchable MONO_PATCH_INFO_IID constant;
 *   - JIT (uncompressed): byte index and mask are compile-time constants.
 * NOTE(review): #else/#endif and brace lines are elided in this view.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: derive byte address and bit mask from the patched iid at run time. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the mask are constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: run the interface-bitmap check against the bitmap embedded in
 * a MonoClass (klass_reg points to the MonoClass). */
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: same as the class variant above, but the bitmap is read from
 * a MonoVTable (vtable_reg points to the MonoVTable). */
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 * Emit a range check: @klass's interface id must not exceed the value in
 * @max_iid_reg. On failure, presumably either branch to @false_target or
 * throw InvalidCastException — the selecting if/else lines are elided in
 * this view; both emission forms are visible below. TODO confirm.
 */
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: compare against a patchable iid constant. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Wrapper: load max_interface_id (an unsigned 16-bit field) from a MonoVTable
 * and run the max-iid check. */
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Wrapper: load max_interface_id from a MonoClass and run the max-iid check. */
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subtype test using the supertypes table of the class in
 * @klass_reg: load supertypes [klass->idepth - 1] and compare it against
 * @klass (either a runtime value in @klass_ins, a patchable class constant in
 * AOT mode, or an immediate). If the class depths might not match
 * (idepth > MONO_DEFAULT_SUPERTABLE_SIZE), a depth check branching to
 * @false_target is emitted first; on an equal supertype, branch to
 * @true_target. NOTE(review): the branch selecting klass_ins vs. the
 * constant paths is partly elided in this view.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: the supertypes table may be shorter than idepth, so
 * verify the candidate's depth first. */
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst subtype test with no runtime class instruction. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast test against the vtable in @vtable_reg: first the
 * max-iid range check, then the interface-bitmap bit test. On a set bit,
 * branch to @true_target; otherwise throw InvalidCastException — the if/else
 * lines selecting between branch and throw are elided in this view.
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Same interface-cast test as mini_emit_iface_cast, but @klass_reg holds a
 * MonoClass pointer instead of a vtable pointer. */
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-identity check: compare the class pointer in @klass_reg
 * against @klass (runtime value in @klass_inst, patchable class constant in
 * AOT mode, or immediate) and throw InvalidCastException if they differ.
 * NOTE(review): the leading `if (klass_inst)` line is elided in this view.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare the class pointer in @klass_reg against @klass (patchable constant
 * in AOT mode, immediate otherwise) and branch to @target using @branch_op
 * (e.g. OP_PBEQ / OP_PBNE_UN). */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: needed because mini_emit_castclass_inst below recurses
 * through mini_emit_castclass for arrays of arrays. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check of the object class in @klass_reg against
 * @klass, throwing InvalidCastException on failure. The visible array branch
 * checks rank, then dispatches on the element (cast_class) type:
 *   - object element: accept enums-as-objects via parent/enum checks;
 *   - ValueType / Enum elements: exact class checks;
 *   - interface element: interface cast on the element class;
 *   - otherwise: recurse via mini_emit_castclass (obj_reg == -1 skips the
 *     vector check for arrays of arrays).
 * SZARRAY casts additionally verify the object has no bounds (is a vector).
 * The non-array branch walks the supertypes table like the isinst helper,
 * but throws instead of branching. NOTE(review): the enclosing
 * `if (klass->rank)` / else lines are elided in this view.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on mismatch. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no runtime class instruction. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that fills @size bytes at @destreg + @offset with @val
 * (only val == 0 is supported on some paths — see the g_assert). Small,
 * well-aligned sizes use immediate stores; otherwise a value register is
 * loaded and a sequence of widest-possible aligned stores is emitted,
 * narrowing to 4/2/1-byte stores for the tail. Unaligned wide stores are
 * only used when NO_UNALIGNED_ACCESS is not set.
 * NOTE(review): switch/case labels and the per-size loops are elided in
 * this view; the visible stores are the loop bodies.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the fill value in a register first. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR that copies @size bytes from @srcreg + @soffset to
 * @destreg + @doffset, as load/store pairs through a scratch register.
 * Wide (8/4-byte) copies are used where alignment (or !NO_UNALIGNED_ACCESS)
 * allows, narrowing to 2/1-byte copies for the tail. Size is asserted
 * < 10000 to bound code expansion. NOTE(review): the per-width while-loops
 * and offset-advance lines are elided in this view; the visible load/store
 * pairs are the loop bodies.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method return type to the matching call opcode family:
 * CALL / VOIDCALL / LCALL / FCALL / VCALL, each in a _REG (calli),
 * VIRT (callvirt) or plain variant selected by @calli / @virt.
 * Byref returns, enums (via their base type) and generic instances
 * (via the container class) are normalized before dispatch; @gsctx is
 * used to resolve shared generic types. Unknown types are a hard error.
 * NOTE(review): case labels and `goto handle_enum`-style lines appear
 * elided in this view.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
1817 if (type->data.klass->enumtype) {
/* Enums dispatch on their underlying integral type. */
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * Verifier helper: checks that the evaluation-stack item @arg (classified by
 * its STACK_* type and klass) can be stored into @target. Byref targets only
 * accept STACK_MP (with matching class) or STACK_PTR; value types require an
 * exact klass match; reference types only require STACK_OBJ (full type
 * compatibility is a visible FIXME). Returns nonzero on mismatch.
 * NOTE(review): `return 1;` / `return 0;` / case-label lines are elided
 * throughout this view — each visible `if` is followed by an elided
 * `return 1;`.
 */
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Validate the stack-type classification of @args against @sig; returns
 * nonzero when any argument cannot be passed (per-parameter STACK_* checks,
 * with the `this` argument checked for OBJ/MP/PTR first). As the visible
 * FIXME notes, the checks are not complete. NOTE(review): the `return 1;`
 * lines after each visible `if`, plus case labels and the `goto handle_enum`
 * for enums/generic insts, are elided in this view.
 */
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be statically dispatched). Most case labels and returns
 * are elided in this view; unknown opcodes hit the assertion. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its _MEMBASE form, used when the call target
 * is loaded from a vtable/IMT slot (basereg + offset). Some case labels are
 * elided in this view; unknown opcodes hit the assertion. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Set up the hidden IMT/method argument for an interface call. The value is
 * either the incoming @imt_arg, an AOT METHODCONST patch, or a direct PCONST
 * of call->method. LLVM mode records it in call->imt_arg_reg; otherwise it is
 * attached as an out-arg in MONO_ARCH_IMT_REG (or reg 0 just to keep it
 * alive), falling back to mono_arch_emit_imt_argument () on architectures
 * without an IMT register. NOTE(review): several #else/#endif and brace
 * lines are elided in this view.
 */
2083 #ifdef MONO_ARCH_HAVE_IMT
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three sources for the method value. */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from mempool @mp and fill in its type and target.
 * NOTE(review): the lines setting ji->ip and ji->type are elided here. */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for @sig/@args. Selects the opcode (OP_TAILCALL for
 * tail calls, otherwise via ret_type_to_call_opcode), sets up the vtype
 * return (either through cfg->vret_addr or a temp whose address is passed
 * via the pseudo-op OP_OUTARG_VTRETADDR — see the long comment below),
 * converts R4 arguments to icall results under soft-float, and finally lets
 * LLVM or the backend lower the argument passing. Also accumulates
 * cfg->param_area and marks MONO_CFG_HAS_CALLS.
 * NOTE(review): declarations, some braces and #endif lines are elided in
 * this view.
 */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* First vtype-return path: reuse the caller-provided return buffer.
 * NOTE(review): the guarding condition for this branch is elided. */
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call (calli) through the address in @addr: build the call
 * with calli=TRUE, wire addr->dreg as sreg1, and append it to the current bb. */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/* Attach the runtime-generic-context argument to @call: on architectures with
 * a dedicated MONO_ARCH_RGCTX_REG it becomes an out-arg register (marking
 * cfg->uses_rgctx_reg / call->rgctx_reg); otherwise only call->rgctx_arg_reg
 * is recorded. NOTE(review): the #else/#endif lines are elided in this view. */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call variant that additionally passes an rgctx argument: copy
 * rgctx_arg->dreg into a fresh preg, emit the calli, then attach the rgctx
 * via set_rgctx_arg. NOTE(review): the `if (rgctx_arg)` guards are elided. */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations for the rgctx lookup helpers used below. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) call to @method. Handles, in order:
 *  - string ctors (signature rewritten to return string);
 *  - possibly-remote targets (MarshalByRef/object), via a remoting-invoke
 *    wrapper, or in shared-generic code via an rgctx lookup + calli;
 *  - multicast-delegate Invoke through delegate->invoke_impl (membase call);
 *  - non-virtual or sealed/final targets as direct calls with a null/this
 *    check;
 *  - true virtual dispatch: interface calls through IMT slots (negative
 *    offsets before the vtable) or the interface-offsets table, and class
 *    virtual calls through the vtable slot, with an IMT argument for
 *    generic virtual methods.
 * NOTE(review): many guard lines, braces and #endif lines are elided in
 * this view; comments below describe only what the visible code shows.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
/* Shared generic code cannot use remoting wrappers directly; resolve the
 * invoke-with-check address through the rgctx and do an indirect call. */
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with a fault check on this). */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
/* No IMT: index the per-interface method table via interface_offsets. */
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index the vtable slot directly. */
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes @vtable_arg as the
 * rgctx argument (copied into a fresh preg first, then attached via
 * set_rgctx_arg). NOTE(review): the `if (vtable_arg)` guards are elided. */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function @func with signature @sig.
 * NOTE(review): the line storing @func into the call (fptr) is elided here. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emit a call to the registered JIT icall identified by its address @func:
 * look up its MonoJitICallInfo and call through the icall wrapper. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * Emit a call whose address is a patchable MonoJumpInfo: the ji pointer is
 * passed as the "function address" and registered in cfg->abs_patches so the
 * PATCH_INFO_ABS resolver can substitute the real target; fptr_is_patch
 * marks the call accordingly.
 */
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * Widen the result INS of a call with signature FSIG.  Native
 * (pinvoke) code and LLVM-compiled code may return integers smaller
 * than a register without normalizing the upper bits, so emit an
 * explicit sign/zero extension selected from the return type's load
 * opcode.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
/* The widened result keeps the original value's stack type */
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return the managed String.memcpy(3 args) helper, cached in a
 * static.  Aborts if the installed corlib doesn't provide it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * OR into *WB_BITMAP one bit per pointer-sized slot of KLASS that
 * holds an object reference, with byte offset OFFSET added before
 * converting to a slot index.  Recurses into reference-bearing
 * valuetype fields; static fields are skipped.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the MonoObject header; strip it */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for a store through PTR of VALUE (or of
 * VALUE_REG when VALUE is NULL).  Prefers, in order: the arch's
 * OP_CARD_TABLE_WBARRIER opcode, an inline card-table dirty-byte
 * store, and finally a call to the generic managed write barrier.
 * A dummy use keeps the stored value alive when no barrier path
 * consumed it.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2597 int card_table_shift_bits;
2598 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits);
2599 gboolean need_dummy_use = TRUE;
2600 MonoInst *dummy_use;
2602 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2603 int nursery_shift_bits;
2604 size_t nursery_size;
2606 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2608 if (card_table && nursery_shift_bits > 0) {
2611 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2612 wbarrier->sreg1 = ptr->dreg;
2614 wbarrier->sreg2 = value->dreg;
2616 wbarrier->sreg2 = value_reg;
2617 MONO_ADD_INS (cfg->cbb, wbarrier);
/* The barrier opcode references the value itself, no dummy use needed */
2619 need_dummy_use = FALSE;
/* Inline card marking: card_table [ptr >> shift] = 1 */
2623 int offset_reg = alloc_preg (cfg);
2625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PADD_IMM, offset_reg, offset_reg, card_table);
2627 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the managed write barrier with the destination */
2631 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2632 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2636 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2638 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2639 dummy_use->sreg1 = value_reg;
2640 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit an inline valuetype copy of KLASS that honours GC write
 * barriers, unrolling up to 5 pointer-word stores.  Bails out early
 * (presumably returning failure — the return lines are elided here)
 * when alignment is below pointer size or SIZE exceeds the 32-slot
 * barrier bitmap.  Copies larger than 5 words are delegated to the
 * mono_gc_wbarrier_value_copy_bitmap icall instead.
 */
2646 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2648 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2649 unsigned need_wb = 0;
2654 /*types with references can't have alignment smaller than sizeof(void*) */
2655 if (align < SIZEOF_VOID_P)
2658 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2659 if (size > 32 * SIZEOF_VOID_P)
2662 create_write_barrier_bitmap (klass, &need_wb, 0);
2664 /* We don't unroll more than 5 stores to avoid code bloat. */
2665 if (size > 5 * SIZEOF_VOID_P) {
2666 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a whole number of pointer words */
2667 size += (SIZEOF_VOID_P - 1);
2668 size &= ~(SIZEOF_VOID_P - 1);
2670 EMIT_NEW_ICONST (cfg, iargs [2], size);
2671 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2672 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2676 destreg = iargs [0]->dreg;
2677 srcreg = iargs [1]->dreg;
2680 dest_ptr_reg = alloc_preg (cfg);
2681 tmp_reg = alloc_preg (cfg);
2684 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy; barrier the slots flagged in need_wb */
2686 while (size >= SIZEOF_VOID_P) {
2687 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2691 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2693 offset += SIZEOF_VOID_P;
2694 size -= SIZEOF_VOID_P;
2697 /*tmp += sizeof (void*)*/
2698 if (size >= SIZEOF_VOID_P) {
2699 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2700 MONO_ADD_INS (cfg->cbb, iargs [0]);
2704 /* Those cannot be references since size < sizeof (void*) */
2706 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2721 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2730 * Emit code to copy a valuetype of type @klass whose address is stored in
2731 * @src->dreg to memory whose address is stored at @dest->dreg.
2734 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2736 MonoInst *iargs [4];
2739 MonoMethod *memcpy_method;
2743 * This check breaks with spilled vars... need to handle it during verification anyway.
2744 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Marshalled (native) layout size vs managed layout size */
2748 n = mono_class_native_size (klass, &align);
2750 n = mono_class_value_size (klass, &align);
2752 /* if native is true there should be no references in the struct */
2753 if (cfg->gen_write_barriers && klass->has_references && !native) {
2754 /* Avoid barriers when storing to the stack */
2755 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2756 (dest->opcode == OP_LDADDR))) {
2757 int context_used = 0;
2762 if (cfg->generic_sharing_context)
2763 context_used = mono_class_check_context_used (klass);
2765 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2766 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2768 } else if (context_used) {
2769 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2771 if (cfg->compile_aot) {
2772 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2774 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2775 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy through the runtime's value-copy icall */
2779 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2784 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2785 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2786 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2790 EMIT_NEW_ICONST (cfg, iargs [2], n);
2792 memcpy_method = get_memcpy_method ();
2793 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return the managed String.memset(3 args) helper, cached in a
 * static.  Aborts if the installed corlib doesn't provide it.
 */
2798 get_memset_method (void)
2800 static MonoMethod *memset_method = NULL;
2801 if (!memset_method) {
2802 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2804 g_error ("Old corlib found. Install a new one");
2806 return memset_method;
/*
 * mini_emit_initobj:
 * Zero-initialize the valuetype KLASS at address DEST->dreg: inline
 * memset for small types (<= 5 pointer words), otherwise a call to
 * the managed String.memset helper.
 */
2810 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2812 MonoInst *iargs [3];
2815 MonoMethod *memset_method;
2817 /* FIXME: Optimize this for the case when dest is an LDADDR */
2819 mono_class_init (klass);
2820 n = mono_class_value_size (klass, &align);
2822 if (n <= sizeof (gpointer) * 5) {
2823 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2826 memset_method = get_memset_method ();
2828 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2829 EMIT_NEW_ICONST (cfg, iargs [2], n);
2830 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR yielding the runtime generic context for METHOD under the
 * sharing described by CONTEXT_USED: the method RGCTX variable when
 * the method's own context is used, the vtable variable for static /
 * valuetype methods (dereferenced to its class vtable when the method
 * is inflated with a method context), otherwise the vtable loaded
 * from the `this' argument.
 */
2835 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2837 MonoInst *this = NULL;
2839 g_assert (cfg->generic_sharing_context);
/* Only load `this' when the context comes from the receiver's vtable */
2841 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2842 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2843 !method->klass->valuetype)
2844 EMIT_NEW_ARGLOAD (cfg, this, 0);
2846 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2847 MonoInst *mrgctx_loc, *mrgctx_var;
2850 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2852 mrgctx_loc = mono_get_vtable_var (cfg);
2853 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2856 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2857 MonoInst *vtable_loc, *vtable_var;
2861 vtable_loc = mono_get_vtable_var (cfg);
2862 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2864 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2865 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an MRGCTX; load its class vtable field */
2868 vtable_reg = alloc_preg (cfg);
2869 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2870 vtable_var->type = STACK_PTR;
2876 int vtable_reg, res_reg;
2878 vtable_reg = alloc_preg (cfg);
2879 res_reg = alloc_preg (cfg);
2880 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from mempool MP) an RGCTX-entry descriptor: the
 * requesting METHOD, whether the entry lives in an MRGCTX, a nested
 * MonoJumpInfo holding PATCH_TYPE/PATCH_DATA, and the slot INFO_TYPE.
 */
2885 static MonoJumpInfoRgctxEntry *
2886 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2888 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2889 res->method = method;
2890 res->in_mrgctx = in_mrgctx;
2891 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2892 res->data->type = patch_type;
2893 res->data->data.target = patch_data;
2894 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy RGCTX fetch trampoline which resolves ENTRY
 * against the runtime generic context in RGCTX at execution time.
 */
2899 static inline MonoInst*
2900 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2902 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR to load the RGCTX_TYPE property of KLASS from the current
 * method's runtime generic context via the lazy fetch trampoline.
 */
2906 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2907 MonoClass *klass, int rgctx_type)
2909 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2910 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2912 return emit_rgctx_fetch (cfg, rgctx, entry);
2916 * emit_get_rgctx_method:
2918 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2919 * normal constants, else emit a load from the rgctx.
2922 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2923 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant */
2925 if (!context_used) {
2928 switch (rgctx_type) {
2929 case MONO_RGCTX_INFO_METHOD:
2930 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2932 case MONO_RGCTX_INFO_METHOD_RGCTX:
2933 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2936 g_assert_not_reached ();
/* Shared code: fetch the value lazily from the rgctx */
2939 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2940 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2942 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR to load the RGCTX_TYPE property of FIELD from the current
 * method's runtime generic context via the lazy fetch trampoline.
 */
2947 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2948 MonoClassField *field, int rgctx_type)
2950 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2951 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2953 return emit_rgctx_fetch (cfg, rgctx, entry);
2957 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 * Emit a call to the generic class-init trampoline for KLASS, passing
 * its vtable (fetched from the rgctx under generic sharing, else as a
 * constant).  An LLVM-specific trampoline signature is used under
 * LLVM compilation.
 */
2960 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2962 MonoInst *vtable_arg;
2964 int context_used = 0;
2966 if (cfg->generic_sharing_context)
2967 context_used = mono_class_check_context_used (klass);
2970 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2971 klass, MONO_RGCTX_INFO_VTABLE);
2973 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2977 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2980 if (COMPILE_LLVM (cfg))
2981 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2983 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2984 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated arch register when available */
2985 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2986 cfg->uses_vtable_reg = TRUE;
2993 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 * Emit a runtime check that OBJ's dynamic type matches ARRAY_CLASS,
 * comparing klass pointers or vtables depending on shared / gshared /
 * AOT mode, and throwing ArrayTypeMismatchException on mismatch.
 */
2996 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2998 int vtable_reg = alloc_preg (cfg);
2999 int context_used = 0;
3001 if (cfg->generic_sharing_context)
3002 context_used = mono_class_check_context_used (array_class);
/* Faulting load doubles as the null check on OBJ */
3004 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3006 if (cfg->opt & MONO_OPT_SHARED) {
3007 int class_reg = alloc_preg (cfg);
3008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3009 if (cfg->compile_aot) {
3010 int klass_reg = alloc_preg (cfg);
3011 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3012 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3016 } else if (context_used) {
3017 MonoInst *vtable_ins;
/* Under gsharing the expected vtable comes from the rgctx */
3019 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3020 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3022 if (cfg->compile_aot) {
3026 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3028 vt_reg = alloc_preg (cfg);
3029 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3030 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3033 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3039 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 * When --debug=casts is enabled, record the object's actual class
 * (loaded via OBJ_REG's vtable) and the target KLASS into the
 * thread's MonoJitTlsData so a failing cast can report both types.
 */
3043 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3045 if (mini_get_debug_options ()->better_cast_details) {
3046 int to_klass_reg = alloc_preg (cfg);
3047 int vtable_reg = alloc_preg (cfg);
3048 int klass_reg = alloc_preg (cfg);
3049 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3052 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3056 MONO_ADD_INS (cfg->cbb, tls_get);
3057 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3060 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3061 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3062 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 * Clear the per-thread cast details recorded by save_cast_details ();
 * resetting the `class_cast_from' field alone is sufficient.
 */
3067 reset_cast_details (MonoCompile *cfg)
3069 /* Reset the variables holding the cast details */
3070 if (mini_get_debug_options ()->better_cast_details) {
3071 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3073 MONO_ADD_INS (cfg->cbb, tls_get);
3074 /* It is enough to reset the from field */
3075 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3080 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3081 * generic code is generated.
3084 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3086 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3089 MonoInst *rgctx, *addr;
3091 /* FIXME: What if the class is shared? We might not
3092 have to get the address of the method from the
3094 addr = emit_get_rgctx_method (cfg, context_used, method,
3095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3097 rgctx = emit_get_rgctx (cfg, method, context_used);
/* Shared case: indirect call through the rgctx-resolved code address */
3099 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared case: call Nullable<T>.Unbox directly */
3101 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 * Emit the unbox sequence for KLASS on the object at SP [0]: verify
 * the object's vtable rank is 0 and its element class matches
 * KLASS->element_class, then yield a managed pointer just past the
 * MonoObject header.
 */
3106 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3110 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3111 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3112 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3113 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3115 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3116 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3117 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3119 /* FIXME: generics */
3120 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype: rank must be 0 */
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3124 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3130 MonoInst *element_class;
3132 /* This assertion is from the unboxcast insn */
3133 g_assert (klass->rank == 0);
3135 element_class = emit_get_rgctx_klass (cfg, context_used,
3136 klass->element_class, MONO_RGCTX_INFO_KLASS);
3138 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3139 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3141 save_cast_details (cfg, klass->element_class, obj_reg);
3142 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3143 reset_cast_details (cfg);
/* Result: managed pointer to the value, just past the object header */
3146 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3147 MONO_ADD_INS (cfg->cbb, add);
3148 add->type = STACK_MP;
3155 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 * Emit object allocation for KLASS (FOR_BOX when boxing).  Chooses
 * among: an rgctx-resolved class/vtable under generic sharing, the
 * domain-shared mono_object_new path, a corlib-token helper for
 * out-of-line AOT code, a GC managed allocator method, or the class's
 * allocation function.
 * NOTE(review): several branch/return lines are elided in this view.
 */
3158 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3160 MonoInst *iargs [2];
3166 MonoInst *iargs [2];
3169 FIXME: we cannot get managed_alloc here because we can't get
3170 the class's vtable (because it's not a closed class)
3172 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3173 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3176 if (cfg->opt & MONO_OPT_SHARED)
3177 rgctx_info = MONO_RGCTX_INFO_KLASS;
3179 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3180 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3182 if (cfg->opt & MONO_OPT_SHARED) {
3183 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3185 alloc_ftn = mono_object_new;
3188 alloc_ftn = mono_object_new_specific;
3191 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3194 if (cfg->opt & MONO_OPT_SHARED) {
3195 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3196 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3198 alloc_ftn = mono_object_new;
3199 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3200 /* This happens often in argument checking code, eg. throw new FooException... */
3201 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3202 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3203 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3205 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3206 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error to the caller */
3210 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3211 cfg->exception_ptr = klass;
3215 #ifndef MONO_CROSS_COMPILE
3216 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3219 if (managed_alloc) {
3220 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3221 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3223 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators also take the instance size in pointer words */
3225 guint32 lw = vtable->klass->instance_size;
3226 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3227 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3228 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3231 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3235 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3239 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 * Box VAL of type KLASS.  Nullable<T> boxes through Nullable.Box
 * (via an rgctx-resolved indirect call under generic sharing);
 * otherwise allocate the object and store the value just past the
 * MonoObject header.
 */
3242 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3244 MonoInst *alloc, *ins;
3246 if (mono_class_is_nullable (klass)) {
3247 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3250 /* FIXME: What if the class is shared? We might not
3251 have to get the method address from the RGCTX. */
3252 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3253 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3254 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3256 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3258 return mono_emit_method_call (cfg, method, &val, NULL);
3262 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header */
3266 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3271 // FIXME: This doesn't work yet (class libs tests fail?)
/* Classes whose isinst/castclass cannot be checked inline and must go
 * through an icall.  Currently forced TRUE for every class (see the
 * FIXME above), so the inline fast paths below are effectively dead. */
3272 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3275 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 * Emit a castclass of SRC to KLASS.  "Complex" classes go through the
 * mono_object_castclass icall; the inline path branches past the
 * checks for null, then compares the interface/vtable/klass and
 * throws InvalidCastException on mismatch, recording cast details for
 * better diagnostics.
 */
3278 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3280 MonoBasicBlock *is_null_bb;
3281 int obj_reg = src->dreg;
3282 int vtable_reg = alloc_preg (cfg);
3283 MonoInst *klass_inst = NULL;
3288 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3289 klass, MONO_RGCTX_INFO_KLASS);
3291 if (is_complex_isinst (klass)) {
3292 /* Complex case, handle by an icall */
3298 args [1] = klass_inst;
3300 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3302 /* Simple case, handled by the code below */
/* Null references always pass the cast: branch straight to the end */
3306 NEW_BBLOCK (cfg, is_null_bb);
3308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3309 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3311 save_cast_details (cfg, klass, obj_reg);
3313 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3314 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3315 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3317 int klass_reg = alloc_preg (cfg);
3319 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: an exact klass compare is sufficient */
3321 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3322 /* the remoting code is broken, access the class for now */
3323 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3324 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3326 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3327 cfg->exception_ptr = klass;
3330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3335 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3338 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3342 MONO_START_BB (cfg, is_null_bb);
3344 reset_cast_details (cfg);
3350 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit an isinst test of SRC against KLASS, producing the object on
 * success and NULL (0) on failure.  "Complex" classes go through the
 * mono_object_isinst icall; the inline path dispatches on interface /
 * array / nullable / sealed / generic cases, branching to false_bb on
 * mismatch and to is_null_bb (which passes the input through) on
 * match or null input.
 */
3353 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3356 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3357 int obj_reg = src->dreg;
3358 int vtable_reg = alloc_preg (cfg);
3359 int res_reg = alloc_preg (cfg);
3360 MonoInst *klass_inst = NULL;
3363 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3365 if (is_complex_isinst (klass)) {
3368 /* Complex case, handle by an icall */
3374 args [1] = klass_inst;
3376 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3378 /* Simple case, the code below can handle it */
3382 NEW_BBLOCK (cfg, is_null_bb);
3383 NEW_BBLOCK (cfg, false_bb);
3384 NEW_BBLOCK (cfg, end_bb);
3386 /* Do the assignment at the beginning, so the other assignment can be if converted */
3387 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3388 ins->type = STACK_OBJ;
/* A null input is trivially "not an instance" but still yields null */
3391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3392 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3396 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3397 g_assert (!context_used);
3398 /* the is_null_bb target simply copies the input register to the output */
3399 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3401 int klass_reg = alloc_preg (cfg);
/* Array case: compare rank, then dispatch on the element class */
3404 int rank_reg = alloc_preg (cfg);
3405 int eclass_reg = alloc_preg (cfg);
3407 g_assert (!context_used);
3408 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3413 if (klass->cast_class == mono_defaults.object_class) {
3414 int parent_reg = alloc_preg (cfg);
3415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3416 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3417 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3419 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3420 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3421 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3422 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3423 } else if (klass->cast_class == mono_defaults.enum_class) {
3424 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3426 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3427 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3429 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3430 /* Check that the object is a vector too */
3431 int bounds_reg = alloc_preg (cfg);
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3434 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3437 /* the is_null_bb target simply copies the input register to the output */
3438 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3440 } else if (mono_class_is_nullable (klass)) {
3441 g_assert (!context_used);
3442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3443 /* the is_null_bb target simply copies the input register to the output */
3444 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-shared class: an exact klass compare is sufficient */
3446 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3447 g_assert (!context_used);
3448 /* the remoting code is broken, access the class for now */
3449 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3450 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3452 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3453 cfg->exception_ptr = klass;
3456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3465 /* the is_null_bb target simply copies the input register to the output */
3466 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: the result becomes NULL */
3471 MONO_START_BB (cfg, false_bb);
3473 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3476 MONO_START_BB (cfg, is_null_bb);
3478 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit the remoting-aware CEE_MONO_CISINST check, producing the
 * 0/1/2 code described in the comment below.  Transparent-proxy
 * objects with custom type info take the "cannot determine" (2) path.
 */
3484 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3486 /* This opcode takes as input an object reference and a class, and returns:
3487 0) if the object is an instance of the class,
3488 1) if the object is not instance of the class,
3489 2) if the object is a proxy whose type cannot be determined */
3492 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3493 int obj_reg = src->dreg;
3494 int dreg = alloc_ireg (cfg);
3496 int klass_reg = alloc_preg (cfg);
3498 NEW_BBLOCK (cfg, true_bb);
3499 NEW_BBLOCK (cfg, false_bb);
3500 NEW_BBLOCK (cfg, false2_bb);
3501 NEW_BBLOCK (cfg, end_bb);
3502 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null input: not an instance (result 1) */
3504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3507 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3508 NEW_BBLOCK (cfg, interface_fail_bb);
3510 tmp_reg = alloc_preg (cfg);
3511 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3512 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: check for a transparent proxy */
3513 MONO_START_BB (cfg, interface_fail_bb);
3514 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3516 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3518 tmp_reg = alloc_preg (cfg);
3519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3523 tmp_reg = alloc_preg (cfg);
3524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3527 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead */
3528 tmp_reg = alloc_preg (cfg);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3532 tmp_reg = alloc_preg (cfg);
3533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3537 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3540 MONO_START_BB (cfg, no_proxy_bb);
3542 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result 1: not an instance */
3545 MONO_START_BB (cfg, false_bb);
3547 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Result 2: proxy whose type cannot be determined */
3550 MONO_START_BB (cfg, false2_bb);
3552 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Result 0: is an instance */
3555 MONO_START_BB (cfg, true_bb);
3557 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3559 MONO_START_BB (cfg, end_bb);
3562 MONO_INST_NEW (cfg, ins, OP_ICONST);
3564 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware CASTCLASS opcode. Takes an object
 * reference SRC and a class KLASS, and produces an integer result in a
 * fresh vreg:
 */
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
/* This opcode takes as input an object reference and a class, and returns:
0) if the object is an instance of the class,
1) if the object is a proxy whose type cannot be determined
an InvalidCastException exception is thrown otherwise */
MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
int obj_reg = src->dreg;
int dreg = alloc_ireg (cfg);
int tmp_reg = alloc_preg (cfg);
int klass_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, ok_result_bb);
/* A NULL reference casts successfully: branch straight to the 0 result. */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Remember what is being cast so a failure can build a detailed exception message. */
save_cast_details (cfg, klass, obj_reg);
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: check the vtable's interface bitmap first. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
MONO_START_BB (cfg, interface_fail_bb);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* If the iface check failed and this is not a transparent proxy, throw. */
mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
tmp_reg = alloc_preg (cfg);
/* Proxy without custom type info cannot be checked here: throw InvalidCastException. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1, the runtime resolves it later. */
MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: is the object a transparent proxy at all? */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
tmp_reg = alloc_preg (cfg);
/* It is a proxy: check against the remote class the proxy stands for. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
tmp_reg = alloc_preg (cfg);
/* Without custom type info, fall back to the plain class check below. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
NEW_BBLOCK (cfg, fail_1_bb);
/* Proxy class mismatch yields result 1 (undecidable here), not an exception. */
mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
MONO_START_BB (cfg, fail_1_bb);
MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a failing check here throws InvalidCastException. */
mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
MONO_START_BB (cfg, ok_result_bb);
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack. */
MONO_INST_NEW (cfg, ins, OP_ICONST);
ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR for a delegate constructor: allocate the delegate
 * object of class KLASS, store TARGET and METHOD into it, and install the
 * delegate trampoline as invoke_impl. CONTEXT_USED is forwarded to the
 * rgctx fetch helpers for generic sharing.
 *
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
gpointer *trampoline;
MonoInst *obj, *method_ins, *tramp_ins;
obj = handle_alloc (cfg, klass, FALSE, 0);
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
/* Optimize away setting of NULL target */
if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Set method field */
method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
/*
* To avoid looking up the compiled code belonging to the target method
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
if (!cfg->compile_aot && !method->dynamic) {
MonoInst *code_slot_ins;
code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method -> slot mapping is shared per domain; guard it with the domain lock. */
domain = mono_domain_get ();
mono_domain_lock (domain);
if (!domain_jit_info (domain)->method_code_hash)
domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
/* First delegate over this method: allocate a zeroed slot in domain memory. */
code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
mono_domain_unlock (domain);
EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Set invoke_impl field */
if (cfg->compile_aot) {
/* Under AOT the trampoline address is patched in at load time. */
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
trampoline = mono_create_delegate_trampoline (klass);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall for a NEWOBJ on a
 * multi-dimensional array ctor with RANK dimensions; SP holds the
 * dimension arguments. Returns the call instruction.
 */
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
MonoJitICallInfo *info;
/* Need to register the icall so it gets an icall wrapper */
info = mono_get_array_new_va_icall (rank);
cfg->flags |= MONO_CFG_HAS_VARARGS;
/* mono_array_new_va () needs a vararg calling convention */
cfg->disable_llvm = TRUE;
/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at
 * the start of the method, plus a dummy use that keeps the variable alive.
 * No-op if there is no got_var or it was already allocated.
 */
mono_emit_load_got_addr (MonoCompile *cfg)
MonoInst *getaddr, *dummy_use;
if (!cfg->got_var || cfg->got_var_allocated)
MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
getaddr->dreg = cfg->got_var->dreg;
/* Add it to the start of the first bblock */
if (cfg->bb_entry->code) {
/* Prepend by hand: the GOT address must be set before any other instruction runs. */
getaddr->next = cfg->bb_entry->code;
cfg->bb_entry->code = getaddr;
MONO_ADD_INS (cfg->bb_entry, getaddr);
cfg->got_var_allocated = TRUE;
/*
* Add a dummy use to keep the got_var alive, since real uses might
* only be generated by the back ends.
* Add it to end_bblock, so the variable's lifetime covers the whole
* method.
* It would be better to make the usage of the got var explicit in all
* cases when the backend needs it (i.e. calls, throw etc.), so this
* wouldn't be needed.
*/
NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes); lazily initialized from MONO_INLINELIMIT,
 * defaulting to INLINE_LENGTH_LIMIT. */
static int inline_limit;
static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Returns TRUE when inlining is allowed; rejects generic sharing,
 * deep inline chains, uninlinable flags, oversized bodies, classes whose
 * cctor cannot be run up front, declarative security, and (on soft-float
 * targets) methods passing or returning R4.
 */
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
MonoMethodHeaderSummary header;
#ifdef MONO_ARCH_SOFT_FLOAT
MonoMethodSignature *sig = mono_method_signature (method);
if (cfg->generic_sharing_context)
if (cfg->inline_depth > 10)
#ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch references `signature` while the visible local is
 * `sig` (declared under MONO_ARCH_SOFT_FLOAT) — presumably another declaration
 * exists when MONO_ARCH_HAVE_LMF_OPS is defined; confirm on such targets. */
if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
!MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
if (!mono_method_get_header_summary (method, &header))
/*runtime, icall and pinvoke are checked by summary call*/
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
(method->klass->marshalbyref) ||
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
if (!inline_limit_inited) {
/* NOTE(review): getenv() is called twice; caching the result once would be
 * cleaner, though harmless here since the value is read immediately. */
if (getenv ("MONO_INLINELIMIT"))
inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
inline_limit = INLINE_LENGTH_LIMIT;
inline_limit_inited = TRUE;
if (header.code_size >= inline_limit)
/*
* if we can initialize the class of the method right away, we do,
* otherwise we don't allow inlining if the class needs initialization,
* since it would mean inserting a call to mono_runtime_class_init()
* inside the inlined code
*/
if (!(cfg->opt & MONO_OPT_SHARED)) {
if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
if (cfg->run_cctors && method->klass->has_cctor) {
/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
if (!method->klass->runtime_info)
/* No vtable created yet */
vtable = mono_class_vtable (cfg->domain, method->klass);
/* This makes so that inline cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
mono_runtime_class_init (vtable);
} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
if (!method->klass->runtime_info)
/* No vtable created yet */
vtable = mono_class_vtable (cfg->domain, method->klass);
if (!vtable->initialized)
/*
* If we're compiling for shared code
* the cctor will need to be run at aot method load time, for example,
* or at the end of the compilation of the inlining method.
*/
if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/*
* CAS - do not inline methods with declarative security
* Note: this has to be before any possible return TRUE;
*/
if (mono_method_has_declsec (method))
#ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline code that moves R4 values around. */
if (sig->ret && sig->ret->type == MONO_TYPE_R4)
for (i = 0; i < sig->param_count; ++i)
if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires a class
 * initialization check for VTABLE's class at run time. Already-initialized
 * classes (outside AOT), beforefieldinit classes, classes without a
 * runnable cctor, and instance methods of the same class are exempt.
 */
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
if (vtable->initialized && !cfg->compile_aot)
if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
if (!mono_class_needs_cctor_run (vtable->klass, method))
if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS. A bounds check against max_length is
 * emitted when BCHECK is TRUE. Returns the address instruction (STACK_PTR).
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
int mult_reg, add_reg, array_reg, index_reg, index2_reg;
mono_class_init (klass);
size = mono_class_array_element_size (klass);
mult_reg = alloc_preg (cfg);
array_reg = arr->dreg;
index_reg = index->dreg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
index2_reg = index_reg;
index2_reg = alloc_preg (cfg);
/* Sign-extend the 32 bit index into a full pointer-sized register. */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
if (index->type == STACK_I8) {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
index2_reg = index_reg;
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
/* Indexed by element size; entry 0 is an unused filler, -1 marks unsupported sizes. */
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
/* Fold base + index*size + offset into one LEA on x86/amd64. */
EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
ins->type = STACK_PTR;
add_reg = alloc_preg (cfg);
/* Generic path: addr = arr + index*size + offsetof(MonoArray, vector). */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
3959 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS, including per-
 * dimension range checks against the MonoArrayBounds records. Returns the
 * address instruction (STACK_MP).
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
int bounds_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
int mult_reg = alloc_preg (cfg);
int mult2_reg = alloc_preg (cfg);
int low1_reg = alloc_preg (cfg);
int low2_reg = alloc_preg (cfg);
int high1_reg = alloc_preg (cfg);
int high2_reg = alloc_preg (cfg);
int realidx1_reg = alloc_preg (cfg);
int realidx2_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
mono_class_init (klass);
size = mono_class_array_element_size (klass);
index1 = index_ins1->dreg;
index2 = index_ins2->dreg;
/* range checking */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned-compare vs length. */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
/* LE_UN also catches negative realidx values via the unsigned comparison. */
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds record follows the first in memory. */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof(MonoArray, vector) */
MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the array Address/Get/Set
 * helper CMETHOD: rank 1 and (optionally) rank 2 arrays get inlined IR,
 * higher ranks call the marshalled array-address wrapper. SP holds the
 * array followed by the indices; IS_SET excludes the trailing value arg.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
MonoMethod *addr_method;
rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/* emit_ldelema_2 depends on OP_LMUL */
if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
element_size = mono_class_array_element_size (cmethod->klass->element_class);
addr_method = mono_marshal_get_array_address (rank, element_size);
addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see mono_set_break_policy). */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; never NULL — reset to the default when unset. */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4054 * mono_set_break_policy:
4055 * policy_callback: the new callback function
* Allow embedders to decide whether to actually obey breakpoint instructions
4058 * (both break IL instructions and Debugger.Break () method calls), for example
4059 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4060 * untrusted or semi-trusted code.
4062 * @policy_callback will be called every time a break point instruction needs to
4063 * be inserted with the method argument being the method that calls Debugger.Break()
4064 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4065 * if it wants the breakpoint to not be effective in the given method.
4066 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * always-insert behavior (documented in the comment block above). */
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
if (policy_callback)
break_policy_func = policy_callback;
break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical typo kept; callers use this name)
 *
 *   Consult the registered break policy for METHOD and return whether a
 * breakpoint instruction should actually be emitted for it.
 */
should_insert_brekpoint (MonoMethod *method) {
switch (break_policy_func (method)) {
case MONO_BREAK_POLICY_ALWAYS:
case MONO_BREAK_POLICY_NEVER:
case MONO_BREAK_POLICY_ON_DBG:
/* Only break when running under the Mono debugger. */
return mono_debug_using_mono_debugger ();
g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the array element copy: args[0] is the array, args[1] the index,
 * args[2] a pointer to the value. IS_SET selects store-into-array versus
 * load-out-of-array; the element class comes from fsig->params[2].
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
MonoInst *addr, *store, *load;
MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
/* the bounds check is already done by the callers */
addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: *element = *value_ptr. */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Get: *value_ptr = *element. */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace the constructor call CMETHOD with an intrinsic
 * instruction sequence; currently only SIMD ctor intrinsics are attempted.
 * Returns the emitted instruction, or NULL when no intrinsic applies.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
MonoInst *ins = NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: if CMETHOD is one of the well-known corlib
 * methods handled below (String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Debugger, Environment, Math, SIMD), emit an inline
 * IR sequence replacing the call and return its result instruction.
 * Returns NULL when no intrinsic applies; the arch-specific hook gets the
 * final say at the bottom.
 */
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
static MonoClass *runtime_helpers_class = NULL;
if (! runtime_helpers_class)
runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
"System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
index_reg = args [1]->dreg;
MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* str + index*2 + offsetof(chars) in a single LEA. */
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
/* Avoid a warning */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, G_STRUCT_OFFSET (MonoString, chars));
type_from_op (ins, NULL, NULL);
} else if (strcmp (cmethod->name, "get_Length") == 0) {
int dreg = alloc_ireg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
ins->type = STACK_I4;
ins->flags |= MONO_INST_FAULT;
cfg->cbb->has_array_access = TRUE;
cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
/* The corlib functions check for oob already. */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
} else if (cmethod->klass == mono_defaults.object_class) {
if (strcmp (cmethod->name, "GetType") == 0) {
int dreg = alloc_preg (cfg);
int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, with a NULL check fault on the vtable load. */
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
type_from_op (ins, NULL, NULL);
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash on the object address; only valid with a non-moving GC. */
} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
ins->type = STACK_I4;
} else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor() is a no-op. */
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
} else if (cmethod->klass == mono_defaults.array_class) {
if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
#ifndef MONO_BIG_ARRAYS
/*
* This is an inline version of GetLength/GetLowerBound(0) used frequently in
* generated code.
*/
if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
int bounds_reg = alloc_ireg (cfg);
MonoBasicBlock *end_bb, *szarray_bb;
gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means a szarray (single-dim, zero-based). */
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
/* Non-szarray case */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0. */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_START_BB (cfg, end_bb);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
ins->type = STACK_I4;
if (cmethod->name [0] != 'g')
if (strcmp (cmethod->name, "get_Rank") == 0) {
int dreg = alloc_ireg (cfg);
int vtable_reg = alloc_preg (cfg);
/* Rank is cached in the vtable. */
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
type_from_op (ins, NULL, NULL);
} else if (strcmp (cmethod->name, "get_Length") == 0) {
int dreg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers intrinsics ---- */
} else if (cmethod->klass == runtime_helpers_class) {
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread intrinsics ---- */
} else if (cmethod->klass == mono_defaults.thread_class) {
if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* The trampolines don't work under SGEN */
gboolean is_moving_gc = mono_gc_is_moving ();
if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
if (COMPILE_LLVM (cfg)) {
/*
* Pass the argument normally, the LLVM backend will handle the
* calling convention problems.
*/
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
/* The trampoline expects the object in a fixed register, not on the stack. */
mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
return (MonoInst*)call;
} else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
if (COMPILE_LLVM (cfg)) {
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
return (MonoInst*)call;
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
MonoMethod *fast_method = NULL;
/* Avoid infinite recursion */
if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
strcmp (cfg->method->name, "FastMonitorExit") == 0))
if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
strcmp (cmethod->name, "Exit") == 0)
fast_method = mono_monitor_get_fast_path (cmethod);
return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked intrinsics ---- */
} else if (cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
#if SIZEOF_REGISTER == 8
if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
ins->dreg = mono_alloc_preg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
MONO_ADD_INS (cfg->cbb, ins);
#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1/-1. */
if (strcmp (cmethod->name, "Increment") == 0) {
MonoInst *ins_iconst;
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = 1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "Decrement") == 0) {
MonoInst *ins_iconst;
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = -1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
} else if (strcmp (cmethod->name, "Add") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_NEW_I8;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
if (strcmp (cmethod->name, "Exchange") == 0) {
gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
(fsig->params [0]->type == MONO_TYPE_I))
opcode = OP_ATOMIC_EXCHANGE_I8;
else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
opcode = OP_ATOMIC_EXCHANGE_I4;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
ins->type = STACK_I4;
ins->type = STACK_I8;
case MONO_TYPE_OBJECT:
ins->type = STACK_OBJ;
g_assert_not_reached ();
/* Storing a reference into the location needs a write barrier. */
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1], -1);
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
if (fsig->params [1]->type == MONO_TYPE_I4)
else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
size = sizeof (gpointer);
else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
} else if (size == 8) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
/* g_assert_not_reached (); */
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1], -1);
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Debugger.Break / Environment.get_IsRunningOnWindows ---- */
} else if (cmethod->klass->image == mono_defaults.corlib) {
if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
&& strcmp (cmethod->klass->name, "Debugger") == 0) {
if (should_insert_brekpoint (cfg->method))
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
&& strcmp (cmethod->klass->name, "Environment") == 0) {
EMIT_NEW_ICONST (cfg, ins, 1);
EMIT_NEW_ICONST (cfg, ins, 0);
} else if (cmethod->klass == mono_defaults.math_class) {
/*
* There is general branches code for Min/Max, but it does not work for
* all cases; see e.g.
* http://everything2.com/?node_id=1051618
*/
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Let the architecture backend try its own intrinsics last. */
return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4560 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect a call to METHOD to a more suitable implementation when one
 *   exists.  The only visible redirection replaces String.InternalAllocateStr
 *   with the GC's managed allocator.  Returns the emitted call instruction;
 *   presumably returns NULL when no redirection applies (elided lines —
 *   TODO confirm).
 */
4563 inline static MonoInst*
4564 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4565 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4567 if (method->klass == mono_defaults.string_class) {
4568 /* managed string allocation support */
/* Only redirect when no allocation profiler is listening and we are not
 * compiling shared (domain-neutral) code, where the vtable is not constant. */
4569 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4570 MonoInst *iargs [2];
4571 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4572 MonoMethod *managed_alloc = NULL;
4574 g_assert (vtable); /* Should not fail since it is System.String */
4575 #ifndef MONO_CROSS_COMPILE
4576 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4580 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4581 iargs [1] = args [0];
4582 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   For each incoming argument (including the implicit 'this'), create an
 *   OP_LOCAL variable in CFG, publish it in cfg->args [i], and emit a store
 *   of the stack value SP [i] into it.  Used while setting up the arguments
 *   of a method being inlined.
 */
4589 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4591 MonoInst *store, *temp;
4594 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in the static signature, so derive its type from the
 * value currently on the evaluation stack. */
4595 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4598 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4599 * would be different than the MonoInst's used to represent arguments, and
4600 * the ldelema implementation can't deal with that.
4601 * Solution: When ldelema is used on an inline argument, create a var for
4602 * it, emit ldelema on that var, and emit the saving code below in
4603 * inline_method () if needed.
4605 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4606 cfg->args [i] = temp;
4607 /* This uses cfg->args [i] which is set by the preceding line */
4608 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4609 store->cil_code = sp [0]->cil_code;
4614 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4615 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4617 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: restrict inlining to callees whose full name starts with
 *   the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 *   variable.  Returns TRUE when inlining CALLED_METHOD is permitted.
 */
4619 check_inline_called_method_name_limit (MonoMethod *called_method)
4622 static char *limit = NULL;
/* Read the environment variable once and cache it; an empty string means
 * "no limit configured" (everything is allowed). */
4624 if (limit == NULL) {
4625 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4627 if (limit_string != NULL)
4628 limit = limit_string;
4630 limit = (char *) "";
4633 if (limit [0] != '\0') {
4634 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Only a method whose full name begins with the configured prefix passes. */
4636 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4637 g_free (called_method_name);
4639 //return (strncmp_result <= 0);
4640 return (strncmp_result == 0);
4647 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Counterpart of check_inline_called_method_name_limit () for the caller
 *   side: inlining happens only inside methods whose full name starts with
 *   the MONO_INLINE_CALLER_METHOD_NAME_LIMIT prefix.  Returns TRUE when
 *   inlining inside CALLER_METHOD is permitted.
 */
4649 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4652 static char *limit = NULL;
/* Read and cache the env var once; "" means "no limit configured". */
4654 if (limit == NULL) {
4655 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4656 if (limit_string != NULL) {
4657 limit = limit_string;
4659 limit = (char *) "";
4663 if (limit [0] != '\0') {
4664 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit. */
4666 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4667 g_free (caller_method_name);
4669 //return (strncmp_result <= 0);
4670 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 *   CFG that describe the method being compiled, recursively runs
 *   mono_method_to_ir () on CMETHOD between a fresh start/end bblock pair,
 *   restores CFG, and — if the inline cost was acceptable — stitches the new
 *   bblocks into the caller's graph.  On failure all the speculatively added
 *   bblocks are discarded.  Returns the cost on success (elided return
 *   statements — TODO confirm exact return contract).
 */
4678 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4679 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4681 MonoInst *ins, *rvar = NULL;
4682 MonoMethodHeader *cheader;
4683 MonoBasicBlock *ebblock, *sbblock;
/* prev_* fields snapshot the per-method state of CFG so it can be restored
 * after the recursive mono_method_to_ir () call below. */
4685 MonoMethod *prev_inlined_method;
4686 MonoInst **prev_locals, **prev_args;
4687 MonoType **prev_arg_types;
4688 guint prev_real_offset;
4689 GHashTable *prev_cbb_hash;
4690 MonoBasicBlock **prev_cil_offset_to_bb;
4691 MonoBasicBlock *prev_cbb;
4692 unsigned char* prev_cil_start;
4693 guint32 prev_cil_offset_to_bb_len;
4694 MonoMethod *prev_current_method;
4695 MonoGenericContext *prev_generic_context;
4696 gboolean ret_var_set, prev_ret_var_set;
4698 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debugging filters (see the check_inline_*_name_limit helpers). */
4700 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4701 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4704 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4705 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4709 if (cfg->verbose_level > 2)
4710 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each method as "inlineable" only once. */
4712 if (!cmethod->inline_info) {
4713 mono_jit_stats.inlineable_methods++;
4714 cmethod->inline_info = 1;
4717 /* allocate local variables */
4718 cheader = mono_method_get_header (cmethod);
/* Bail out if the callee's header cannot be loaded. */
4720 if (cheader == NULL || mono_loader_get_last_error ()) {
4722 mono_metadata_free_mh (cheader);
4723 mono_loader_clear_error ();
4727 /* allocate space to store the return value */
4728 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4729 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee's locals. */
4733 prev_locals = cfg->locals;
4734 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4735 for (i = 0; i < cheader->num_locals; ++i)
4736 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4738 /* allocate start and end blocks */
4739 /* This is needed so if the inline is aborted, we can clean up */
4740 NEW_BBLOCK (cfg, sbblock);
4741 sbblock->real_offset = real_offset;
4743 NEW_BBLOCK (cfg, ebblock);
4744 ebblock->block_num = cfg->num_bblocks++;
4745 ebblock->real_offset = real_offset;
/* Snapshot the remaining per-method CFG state before recursing. */
4747 prev_args = cfg->args;
4748 prev_arg_types = cfg->arg_types;
4749 prev_inlined_method = cfg->inlined_method;
4750 cfg->inlined_method = cmethod;
4751 cfg->ret_var_set = FALSE;
4752 cfg->inline_depth ++;
4753 prev_real_offset = cfg->real_offset;
4754 prev_cbb_hash = cfg->cbb_hash;
4755 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4756 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4757 prev_cil_start = cfg->cil_start;
4758 prev_cbb = cfg->cbb;
4759 prev_current_method = cfg->current_method;
4760 prev_generic_context = cfg->generic_context;
4761 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee's IL; costs < 0 signals failure. */
4763 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4765 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state (mirror of the snapshot above). */
4767 cfg->inlined_method = prev_inlined_method;
4768 cfg->real_offset = prev_real_offset;
4769 cfg->cbb_hash = prev_cbb_hash;
4770 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4771 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4772 cfg->cil_start = prev_cil_start;
4773 cfg->locals = prev_locals;
4774 cfg->args = prev_args;
4775 cfg->arg_types = prev_arg_types;
4776 cfg->current_method = prev_current_method;
4777 cfg->generic_context = prev_generic_context;
4778 cfg->ret_var_set = prev_ret_var_set;
4779 cfg->inline_depth --;
/* Success path: cost acceptable (60 is an empirical threshold) or forced. */
4781 if ((costs >= 0 && costs < 60) || inline_allways) {
4782 if (cfg->verbose_level > 2)
4783 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4785 mono_jit_stats.inlined_methods++;
4787 /* always add some code to avoid block split failures */
4788 MONO_INST_NEW (cfg, ins, OP_NOP);
4789 MONO_ADD_INS (prev_cbb, ins);
/* Link the inlined body into the caller's flow graph. */
4791 prev_cbb->next_bb = sbblock;
4792 link_bblock (cfg, prev_cbb, sbblock);
4795 * Get rid of the begin and end bblocks if possible to aid local
4798 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4800 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4801 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4803 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4804 MonoBasicBlock *prev = ebblock->in_bb [0];
4805 mono_merge_basic_blocks (cfg, prev, ebblock);
4807 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4808 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4809 cfg->cbb = prev_cbb;
4817 * If the inlined method contains only a throw, then the ret var is not
4818 * set, so set it to a dummy value.
4821 static double r8_0 = 0.0;
/* Materialize a zero of the right stack type into the return variable. */
4823 switch (rvar->type) {
4825 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4828 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4833 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4836 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4837 ins->type = STACK_R8;
4838 ins->inst_p0 = (void*)&r8_0;
4839 ins->dreg = rvar->dreg;
4840 MONO_ADD_INS (cfg->cbb, ins);
4843 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4846 g_assert_not_reached ();
/* Push the return value back onto the caller's evaluation stack. */
4850 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* The header is freed when the compilation finishes, not here. */
4853 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Failure path: forget any exception recorded during the attempt and drop
 * the speculatively created bblocks. */
4856 if (cfg->verbose_level > 2)
4857 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4858 cfg->exception_type = MONO_EXCEPTION_NONE;
4859 mono_loader_clear_error ();
4861 /* This gets rid of the newly added bblocks */
4862 cfg->cbb = prev_cbb;
4864 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4869 * Some of these comments may well be out-of-date.
4870 * Design decisions: we do a single pass over the IL code (and we do bblock
4871 * splitting/merging in the few cases when it's required: a back jump to an IL
4872 * address that was not already seen as bblock starting point).
4873 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4874 * Complex operations are decomposed in simpler ones right away. We need to let the
4875 * arch-specific code peek and poke inside this process somehow (except when the
4876 * optimizations can take advantage of the full semantic info of coarse opcodes).
4877 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4878 * MonoInst->opcode initially is the IL opcode or some simplification of that
4879 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4880 * opcode with value bigger than OP_LAST.
4881 * At this point the IR can be handed over to an interpreter, a dumb code generator
4882 * or to the optimizing code generator that will translate it to SSA form.
4884 * Profiling directed optimizations.
4885 * We may compile by default with few or no optimizations and instrument the code
4886 * or the user may indicate what methods to optimize the most either in a config file
4887 * or through repeated runs where the compiler applies offline the optimizations to
4888 * each method and then decides if it was worth it.
4891 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4892 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4893 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4894 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4895 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4896 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4897 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4898 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4900 /* offset from br.s -> br like opcodes */
4901 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the CIL address IP still belongs to basic block BB,
 *   i.e. no other basic block starts at that IL offset.
 */
4904 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4906 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
/* NULL means no block starts here, so IP is in the middle of BB. */
4908 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [START, END): decode each opcode and create a
 *   basic block (via GET_BBLOCK) at every branch target and at every
 *   instruction following a branch/switch, building cfg->cil_offset_to_bb.
 *   Also marks the block containing a CEE_THROW as out-of-line so it can be
 *   placed cold.  Several original lines are elided from this view.
 */
4912 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4914 unsigned char *ip = start;
4915 unsigned char *target;
4918 MonoBasicBlock *bblock;
4919 const MonoOpcode *opcode;
4922 cli_addr = ip - start;
4923 i = mono_opcode_value ((const guint8 **)&ip, end);
4926 opcode = &mono_opcodes [i];
/* Advance IP according to the opcode's operand kind; create bblocks for
 * branch targets and fall-through successors. */
4927 switch (opcode->argument) {
4928 case MonoInlineNone:
4931 case MonoInlineString:
4932 case MonoInlineType:
4933 case MonoInlineField:
4934 case MonoInlineMethod:
4937 case MonoShortInlineR:
4944 case MonoShortInlineVar:
4945 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next opcode. */
4948 case MonoShortInlineBrTarget:
4949 target = start + cli_addr + 2 + (signed char)ip [1];
4950 GET_BBLOCK (cfg, bblock, target);
4953 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to the next opcode. */
4955 case MonoInlineBrTarget:
4956 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4957 GET_BBLOCK (cfg, bblock, target);
4960 GET_BBLOCK (cfg, bblock, ip);
/* switch: N targets, each relative to the end of the whole instruction. */
4962 case MonoInlineSwitch: {
4963 guint32 n = read32 (ip + 1);
4966 cli_addr += 5 + 4 * n;
4967 target = start + cli_addr;
4968 GET_BBLOCK (cfg, bblock, target);
4970 for (j = 0; j < n; ++j) {
4971 target = start + cli_addr + (gint32)read32 (ip);
4972 GET_BBLOCK (cfg, bblock, target);
4982 g_assert_not_reached ();
/* Mark throw-only blocks so they can be moved out of the hot path. */
4985 if (i == CEE_THROW) {
4986 unsigned char *bb_start = ip - 1;
4988 /* Find the start of the bblock containing the throw */
4990 while ((bb_start >= start) && !bblock) {
4991 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4995 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 *   the "token" is really an index into the wrapper's data, otherwise it is
 *   a metadata token resolved against M's image.  Open constructed types are
 *   allowed here (see mini_get_method () for the checked variant).
 */
5004 static inline MonoMethod *
5005 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5009 if (m->wrapper_type != MONO_WRAPPER_NONE)
5010 return mono_method_get_wrapper_data (m, token);
5012 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared
 *   generic code, rejects methods on open constructed types (elided handling
 *   of that case — presumably the method is replaced/NULLed, TODO confirm).
 */
5017 static inline MonoMethod *
5018 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5020 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5022 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 *   wrapper methods, a metadata lookup against the method's image otherwise.
 *   The class is initialized before being returned.
 */
5028 static inline MonoClass*
5029 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5033 if (method->wrapper_type != MONO_WRAPPER_NONE)
5034 klass = mono_method_get_wrapper_data (method, token);
5036 klass = mono_class_get_full (method->klass->image, token, context);
5038 mono_class_init (klass);
5043 * Returns TRUE if the JIT should abort inlining because "callee"
5044 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when CALLER invokes CALLEE.  For an ECMA
 *   linkdemand, code throwing a SecurityException is emitted inline; for
 *   other failures the exception is recorded on CFG so compilation aborts.
 */
5047 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining (cfg->method != caller) and the callee
 * actually carries declarative security. */
5051 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5055 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5056 if (result == MONO_JIT_SECURITY_OK)
5059 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5060 /* Generate code to throw a SecurityException before the actual call/link */
5061 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5064 NEW_ICONST (cfg, args [0], 4);
5065 NEW_METHODCONST (cfg, args [1], caller);
5066 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5067 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5068 /* don't hide previous results */
5069 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5070 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving and caching on first use) the managed
 *   SecurityManager.ThrowException (exception) method used to raise
 *   security exceptions from JITted code.
 */
5078 throw_exception (void)
5080 static MonoMethod *method = NULL;
/* Resolved once; subsequent calls reuse the cached MonoMethod. */
5083 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5084 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   emission point, so EX is raised at runtime when this code executes.
 */
5091 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5093 MonoMethod *thrower = throw_exception ();
/* The preallocated exception object is passed as a raw pointer constant. */
5096 EMIT_NEW_PCONST (cfg, args [0], ex);
5097 mono_emit_method_call (cfg, thrower, args, NULL);
5101 * Return the original method if a wrapper is specified. We can only access
5102 * the custom attributes from the original method.
5105 get_original_method (MonoMethod *method)
/* Not a wrapper: the method itself carries its attributes. */
5107 if (method->wrapper_type == MONO_WRAPPER_NONE)
5110 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5111 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5114 /* in other cases we need to find the original method */
5115 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the corresponding security exception at runtime.
 */
5119 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5120 MonoBasicBlock *bblock, unsigned char *ip)
5122 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5123 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5125 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the corresponding security exception at runtime.
 */
5129 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5130 MonoBasicBlock *bblock, unsigned char *ip)
5132 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5133 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5135 emit_throw_exception (cfg, ex);
5139 * Check that the IL instructions at ip are the array initialization
5140 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the "newarr; dup; ldtoken <field>; call
 *   RuntimeHelpers.InitializeArray" idiom so the element data can be copied
 *   directly from the image instead of calling the helper.  Returns the data
 *   pointer (or the RVA when AOT compiling), NULL when the pattern or the
 *   element type doesn't qualify.  Several original lines are elided.
 */
5143 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5146 * newarr[System.Int32]
5148 * ldtoken field valuetype ...
5149 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (Field table). */
5151 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5152 guint32 token = read32 (ip + 7);
5153 guint32 field_token = read32 (ip + 2);
5154 guint32 field_index = field_token & 0xffffff;
5156 const char *data_ptr;
5158 MonoMethod *cmethod;
5159 MonoClass *dummy_class;
5160 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5166 *out_field_token = field_token;
/* The callee must really be RuntimeHelpers.InitializeArray from corlib. */
5168 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
5171 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw bytes are endian-safe (or on little-endian
 * targets) can be copied verbatim from the image. */
5173 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5174 case MONO_TYPE_BOOLEAN:
5178 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5179 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5180 case MONO_TYPE_CHAR:
5190 return NULL; /* stupid ARM FP swapped format */
/* The RVA field must be large enough to hold the requested bytes. */
5200 if (size > mono_type_size (field->type, &dummy_align))
5203 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5204 if (!method->klass->image->dynamic) {
5205 field_index = read32 (ip + 2) & 0xffffff;
5206 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5207 data_ptr = mono_image_rva_map (method->klass->image, rva);
5208 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5209 /* for aot code we do the lookup on load */
5210 if (aot && data_ptr)
5211 return GUINT_TO_POINTER (rva);
5213 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images keep the data in the field object itself. */
5215 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the offending IL at
 *   IP inside METHOD, including a disassembly of the bad instruction (or a
 *   note that the body is empty).
 */
5223 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5225 char *method_fname = mono_method_full_name (method, TRUE);
5227 MonoMethodHeader *header = mono_method_get_header (method);
5229 if (header->code_size == 0)
5230 method_code = g_strdup ("method body is empty.");
5232 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5233 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5234 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5235 g_free (method_fname);
5236 g_free (method_code);
/* The header is freed when the compilation itself is destroyed. */
5237 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Abort compilation with a pre-built managed exception object; the pointer
 *   is registered as a GC root since it lives in a non-managed structure.
 */
5241 set_exception_object (MonoCompile *cfg, MonoException *exception)
5243 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5244 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5245 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Return whether KLASS is a reference type, resolving generic type
 *   parameters through the generic-sharing context when one is active.
 */
5249 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5253 if (cfg->generic_sharing_context)
5254 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5256 type = &klass->byval_arg;
5257 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of the stack value *SP into local N.  When the value is a
 *   just-emitted constant, skip the store and retarget the constant's dreg
 *   directly at the local's register.
 */
5261 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5264 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5265 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5266 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5267 /* Optimize reg-reg moves away */
5269 * Can't optimize other opcodes, since sp[0] might point to
5270 * the last ins of a decomposed opcode.
5272 sp [0]->dreg = (cfg)->locals [n]->dreg;
5274 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5279 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Recognize "ldloca N; initobj <type>" and replace the pair with a direct
 *   zeroing of local N (NULL for reference types, VZERO for value types),
 *   avoiding the address-taken local.  SIZE selects the short/long ldloca
 *   operand encoding; returns the new IP on success (elided lines —
 *   presumably NULL/ip otherwise, TODO confirm).
 */
5282 static inline unsigned char *
5283 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5292 local = read16 (ip + 2);
/* Only fuse when the initobj stays inside the current basic block. */
5296 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5297 gboolean skip = FALSE;
5299 /* From the INITOBJ case */
5300 token = read32 (ip + 2);
5301 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5302 CHECK_TYPELOAD (klass);
/* initobj on a reference type just nulls it; value types get zeroed. */
5303 if (generic_class_is_reference_type (cfg, klass)) {
5304 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5305 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5306 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5307 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5308 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk the inheritance chain and return whether CLASS derives from
 *   System.Exception.
 */
5321 is_exception_class (MonoClass *class)
5324 if (class == mono_defaults.exception_class)
5326 class = class->parent;
5332 * mono_method_to_ir:
5334 * Translate the .net IL into linear IR.
5337 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5338 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5339 guint inline_offset, gboolean is_virtual_call)
5342 MonoInst *ins, **sp, **stack_start;
5343 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5344 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5345 MonoMethod *cmethod, *method_definition;
5346 MonoInst **arg_array;
5347 MonoMethodHeader *header;
5349 guint32 token, ins_flag;
5351 MonoClass *constrained_call = NULL;
5352 unsigned char *ip, *end, *target, *err_pos;
5353 static double r8_0 = 0.0;
5354 MonoMethodSignature *sig;
5355 MonoGenericContext *generic_context = NULL;
5356 MonoGenericContainer *generic_container = NULL;
5357 MonoType **param_types;
5358 int i, n, start_new_bblock, dreg;
5359 int num_calls = 0, inline_costs = 0;
5360 int breakpoint_id = 0;
5362 MonoBoolean security, pinvoke;
5363 MonoSecurityManager* secman = NULL;
5364 MonoDeclSecurityActions actions;
5365 GSList *class_inits = NULL;
5366 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5368 gboolean init_locals, seq_points, skip_dead_blocks;
5370 /* serialization and xdomain stuff may need access to private fields and methods */
5371 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5372 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5373 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5374 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5375 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5376 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5378 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5380 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5381 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5382 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5383 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5385 image = method->klass->image;
5386 header = mono_method_get_header (method);
5388 MonoLoaderError *error;
5390 if ((error = mono_loader_get_last_error ())) {
5391 cfg->exception_type = error->exception_type;
5393 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5394 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5396 goto exception_exit;
5398 generic_container = mono_method_get_generic_container (method);
5399 sig = mono_method_signature (method);
5400 num_args = sig->hasthis + sig->param_count;
5401 ip = (unsigned char*)header->code;
5402 cfg->cil_start = ip;
5403 end = ip + header->code_size;
5404 mono_jit_stats.cil_code_size += header->code_size;
5405 init_locals = header->init_locals;
5407 seq_points = cfg->gen_seq_points && cfg->method == method;
5410 * Methods without init_locals set could cause asserts in various passes
5415 method_definition = method;
5416 while (method_definition->is_inflated) {
5417 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5418 method_definition = imethod->declaring;
5421 /* SkipVerification is not allowed if core-clr is enabled */
5422 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5424 dont_verify_stloc = TRUE;
5427 if (!dont_verify && mini_method_verify (cfg, method_definition))
5428 goto exception_exit;
5430 if (mono_debug_using_mono_debugger ())
5431 cfg->keep_cil_nops = TRUE;
5433 if (sig->is_inflated)
5434 generic_context = mono_method_get_context (method);
5435 else if (generic_container)
5436 generic_context = &generic_container->context;
5437 cfg->generic_context = generic_context;
5439 if (!cfg->generic_sharing_context)
5440 g_assert (!sig->has_type_parameters);
5442 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5443 g_assert (method->is_inflated);
5444 g_assert (mono_method_get_context (method)->method_inst);
5446 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5447 g_assert (sig->generic_param_count);
5449 if (cfg->method == method) {
5450 cfg->real_offset = 0;
5452 cfg->real_offset = inline_offset;
5455 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5456 cfg->cil_offset_to_bb_len = header->code_size;
5458 cfg->current_method = method;
5460 if (cfg->verbose_level > 2)
5461 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5463 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5465 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5466 for (n = 0; n < sig->param_count; ++n)
5467 param_types [n + sig->hasthis] = sig->params [n];
5468 cfg->arg_types = param_types;
5470 dont_inline = g_list_prepend (dont_inline, method);
5471 if (cfg->method == method) {
5473 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5474 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5477 NEW_BBLOCK (cfg, start_bblock);
5478 cfg->bb_entry = start_bblock;
5479 start_bblock->cil_code = NULL;
5480 start_bblock->cil_length = 0;
5483 NEW_BBLOCK (cfg, end_bblock);
5484 cfg->bb_exit = end_bblock;
5485 end_bblock->cil_code = NULL;
5486 end_bblock->cil_length = 0;
5487 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5488 g_assert (cfg->num_bblocks == 2);
5490 arg_array = cfg->args;
5492 if (header->num_clauses) {
5493 cfg->spvars = g_hash_table_new (NULL, NULL);
5494 cfg->exvars = g_hash_table_new (NULL, NULL);
5496 /* handle exception clauses */
5497 for (i = 0; i < header->num_clauses; ++i) {
5498 MonoBasicBlock *try_bb;
5499 MonoExceptionClause *clause = &header->clauses [i];
5500 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5501 try_bb->real_offset = clause->try_offset;
5502 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5503 tblock->real_offset = clause->handler_offset;
5504 tblock->flags |= BB_EXCEPTION_HANDLER;
5506 link_bblock (cfg, try_bb, tblock);
5508 if (*(ip + clause->handler_offset) == CEE_POP)
5509 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5511 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5512 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5513 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5514 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5515 MONO_ADD_INS (tblock, ins);
5517 /* todo: is a fault block unsafe to optimize? */
5518 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5519 tblock->flags |= BB_EXCEPTION_UNSAFE;
5523 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5525 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5527 /* catch and filter blocks get the exception object on the stack */
5528 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5529 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5530 MonoInst *dummy_use;
5532 /* mostly like handle_stack_args (), but just sets the input args */
5533 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5534 tblock->in_scount = 1;
5535 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5536 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5539 * Add a dummy use for the exvar so its liveness info will be
5543 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5545 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5546 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5547 tblock->flags |= BB_EXCEPTION_HANDLER;
5548 tblock->real_offset = clause->data.filter_offset;
5549 tblock->in_scount = 1;
5550 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5551 /* The filter block shares the exvar with the handler block */
5552 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5553 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5554 MONO_ADD_INS (tblock, ins);
5558 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5559 clause->data.catch_class &&
5560 cfg->generic_sharing_context &&
5561 mono_class_check_context_used (clause->data.catch_class)) {
5563 * In shared generic code with catch
5564 * clauses containing type variables
5565 * the exception handling code has to
5566 * be able to get to the rgctx.
5567 * Therefore we have to make sure that
5568 * the vtable/mrgctx argument (for
5569 * static or generic methods) or the
5570 * "this" argument (for non-static
5571 * methods) are live.
5573 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5574 mini_method_get_context (method)->method_inst ||
5575 method->klass->valuetype) {
5576 mono_get_vtable_var (cfg);
5578 MonoInst *dummy_use;
5580 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5585 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5586 cfg->cbb = start_bblock;
5587 cfg->args = arg_array;
5588 mono_save_args (cfg, sig, inline_args);
5591 /* FIRST CODE BLOCK */
5592 NEW_BBLOCK (cfg, bblock);
5593 bblock->cil_code = ip;
5597 ADD_BBLOCK (cfg, bblock);
5599 if (cfg->method == method) {
5600 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5601 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5602 MONO_INST_NEW (cfg, ins, OP_BREAK);
5603 MONO_ADD_INS (bblock, ins);
5607 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5608 secman = mono_security_manager_get_methods ();
5610 security = (secman && mono_method_has_declsec (method));
5611 /* at this point having security doesn't mean we have any code to generate */
5612 if (security && (cfg->method == method)) {
5613 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5614 * And we do not want to enter the next section (with allocation) if we
5615 * have nothing to generate */
5616 security = mono_declsec_get_demands (method, &actions);
5619 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5620 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5622 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5623 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5624 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5626 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5627 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5631 mono_custom_attrs_free (custom);
5634 custom = mono_custom_attrs_from_class (wrapped->klass);
5635 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5639 mono_custom_attrs_free (custom);
5642 /* not a P/Invoke after all */
5647 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5648 /* we use a separate basic block for the initialization code */
5649 NEW_BBLOCK (cfg, init_localsbb);
5650 cfg->bb_init = init_localsbb;
5651 init_localsbb->real_offset = cfg->real_offset;
5652 start_bblock->next_bb = init_localsbb;
5653 init_localsbb->next_bb = bblock;
5654 link_bblock (cfg, start_bblock, init_localsbb);
5655 link_bblock (cfg, init_localsbb, bblock);
5657 cfg->cbb = init_localsbb;
5659 start_bblock->next_bb = bblock;
5660 link_bblock (cfg, start_bblock, bblock);
5663 /* at this point we know, if security is TRUE, that some code needs to be generated */
5664 if (security && (cfg->method == method)) {
5667 mono_jit_stats.cas_demand_generation++;
5669 if (actions.demand.blob) {
5670 /* Add code for SecurityAction.Demand */
5671 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5672 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5673 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5674 mono_emit_method_call (cfg, secman->demand, args, NULL);
5676 if (actions.noncasdemand.blob) {
5677 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5678 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5679 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5680 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5681 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5682 mono_emit_method_call (cfg, secman->demand, args, NULL);
5684 if (actions.demandchoice.blob) {
5685 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5686 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5687 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5688 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5689 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5693 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5695 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5698 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5699 /* check if this is native code, e.g. an icall or a p/invoke */
5700 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5701 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5703 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5704 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5706 /* if this is a native call then it can only be JITted from platform code */
5707 if ((icall || pinvk) && method->klass && method->klass->image) {
5708 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5709 MonoException *ex = icall ? mono_get_exception_security () :
5710 mono_get_exception_method_access ();
5711 emit_throw_exception (cfg, ex);
5718 if (header->code_size == 0)
5721 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5726 if (cfg->method == method)
5727 mono_debug_init_method (cfg, bblock, breakpoint_id);
5729 for (n = 0; n < header->num_locals; ++n) {
5730 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5735 /* We force the vtable variable here for all shared methods
5736 for the possibility that they might show up in a stack
5737 trace where their exact instantiation is needed. */
5738 if (cfg->generic_sharing_context && method == cfg->method) {
5739 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5740 mini_method_get_context (method)->method_inst ||
5741 method->klass->valuetype) {
5742 mono_get_vtable_var (cfg);
5744 /* FIXME: Is there a better way to do this?
5745 We need the variable live for the duration
5746 of the whole method. */
5747 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5751 /* add a check for this != NULL to inlined methods */
5752 if (is_virtual_call) {
5755 NEW_ARGLOAD (cfg, arg_ins, 0);
5756 MONO_ADD_INS (cfg->cbb, arg_ins);
5757 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5760 skip_dead_blocks = !dont_verify;
5761 if (skip_dead_blocks) {
5762 original_bb = bb = mono_basic_block_split (method, &error);
5763 if (!mono_error_ok (&error)) {
5764 mono_error_cleanup (&error);
5770 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5771 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5774 start_new_bblock = 0;
5777 if (cfg->method == method)
5778 cfg->real_offset = ip - header->code;
5780 cfg->real_offset = inline_offset;
5785 if (start_new_bblock) {
5786 bblock->cil_length = ip - bblock->cil_code;
5787 if (start_new_bblock == 2) {
5788 g_assert (ip == tblock->cil_code);
5790 GET_BBLOCK (cfg, tblock, ip);
5792 bblock->next_bb = tblock;
5795 start_new_bblock = 0;
5796 for (i = 0; i < bblock->in_scount; ++i) {
5797 if (cfg->verbose_level > 3)
5798 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5799 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5803 g_slist_free (class_inits);
5806 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5807 link_bblock (cfg, bblock, tblock);
5808 if (sp != stack_start) {
5809 handle_stack_args (cfg, stack_start, sp - stack_start);
5811 CHECK_UNVERIFIABLE (cfg);
5813 bblock->next_bb = tblock;
5816 for (i = 0; i < bblock->in_scount; ++i) {
5817 if (cfg->verbose_level > 3)
5818 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5819 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5822 g_slist_free (class_inits);
5827 if (skip_dead_blocks) {
5828 int ip_offset = ip - header->code;
5830 if (ip_offset == bb->end)
5834 int op_size = mono_opcode_size (ip, end);
5835 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5837 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5839 if (ip_offset + op_size == bb->end) {
5840 MONO_INST_NEW (cfg, ins, OP_NOP);
5841 MONO_ADD_INS (bblock, ins);
5842 start_new_bblock = 1;
5850 * Sequence points are points where the debugger can place a breakpoint.
5851 * Currently, we generate these automatically at points where the IL
5854 if (seq_points && sp == stack_start) {
5855 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5856 MONO_ADD_INS (cfg->cbb, ins);
5859 bblock->real_offset = cfg->real_offset;
5861 if ((cfg->method == method) && cfg->coverage_info) {
5862 guint32 cil_offset = ip - header->code;
5863 cfg->coverage_info->data [cil_offset].cil_code = ip;
5865 /* TODO: Use an increment here */
5866 #if defined(TARGET_X86)
5867 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5868 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5870 MONO_ADD_INS (cfg->cbb, ins);
5872 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5873 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5877 if (cfg->verbose_level > 3)
5878 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5882 if (cfg->keep_cil_nops)
5883 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5885 MONO_INST_NEW (cfg, ins, OP_NOP);
5887 MONO_ADD_INS (bblock, ins);
5890 if (should_insert_brekpoint (cfg->method))
5891 MONO_INST_NEW (cfg, ins, OP_BREAK);
5893 MONO_INST_NEW (cfg, ins, OP_NOP);
5895 MONO_ADD_INS (bblock, ins);
5901 CHECK_STACK_OVF (1);
5902 n = (*ip)-CEE_LDARG_0;
5904 EMIT_NEW_ARGLOAD (cfg, ins, n);
5912 CHECK_STACK_OVF (1);
5913 n = (*ip)-CEE_LDLOC_0;
5915 EMIT_NEW_LOCLOAD (cfg, ins, n);
5924 n = (*ip)-CEE_STLOC_0;
5927 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5929 emit_stloc_ir (cfg, sp, header, n);
5936 CHECK_STACK_OVF (1);
5939 EMIT_NEW_ARGLOAD (cfg, ins, n);
5945 CHECK_STACK_OVF (1);
5948 NEW_ARGLOADA (cfg, ins, n);
5949 MONO_ADD_INS (cfg->cbb, ins);
5959 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5961 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5966 CHECK_STACK_OVF (1);
5969 EMIT_NEW_LOCLOAD (cfg, ins, n);
5973 case CEE_LDLOCA_S: {
5974 unsigned char *tmp_ip;
5976 CHECK_STACK_OVF (1);
5977 CHECK_LOCAL (ip [1]);
5979 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5985 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5994 CHECK_LOCAL (ip [1]);
5995 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5997 emit_stloc_ir (cfg, sp, header, ip [1]);
6002 CHECK_STACK_OVF (1);
6003 EMIT_NEW_PCONST (cfg, ins, NULL);
6004 ins->type = STACK_OBJ;
6009 CHECK_STACK_OVF (1);
6010 EMIT_NEW_ICONST (cfg, ins, -1);
6023 CHECK_STACK_OVF (1);
6024 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6030 CHECK_STACK_OVF (1);
6032 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6038 CHECK_STACK_OVF (1);
6039 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6045 CHECK_STACK_OVF (1);
6046 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6047 ins->type = STACK_I8;
6048 ins->dreg = alloc_dreg (cfg, STACK_I8);
6050 ins->inst_l = (gint64)read64 (ip);
6051 MONO_ADD_INS (bblock, ins);
6057 gboolean use_aotconst = FALSE;
6059 #ifdef TARGET_POWERPC
6060 /* FIXME: Clean this up */
6061 if (cfg->compile_aot)
6062 use_aotconst = TRUE;
6065 /* FIXME: we should really allocate this only late in the compilation process */
6066 f = mono_domain_alloc (cfg->domain, sizeof (float));
6068 CHECK_STACK_OVF (1);
6074 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6076 dreg = alloc_freg (cfg);
6077 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6078 ins->type = STACK_R8;
6080 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6081 ins->type = STACK_R8;
6082 ins->dreg = alloc_dreg (cfg, STACK_R8);
6084 MONO_ADD_INS (bblock, ins);
6094 gboolean use_aotconst = FALSE;
6096 #ifdef TARGET_POWERPC
6097 /* FIXME: Clean this up */
6098 if (cfg->compile_aot)
6099 use_aotconst = TRUE;
6102 /* FIXME: we should really allocate this only late in the compilation process */
6103 d = mono_domain_alloc (cfg->domain, sizeof (double));
6105 CHECK_STACK_OVF (1);
6111 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6113 dreg = alloc_freg (cfg);
6114 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6115 ins->type = STACK_R8;
6117 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6118 ins->type = STACK_R8;
6119 ins->dreg = alloc_dreg (cfg, STACK_R8);
6121 MONO_ADD_INS (bblock, ins);
6130 MonoInst *temp, *store;
6132 CHECK_STACK_OVF (1);
6136 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6137 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6139 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6142 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6155 if (sp [0]->type == STACK_R8)
6156 /* we need to pop the value from the x86 FP stack */
6157 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6166 if (stack_start != sp)
6168 token = read32 (ip + 1);
6169 /* FIXME: check the signature matches */
6170 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6172 if (!cmethod || mono_loader_get_last_error ())
6175 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6176 GENERIC_SHARING_FAILURE (CEE_JMP);
6178 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6179 CHECK_CFG_EXCEPTION;
6181 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6183 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6186 /* Handle tail calls similarly to calls */
6187 n = fsig->param_count + fsig->hasthis;
6189 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6190 call->method = cmethod;
6191 call->tail_call = TRUE;
6192 call->signature = mono_method_signature (cmethod);
6193 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6194 call->inst.inst_p0 = cmethod;
6195 for (i = 0; i < n; ++i)
6196 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6198 mono_arch_emit_call (cfg, call);
6199 MONO_ADD_INS (bblock, (MonoInst*)call);
6202 for (i = 0; i < num_args; ++i)
6203 /* Prevent arguments from being optimized away */
6204 arg_array [i]->flags |= MONO_INST_VOLATILE;
6206 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6207 ins = (MonoInst*)call;
6208 ins->inst_p0 = cmethod;
6209 MONO_ADD_INS (bblock, ins);
6213 start_new_bblock = 1;
6218 case CEE_CALLVIRT: {
6219 MonoInst *addr = NULL;
6220 MonoMethodSignature *fsig = NULL;
6222 int virtual = *ip == CEE_CALLVIRT;
6223 int calli = *ip == CEE_CALLI;
6224 gboolean pass_imt_from_rgctx = FALSE;
6225 MonoInst *imt_arg = NULL;
6226 gboolean pass_vtable = FALSE;
6227 gboolean pass_mrgctx = FALSE;
6228 MonoInst *vtable_arg = NULL;
6229 gboolean check_this = FALSE;
6230 gboolean supported_tail_call = FALSE;
6233 token = read32 (ip + 1);
6240 if (method->wrapper_type != MONO_WRAPPER_NONE)
6241 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6243 fsig = mono_metadata_parse_signature (image, token);
6245 n = fsig->param_count + fsig->hasthis;
6247 if (method->dynamic && fsig->pinvoke) {
6251 * This is a call through a function pointer using a pinvoke
6252 * signature. Have to create a wrapper and call that instead.
6253 * FIXME: This is very slow, need to create a wrapper at JIT time
6254 * instead based on the signature.
6256 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6257 EMIT_NEW_PCONST (cfg, args [1], fsig);
6259 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6262 MonoMethod *cil_method;
6264 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6265 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6266 cil_method = cmethod;
6267 } else if (constrained_call) {
6268 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6270 * This is needed since get_method_constrained can't find
6271 * the method in klass representing a type var.
6272 * The type var is guaranteed to be a reference type in this
6275 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6276 cil_method = cmethod;
6277 g_assert (!cmethod->klass->valuetype);
6279 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6282 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6283 cil_method = cmethod;
6286 if (!cmethod || mono_loader_get_last_error ())
6288 if (!dont_verify && !cfg->skip_visibility) {
6289 MonoMethod *target_method = cil_method;
6290 if (method->is_inflated) {
6291 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6293 if (!mono_method_can_access_method (method_definition, target_method) &&
6294 !mono_method_can_access_method (method, cil_method))
6295 METHOD_ACCESS_FAILURE;
6298 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6299 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6301 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6302 /* MS.NET seems to silently convert this to a callvirt */
6307 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6308 * converts to a callvirt.
6310 * tests/bug-515884.il is an example of this behavior
6312 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6313 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6314 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6318 if (!cmethod->klass->inited)
6319 if (!mono_class_init (cmethod->klass))
6322 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6323 mini_class_is_system_array (cmethod->klass)) {
6324 array_rank = cmethod->klass->rank;
6325 fsig = mono_method_signature (cmethod);
6327 fsig = mono_method_signature (cmethod);
6332 if (fsig->pinvoke) {
6333 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6334 check_for_pending_exc, FALSE);
6335 fsig = mono_method_signature (wrapper);
6336 } else if (constrained_call) {
6337 fsig = mono_method_signature (cmethod);
6339 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6343 mono_save_token_info (cfg, image, token, cil_method);
6345 n = fsig->param_count + fsig->hasthis;
6347 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6348 if (check_linkdemand (cfg, method, cmethod))
6350 CHECK_CFG_EXCEPTION;
6353 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6354 g_assert_not_reached ();
6357 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6360 if (!cfg->generic_sharing_context && cmethod)
6361 g_assert (!mono_method_check_context_used (cmethod));
6365 //g_assert (!virtual || fsig->hasthis);
6369 if (constrained_call) {
6371 * We have the `constrained.' prefix opcode.
6373 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6375 * The type parameter is instantiated as a valuetype,
6376 * but that type doesn't override the method we're
6377 * calling, so we need to box `this'.
6379 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6380 ins->klass = constrained_call;
6381 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6382 CHECK_CFG_EXCEPTION;
6383 } else if (!constrained_call->valuetype) {
6384 int dreg = alloc_preg (cfg);
6387 * The type parameter is instantiated as a reference
6388 * type. We have a managed pointer on the stack, so
6389 * we need to dereference it here.
6391 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6392 ins->type = STACK_OBJ;
6394 } else if (cmethod->klass->valuetype)
6396 constrained_call = NULL;
6399 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6403 * If the callee is a shared method, then its static cctor
6404 * might not get called after the call was patched.
6406 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6407 emit_generic_class_init (cfg, cmethod->klass);
6408 CHECK_TYPELOAD (cmethod->klass);
6411 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6412 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6413 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6414 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6415 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6418 * Pass vtable iff target method might
6419 * be shared, which means that sharing
6420 * is enabled for its class and its
6421 * context is sharable (and it's not a
6424 if (sharing_enabled && context_sharable &&
6425 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6429 if (cmethod && mini_method_get_context (cmethod) &&
6430 mini_method_get_context (cmethod)->method_inst) {
6431 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6432 MonoGenericContext *context = mini_method_get_context (cmethod);
6433 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6435 g_assert (!pass_vtable);
6437 if (sharing_enabled && context_sharable)
6441 if (cfg->generic_sharing_context && cmethod) {
6442 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6444 context_used = mono_method_check_context_used (cmethod);
6446 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6447 /* Generic method interface
6448 calls are resolved via a
6449 helper function and don't
6451 if (!cmethod_context || !cmethod_context->method_inst)
6452 pass_imt_from_rgctx = TRUE;
6456 * If a shared method calls another
6457 * shared method then the caller must
6458 * have a generic sharing context
6459 * because the magic trampoline
6460 * requires it. FIXME: We shouldn't
6461 * have to force the vtable/mrgctx
6462 * variable here. Instead there
6463 * should be a flag in the cfg to
6464 * request a generic sharing context.
6467 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6468 mono_get_vtable_var (cfg);
6473 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6475 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6477 CHECK_TYPELOAD (cmethod->klass);
6478 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6483 g_assert (!vtable_arg);
6485 if (!cfg->compile_aot) {
6487 * emit_get_rgctx_method () calls mono_class_vtable () so check
6488 * for type load errors before.
6490 mono_class_setup_vtable (cmethod->klass);
6491 CHECK_TYPELOAD (cmethod->klass);
6494 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6496 /* !marshalbyref is needed to properly handle generic methods + remoting */
6497 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6498 MONO_METHOD_IS_FINAL (cmethod)) &&
6499 !cmethod->klass->marshalbyref) {
6506 if (pass_imt_from_rgctx) {
6507 g_assert (!pass_vtable);
6510 imt_arg = emit_get_rgctx_method (cfg, context_used,
6511 cmethod, MONO_RGCTX_INFO_METHOD);
6515 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6517 /* Calling virtual generic methods */
6518 if (cmethod && virtual &&
6519 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6520 !(MONO_METHOD_IS_FINAL (cmethod) &&
6521 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6522 mono_method_signature (cmethod)->generic_param_count) {
6523 MonoInst *this_temp, *this_arg_temp, *store;
6524 MonoInst *iargs [4];
6526 g_assert (mono_method_signature (cmethod)->is_inflated);
6528 /* Prevent inlining of methods that contain indirect calls */
6531 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6532 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6533 g_assert (!imt_arg);
6535 g_assert (cmethod->is_inflated);
6536 imt_arg = emit_get_rgctx_method (cfg, context_used,
6537 cmethod, MONO_RGCTX_INFO_METHOD);
6538 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6542 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6543 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6544 MONO_ADD_INS (bblock, store);
6546 /* FIXME: This should be a managed pointer */
6547 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6549 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6550 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6551 cmethod, MONO_RGCTX_INFO_METHOD);
6552 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6553 addr = mono_emit_jit_icall (cfg,
6554 mono_helper_compile_generic_method, iargs);
6556 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6558 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6561 if (!MONO_TYPE_IS_VOID (fsig->ret))
6562 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6564 CHECK_CFG_EXCEPTION;
6571 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6572 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6574 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6578 /* FIXME: runtime generic context pointer for jumps? */
6579 /* FIXME: handle this for generic sharing eventually */
6580 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6583 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6586 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6587 /* Handle tail calls similarly to calls */
6588 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6590 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6591 call->tail_call = TRUE;
6592 call->method = cmethod;
6593 call->signature = mono_method_signature (cmethod);
6596 * We implement tail calls by storing the actual arguments into the
6597 * argument variables, then emitting a CEE_JMP.
6599 for (i = 0; i < n; ++i) {
6600 /* Prevent argument from being register allocated */
6601 arg_array [i]->flags |= MONO_INST_VOLATILE;
6602 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6606 ins = (MonoInst*)call;
6607 ins->inst_p0 = cmethod;
6608 ins->inst_p1 = arg_array [0];
6609 MONO_ADD_INS (bblock, ins);
6610 link_bblock (cfg, bblock, end_bblock);
6611 start_new_bblock = 1;
6613 CHECK_CFG_EXCEPTION;
6615 /* skip CEE_RET as well */
6621 /* Conversion to a JIT intrinsic */
6622 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6624 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6625 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6630 CHECK_CFG_EXCEPTION;
6638 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6639 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6640 mono_method_check_inlining (cfg, cmethod) &&
6641 !g_list_find (dont_inline, cmethod)) {
6643 gboolean allways = FALSE;
6645 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6646 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6647 /* Prevent inlining of methods that call wrappers */
6649 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6653 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6655 cfg->real_offset += 5;
6658 if (!MONO_TYPE_IS_VOID (fsig->ret))
6659 /* *sp is already set by inline_method */
6662 inline_costs += costs;
6668 inline_costs += 10 * num_calls++;
6670 /* Tail recursion elimination */
6671 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6672 gboolean has_vtargs = FALSE;
6675 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6678 /* keep it simple */
6679 for (i = fsig->param_count - 1; i >= 0; i--) {
6680 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6685 for (i = 0; i < n; ++i)
6686 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6687 MONO_INST_NEW (cfg, ins, OP_BR);
6688 MONO_ADD_INS (bblock, ins);
6689 tblock = start_bblock->out_bb [0];
6690 link_bblock (cfg, bblock, tblock);
6691 ins->inst_target_bb = tblock;
6692 start_new_bblock = 1;
6694 /* skip the CEE_RET, too */
6695 if (ip_in_bb (cfg, bblock, ip + 5))
6705 /* Generic sharing */
6706 /* FIXME: only do this for generic methods if
6707 they are not shared! */
6708 if (context_used && !imt_arg && !array_rank &&
6709 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6710 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6711 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6712 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6715 g_assert (cfg->generic_sharing_context && cmethod);
6719 * We are compiling a call to a
6720 * generic method from shared code,
6721 * which means that we have to look up
6722 * the method in the rgctx and do an
6725 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6728 /* Indirect calls */
6730 g_assert (!imt_arg);
6732 if (*ip == CEE_CALL)
6733 g_assert (context_used);
6734 else if (*ip == CEE_CALLI)
6735 g_assert (!vtable_arg);
6737 /* FIXME: what the hell is this??? */
6738 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6739 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6741 /* Prevent inlining of methods with indirect calls */
6746 int rgctx_reg = mono_alloc_preg (cfg);
6748 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6749 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6750 call = (MonoCallInst*)ins;
6751 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6753 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6755 * Instead of emitting an indirect call, emit a direct call
6756 * with the contents of the aotconst as the patch info.
6758 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6760 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6761 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6764 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6767 if (!MONO_TYPE_IS_VOID (fsig->ret))
6768 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6770 CHECK_CFG_EXCEPTION;
6781 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6782 if (sp [fsig->param_count]->type == STACK_OBJ) {
6783 MonoInst *iargs [2];
6786 iargs [1] = sp [fsig->param_count];
6788 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6791 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6792 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6793 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6794 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6796 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6799 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6800 if (!cmethod->klass->element_class->valuetype && !readonly)
6801 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6802 CHECK_TYPELOAD (cmethod->klass);
6805 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6808 g_assert_not_reached ();
6811 CHECK_CFG_EXCEPTION;
6818 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6820 if (!MONO_TYPE_IS_VOID (fsig->ret))
6821 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6823 CHECK_CFG_EXCEPTION;
6833 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6835 } else if (imt_arg) {
6836 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6838 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6841 if (!MONO_TYPE_IS_VOID (fsig->ret))
6842 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6844 CHECK_CFG_EXCEPTION;
6851 if (cfg->method != method) {
6852 /* return from inlined method */
6854 * If in_count == 0, that means the ret is unreachable due to
6855 * being preceded by a throw. In that case, inline_method () will
6856 * handle setting the return value
6857 * (test case: test_0_inline_throw ()).
6859 if (return_var && cfg->cbb->in_count) {
6863 //g_assert (returnvar != -1);
6864 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6865 cfg->ret_var_set = TRUE;
6869 MonoType *ret_type = mono_method_signature (method)->ret;
6873 * Place a seq point here too even though the IL stack is not
6874 * empty, so a step over on
6877 * will work correctly.
6879 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6880 MONO_ADD_INS (cfg->cbb, ins);
6883 g_assert (!return_var);
6886 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6889 if (!cfg->vret_addr) {
6892 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6894 EMIT_NEW_RETLOADA (cfg, ret_addr);
6896 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6897 ins->klass = mono_class_from_mono_type (ret_type);
6900 #ifdef MONO_ARCH_SOFT_FLOAT
6901 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6902 MonoInst *iargs [1];
6906 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6907 mono_arch_emit_setret (cfg, method, conv);
6909 mono_arch_emit_setret (cfg, method, *sp);
6912 mono_arch_emit_setret (cfg, method, *sp);
6917 if (sp != stack_start)
6919 MONO_INST_NEW (cfg, ins, OP_BR);
6921 ins->inst_target_bb = end_bblock;
6922 MONO_ADD_INS (bblock, ins);
6923 link_bblock (cfg, bblock, end_bblock);
6924 start_new_bblock = 1;
6928 MONO_INST_NEW (cfg, ins, OP_BR);
6930 target = ip + 1 + (signed char)(*ip);
6932 GET_BBLOCK (cfg, tblock, target);
6933 link_bblock (cfg, bblock, tblock);
6934 ins->inst_target_bb = tblock;
6935 if (sp != stack_start) {
6936 handle_stack_args (cfg, stack_start, sp - stack_start);
6938 CHECK_UNVERIFIABLE (cfg);
6940 MONO_ADD_INS (bblock, ins);
6941 start_new_bblock = 1;
6942 inline_costs += BRANCH_COST;
6956 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6958 target = ip + 1 + *(signed char*)ip;
6964 inline_costs += BRANCH_COST;
6968 MONO_INST_NEW (cfg, ins, OP_BR);
6971 target = ip + 4 + (gint32)read32(ip);
6973 GET_BBLOCK (cfg, tblock, target);
6974 link_bblock (cfg, bblock, tblock);
6975 ins->inst_target_bb = tblock;
6976 if (sp != stack_start) {
6977 handle_stack_args (cfg, stack_start, sp - stack_start);
6979 CHECK_UNVERIFIABLE (cfg);
6982 MONO_ADD_INS (bblock, ins);
6984 start_new_bblock = 1;
6985 inline_costs += BRANCH_COST;
6992 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6993 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6994 guint32 opsize = is_short ? 1 : 4;
6996 CHECK_OPSIZE (opsize);
6998 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7001 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7006 GET_BBLOCK (cfg, tblock, target);
7007 link_bblock (cfg, bblock, tblock);
7008 GET_BBLOCK (cfg, tblock, ip);
7009 link_bblock (cfg, bblock, tblock);
7011 if (sp != stack_start) {
7012 handle_stack_args (cfg, stack_start, sp - stack_start);
7013 CHECK_UNVERIFIABLE (cfg);
7016 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7017 cmp->sreg1 = sp [0]->dreg;
7018 type_from_op (cmp, sp [0], NULL);
7021 #if SIZEOF_REGISTER == 4
7022 if (cmp->opcode == OP_LCOMPARE_IMM) {
7023 /* Convert it to OP_LCOMPARE */
7024 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7025 ins->type = STACK_I8;
7026 ins->dreg = alloc_dreg (cfg, STACK_I8);
7028 MONO_ADD_INS (bblock, ins);
7029 cmp->opcode = OP_LCOMPARE;
7030 cmp->sreg2 = ins->dreg;
7033 MONO_ADD_INS (bblock, cmp);
7035 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7036 type_from_op (ins, sp [0], NULL);
7037 MONO_ADD_INS (bblock, ins);
7038 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7039 GET_BBLOCK (cfg, tblock, target);
7040 ins->inst_true_bb = tblock;
7041 GET_BBLOCK (cfg, tblock, ip);
7042 ins->inst_false_bb = tblock;
7043 start_new_bblock = 2;
7046 inline_costs += BRANCH_COST;
7061 MONO_INST_NEW (cfg, ins, *ip);
7063 target = ip + 4 + (gint32)read32(ip);
7069 inline_costs += BRANCH_COST;
7073 MonoBasicBlock **targets;
7074 MonoBasicBlock *default_bblock;
7075 MonoJumpInfoBBTable *table;
7076 int offset_reg = alloc_preg (cfg);
7077 int target_reg = alloc_preg (cfg);
7078 int table_reg = alloc_preg (cfg);
7079 int sum_reg = alloc_preg (cfg);
7080 gboolean use_op_switch;
7084 n = read32 (ip + 1);
7087 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7091 CHECK_OPSIZE (n * sizeof (guint32));
7092 target = ip + n * sizeof (guint32);
7094 GET_BBLOCK (cfg, default_bblock, target);
7095 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7097 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7098 for (i = 0; i < n; ++i) {
7099 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7100 targets [i] = tblock;
7101 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7105 if (sp != stack_start) {
7107 * Link the current bb with the targets as well, so handle_stack_args
7108 * will set their in_stack correctly.
7110 link_bblock (cfg, bblock, default_bblock);
7111 for (i = 0; i < n; ++i)
7112 link_bblock (cfg, bblock, targets [i]);
7114 handle_stack_args (cfg, stack_start, sp - stack_start);
7116 CHECK_UNVERIFIABLE (cfg);
7119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7123 for (i = 0; i < n; ++i)
7124 link_bblock (cfg, bblock, targets [i]);
7126 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7127 table->table = targets;
7128 table->table_size = n;
7130 use_op_switch = FALSE;
7132 /* ARM implements SWITCH statements differently */
7133 /* FIXME: Make it use the generic implementation */
7134 if (!cfg->compile_aot)
7135 use_op_switch = TRUE;
7138 if (COMPILE_LLVM (cfg))
7139 use_op_switch = TRUE;
7141 cfg->cbb->has_jump_table = 1;
7143 if (use_op_switch) {
7144 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7145 ins->sreg1 = src1->dreg;
7146 ins->inst_p0 = table;
7147 ins->inst_many_bb = targets;
7148 ins->klass = GUINT_TO_POINTER (n);
7149 MONO_ADD_INS (cfg->cbb, ins);
7151 if (sizeof (gpointer) == 8)
7152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7156 #if SIZEOF_REGISTER == 8
7157 /* The upper word might not be zero, and we add it to a 64 bit address later */
7158 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7161 if (cfg->compile_aot) {
7162 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7164 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7165 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7166 ins->inst_p0 = table;
7167 ins->dreg = table_reg;
7168 MONO_ADD_INS (cfg->cbb, ins);
7171 /* FIXME: Use load_memindex */
7172 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7174 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7176 start_new_bblock = 1;
7177 inline_costs += (BRANCH_COST * 2);
7197 dreg = alloc_freg (cfg);
7200 dreg = alloc_lreg (cfg);
7203 dreg = alloc_preg (cfg);
7206 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7207 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7208 ins->flags |= ins_flag;
7210 MONO_ADD_INS (bblock, ins);
7225 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7226 ins->flags |= ins_flag;
7228 MONO_ADD_INS (bblock, ins);
7230 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7231 emit_write_barrier (cfg, sp [0], sp [1], -1);
7240 MONO_INST_NEW (cfg, ins, (*ip));
7242 ins->sreg1 = sp [0]->dreg;
7243 ins->sreg2 = sp [1]->dreg;
7244 type_from_op (ins, sp [0], sp [1]);
7246 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7248 /* Use the immediate opcodes if possible */
7249 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7250 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7251 if (imm_opcode != -1) {
7252 ins->opcode = imm_opcode;
7253 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7256 sp [1]->opcode = OP_NOP;
7260 MONO_ADD_INS ((cfg)->cbb, (ins));
7262 *sp++ = mono_decompose_opcode (cfg, ins);
7279 MONO_INST_NEW (cfg, ins, (*ip));
7281 ins->sreg1 = sp [0]->dreg;
7282 ins->sreg2 = sp [1]->dreg;
7283 type_from_op (ins, sp [0], sp [1]);
7285 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7286 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7288 /* FIXME: Pass opcode to is_inst_imm */
7290 /* Use the immediate opcodes if possible */
7291 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7294 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7295 if (imm_opcode != -1) {
7296 ins->opcode = imm_opcode;
7297 if (sp [1]->opcode == OP_I8CONST) {
7298 #if SIZEOF_REGISTER == 8
7299 ins->inst_imm = sp [1]->inst_l;
7301 ins->inst_ls_word = sp [1]->inst_ls_word;
7302 ins->inst_ms_word = sp [1]->inst_ms_word;
7306 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7309 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7310 if (sp [1]->next == NULL)
7311 sp [1]->opcode = OP_NOP;
7314 MONO_ADD_INS ((cfg)->cbb, (ins));
7316 *sp++ = mono_decompose_opcode (cfg, ins);
7329 case CEE_CONV_OVF_I8:
7330 case CEE_CONV_OVF_U8:
7334 /* Special case this earlier so we have long constants in the IR */
7335 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7336 int data = sp [-1]->inst_c0;
7337 sp [-1]->opcode = OP_I8CONST;
7338 sp [-1]->type = STACK_I8;
7339 #if SIZEOF_REGISTER == 8
7340 if ((*ip) == CEE_CONV_U8)
7341 sp [-1]->inst_c0 = (guint32)data;
7343 sp [-1]->inst_c0 = data;
7345 sp [-1]->inst_ls_word = data;
7346 if ((*ip) == CEE_CONV_U8)
7347 sp [-1]->inst_ms_word = 0;
7349 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7351 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7358 case CEE_CONV_OVF_I4:
7359 case CEE_CONV_OVF_I1:
7360 case CEE_CONV_OVF_I2:
7361 case CEE_CONV_OVF_I:
7362 case CEE_CONV_OVF_U:
7365 if (sp [-1]->type == STACK_R8) {
7366 ADD_UNOP (CEE_CONV_OVF_I8);
7373 case CEE_CONV_OVF_U1:
7374 case CEE_CONV_OVF_U2:
7375 case CEE_CONV_OVF_U4:
7378 if (sp [-1]->type == STACK_R8) {
7379 ADD_UNOP (CEE_CONV_OVF_U8);
7386 case CEE_CONV_OVF_I1_UN:
7387 case CEE_CONV_OVF_I2_UN:
7388 case CEE_CONV_OVF_I4_UN:
7389 case CEE_CONV_OVF_I8_UN:
7390 case CEE_CONV_OVF_U1_UN:
7391 case CEE_CONV_OVF_U2_UN:
7392 case CEE_CONV_OVF_U4_UN:
7393 case CEE_CONV_OVF_U8_UN:
7394 case CEE_CONV_OVF_I_UN:
7395 case CEE_CONV_OVF_U_UN:
7402 CHECK_CFG_EXCEPTION;
7406 case CEE_ADD_OVF_UN:
7408 case CEE_MUL_OVF_UN:
7410 case CEE_SUB_OVF_UN:
7418 token = read32 (ip + 1);
7419 klass = mini_get_class (method, token, generic_context);
7420 CHECK_TYPELOAD (klass);
7422 if (generic_class_is_reference_type (cfg, klass)) {
7423 MonoInst *store, *load;
7424 int dreg = alloc_preg (cfg);
7426 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7427 load->flags |= ins_flag;
7428 MONO_ADD_INS (cfg->cbb, load);
7430 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7431 store->flags |= ins_flag;
7432 MONO_ADD_INS (cfg->cbb, store);
7434 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7435 emit_write_barrier (cfg, sp [0], sp [1], -1);
7437 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7449 token = read32 (ip + 1);
7450 klass = mini_get_class (method, token, generic_context);
7451 CHECK_TYPELOAD (klass);
7453 /* Optimize the common ldobj+stloc combination */
7463 loc_index = ip [5] - CEE_STLOC_0;
7470 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7471 CHECK_LOCAL (loc_index);
7473 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7474 ins->dreg = cfg->locals [loc_index]->dreg;
7480 /* Optimize the ldobj+stobj combination */
7481 /* The reference case ends up being a load+store anyway */
7482 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7487 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7494 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7503 CHECK_STACK_OVF (1);
7505 n = read32 (ip + 1);
7507 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7508 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7509 ins->type = STACK_OBJ;
7512 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7513 MonoInst *iargs [1];
7515 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7516 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7518 if (cfg->opt & MONO_OPT_SHARED) {
7519 MonoInst *iargs [3];
7521 if (cfg->compile_aot) {
7522 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7524 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7525 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7526 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7527 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7528 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7530 if (bblock->out_of_line) {
7531 MonoInst *iargs [2];
7533 if (image == mono_defaults.corlib) {
7535 * Avoid relocations in AOT and save some space by using a
7536 * version of helper_ldstr specialized to mscorlib.
7538 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7539 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7541 /* Avoid creating the string object */
7542 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7543 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7544 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7548 if (cfg->compile_aot) {
7549 NEW_LDSTRCONST (cfg, ins, image, n);
7551 MONO_ADD_INS (bblock, ins);
7554 NEW_PCONST (cfg, ins, NULL);
7555 ins->type = STACK_OBJ;
7556 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7558 MONO_ADD_INS (bblock, ins);
7567 MonoInst *iargs [2];
7568 MonoMethodSignature *fsig;
7571 MonoInst *vtable_arg = NULL;
7574 token = read32 (ip + 1);
7575 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7576 if (!cmethod || mono_loader_get_last_error ())
7578 fsig = mono_method_get_signature (cmethod, image, token);
7582 mono_save_token_info (cfg, image, token, cmethod);
7584 if (!mono_class_init (cmethod->klass))
7587 if (cfg->generic_sharing_context)
7588 context_used = mono_method_check_context_used (cmethod);
7590 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7591 if (check_linkdemand (cfg, method, cmethod))
7593 CHECK_CFG_EXCEPTION;
7594 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7595 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7598 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7599 emit_generic_class_init (cfg, cmethod->klass);
7600 CHECK_TYPELOAD (cmethod->klass);
7603 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7604 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7605 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7606 mono_class_vtable (cfg->domain, cmethod->klass);
7607 CHECK_TYPELOAD (cmethod->klass);
7609 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7610 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7613 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7614 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7616 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7618 CHECK_TYPELOAD (cmethod->klass);
7619 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7624 n = fsig->param_count;
7628 * Generate smaller code for the common newobj <exception> instruction in
7629 * argument checking code.
7631 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7632 is_exception_class (cmethod->klass) && n <= 2 &&
7633 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7634 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7635 MonoInst *iargs [3];
7637 g_assert (!vtable_arg);
7641 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7644 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7648 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7653 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7656 g_assert_not_reached ();
7664 /* move the args to allow room for 'this' in the first position */
7670 /* check_call_signature () requires sp[0] to be set */
7671 this_ins.type = STACK_OBJ;
7673 if (check_call_signature (cfg, fsig, sp))
7678 if (mini_class_is_system_array (cmethod->klass)) {
7679 g_assert (!vtable_arg);
7681 *sp = emit_get_rgctx_method (cfg, context_used,
7682 cmethod, MONO_RGCTX_INFO_METHOD);
7684 /* Avoid varargs in the common case */
7685 if (fsig->param_count == 1)
7686 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7687 else if (fsig->param_count == 2)
7688 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7689 else if (fsig->param_count == 3)
7690 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7692 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7693 } else if (cmethod->string_ctor) {
7694 g_assert (!context_used);
7695 g_assert (!vtable_arg);
7696 /* we simply pass a null pointer */
7697 EMIT_NEW_PCONST (cfg, *sp, NULL);
7698 /* now call the string ctor */
7699 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7701 MonoInst* callvirt_this_arg = NULL;
7703 if (cmethod->klass->valuetype) {
7704 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7705 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7706 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7711 * The code generated by mini_emit_virtual_call () expects
7712 * iargs [0] to be a boxed instance, but luckily the vcall
7713 * will be transformed into a normal call there.
7715 } else if (context_used) {
7716 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7719 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7721 CHECK_TYPELOAD (cmethod->klass);
7724 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7725 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7726 * As a workaround, we call class cctors before allocating objects.
7728 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7729 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7730 if (cfg->verbose_level > 2)
7731 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7732 class_inits = g_slist_prepend (class_inits, vtable);
7735 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7738 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7741 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7743 /* Now call the actual ctor */
7744 /* Avoid virtual calls to ctors if possible */
7745 if (cmethod->klass->marshalbyref)
7746 callvirt_this_arg = sp [0];
7749 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7750 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7751 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7756 CHECK_CFG_EXCEPTION;
7761 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7762 mono_method_check_inlining (cfg, cmethod) &&
7763 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7764 !g_list_find (dont_inline, cmethod)) {
7767 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7768 cfg->real_offset += 5;
7771 inline_costs += costs - 5;
7774 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7776 } else if (context_used &&
7777 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7778 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7779 MonoInst *cmethod_addr;
7781 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7782 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7784 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7787 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7788 callvirt_this_arg, NULL, vtable_arg);
7792 if (alloc == NULL) {
7794 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7795 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7809 token = read32 (ip + 1);
7810 klass = mini_get_class (method, token, generic_context);
7811 CHECK_TYPELOAD (klass);
7812 if (sp [0]->type != STACK_OBJ)
7815 if (cfg->generic_sharing_context)
7816 context_used = mono_class_check_context_used (klass);
7818 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7825 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7827 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7831 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7832 MonoMethod *mono_castclass;
7833 MonoInst *iargs [1];
7836 mono_castclass = mono_marshal_get_castclass (klass);
7839 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7840 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7841 g_assert (costs > 0);
7844 cfg->real_offset += 5;
7849 inline_costs += costs;
7852 ins = handle_castclass (cfg, klass, *sp, context_used);
7853 CHECK_CFG_EXCEPTION;
7863 token = read32 (ip + 1);
7864 klass = mini_get_class (method, token, generic_context);
7865 CHECK_TYPELOAD (klass);
7866 if (sp [0]->type != STACK_OBJ)
7869 if (cfg->generic_sharing_context)
7870 context_used = mono_class_check_context_used (klass);
7872 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7879 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7881 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7885 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7886 MonoMethod *mono_isinst;
7887 MonoInst *iargs [1];
7890 mono_isinst = mono_marshal_get_isinst (klass);
7893 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7894 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7895 g_assert (costs > 0);
7898 cfg->real_offset += 5;
7903 inline_costs += costs;
7906 ins = handle_isinst (cfg, klass, *sp, context_used);
7907 CHECK_CFG_EXCEPTION;
7914 case CEE_UNBOX_ANY: {
7918 token = read32 (ip + 1);
7919 klass = mini_get_class (method, token, generic_context);
7920 CHECK_TYPELOAD (klass);
7922 mono_save_token_info (cfg, image, token, klass);
7924 if (cfg->generic_sharing_context)
7925 context_used = mono_class_check_context_used (klass);
7927 if (generic_class_is_reference_type (cfg, klass)) {
7928 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7929 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7930 MonoMethod *mono_castclass;
7931 MonoInst *iargs [1];
7934 mono_castclass = mono_marshal_get_castclass (klass);
7937 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7938 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7940 g_assert (costs > 0);
7943 cfg->real_offset += 5;
7947 inline_costs += costs;
7949 ins = handle_castclass (cfg, klass, *sp, context_used);
7950 CHECK_CFG_EXCEPTION;
7958 if (mono_class_is_nullable (klass)) {
7959 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7966 ins = handle_unbox (cfg, klass, sp, context_used);
7972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7985 token = read32 (ip + 1);
7986 klass = mini_get_class (method, token, generic_context);
7987 CHECK_TYPELOAD (klass);
7989 mono_save_token_info (cfg, image, token, klass);
7991 if (cfg->generic_sharing_context)
7992 context_used = mono_class_check_context_used (klass);
7994 if (generic_class_is_reference_type (cfg, klass)) {
8000 if (klass == mono_defaults.void_class)
8002 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8004 /* frequent check in generic code: box (struct), brtrue */
8006 // FIXME: LLVM can't handle the inconsistent bb linking
8007 if (!mono_class_is_nullable (klass) &&
8008 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8009 (ip [5] == CEE_BRTRUE ||
8010 ip [5] == CEE_BRTRUE_S ||
8011 ip [5] == CEE_BRFALSE ||
8012 ip [5] == CEE_BRFALSE_S)) {
8013 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8015 MonoBasicBlock *true_bb, *false_bb;
8019 if (cfg->verbose_level > 3) {
8020 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8021 printf ("<box+brtrue opt>\n");
8029 target = ip + 1 + (signed char)(*ip);
8036 target = ip + 4 + (gint)(read32 (ip));
8040 g_assert_not_reached ();
8044 * We need to link both bblocks, since it is needed for handling stack
8045 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8046 * Branching to only one of them would lead to inconsistencies, so
8047 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8049 GET_BBLOCK (cfg, true_bb, target);
8050 GET_BBLOCK (cfg, false_bb, ip);
8052 mono_link_bblock (cfg, cfg->cbb, true_bb);
8053 mono_link_bblock (cfg, cfg->cbb, false_bb);
8055 if (sp != stack_start) {
8056 handle_stack_args (cfg, stack_start, sp - stack_start);
8058 CHECK_UNVERIFIABLE (cfg);
8061 if (COMPILE_LLVM (cfg)) {
8062 dreg = alloc_ireg (cfg);
8063 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8066 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8068 /* The JIT can't eliminate the iconst+compare */
8069 MONO_INST_NEW (cfg, ins, OP_BR);
8070 ins->inst_target_bb = is_true ? true_bb : false_bb;
8071 MONO_ADD_INS (cfg->cbb, ins);
8074 start_new_bblock = 1;
8078 *sp++ = handle_box (cfg, val, klass, context_used);
8080 CHECK_CFG_EXCEPTION;
8089 token = read32 (ip + 1);
8090 klass = mini_get_class (method, token, generic_context);
8091 CHECK_TYPELOAD (klass);
8093 mono_save_token_info (cfg, image, token, klass);
8095 if (cfg->generic_sharing_context)
8096 context_used = mono_class_check_context_used (klass);
8098 if (mono_class_is_nullable (klass)) {
8101 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8102 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8106 ins = handle_unbox (cfg, klass, sp, context_used);
8116 MonoClassField *field;
8120 if (*ip == CEE_STFLD) {
8127 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8129 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8132 token = read32 (ip + 1);
8133 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8134 field = mono_method_get_wrapper_data (method, token);
8135 klass = field->parent;
8138 field = mono_field_from_token (image, token, &klass, generic_context);
8142 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8143 FIELD_ACCESS_FAILURE;
8144 mono_class_init (klass);
8146 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8147 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8148 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8149 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8152 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8153 if (*ip == CEE_STFLD) {
8154 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8156 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8157 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8158 MonoInst *iargs [5];
8161 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8162 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8163 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8167 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8168 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8169 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8170 g_assert (costs > 0);
8172 cfg->real_offset += 5;
8175 inline_costs += costs;
8177 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8182 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8184 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8185 if (sp [0]->opcode != OP_LDADDR)
8186 store->flags |= MONO_INST_FAULT;
8188 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8189 /* insert call to write barrier */
8193 dreg = alloc_preg (cfg);
8194 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8195 emit_write_barrier (cfg, ptr, sp [1], -1);
8198 store->flags |= ins_flag;
8205 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8206 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8207 MonoInst *iargs [4];
8210 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8211 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8212 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8213 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8214 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8215 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8217 g_assert (costs > 0);
8219 cfg->real_offset += 5;
8223 inline_costs += costs;
8225 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8229 if (sp [0]->type == STACK_VTYPE) {
8232 /* Have to compute the address of the variable */
8234 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8236 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8238 g_assert (var->klass == klass);
8240 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8244 if (*ip == CEE_LDFLDA) {
8245 if (sp [0]->type == STACK_OBJ) {
8246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8247 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8250 dreg = alloc_preg (cfg);
8252 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8253 ins->klass = mono_class_from_mono_type (field->type);
8254 ins->type = STACK_MP;
8259 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8261 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8262 load->flags |= ins_flag;
8263 if (sp [0]->opcode != OP_LDADDR)
8264 load->flags |= MONO_INST_FAULT;
8275 MonoClassField *field;
8276 gpointer addr = NULL;
8277 gboolean is_special_static;
8280 token = read32 (ip + 1);
8282 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8283 field = mono_method_get_wrapper_data (method, token);
8284 klass = field->parent;
8287 field = mono_field_from_token (image, token, &klass, generic_context);
8290 mono_class_init (klass);
8291 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8292 FIELD_ACCESS_FAILURE;
8294 /* if the class is Critical then transparent code cannot access its fields */
8295 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8296 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8299 * We can only support shared generic static
8300 * field access on architectures where the
8301 * trampoline code has been extended to handle
8302 * the generic class init.
8304 #ifndef MONO_ARCH_VTABLE_REG
8305 GENERIC_SHARING_FAILURE (*ip);
8308 if (cfg->generic_sharing_context)
8309 context_used = mono_class_check_context_used (klass);
8311 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8313 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8314 * to be called here.
8316 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8317 mono_class_vtable (cfg->domain, klass);
8318 CHECK_TYPELOAD (klass);
8320 mono_domain_lock (cfg->domain);
8321 if (cfg->domain->special_static_fields)
8322 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8323 mono_domain_unlock (cfg->domain);
8325 is_special_static = mono_class_field_is_special_static (field);
8327 /* Generate IR to compute the field address */
8328 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8330 * Fast access to TLS data
8331 * Inline version of get_thread_static_data () in
8335 int idx, static_data_reg, array_reg, dreg;
8336 MonoInst *thread_ins;
8338 // offset &= 0x7fffffff;
8339 // idx = (offset >> 24) - 1;
8340 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8342 thread_ins = mono_get_thread_intrinsic (cfg);
8343 MONO_ADD_INS (cfg->cbb, thread_ins);
8344 static_data_reg = alloc_ireg (cfg);
8345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8347 if (cfg->compile_aot) {
8348 int offset_reg, offset2_reg, idx_reg;
8350 /* For TLS variables, this will return the TLS offset */
8351 EMIT_NEW_SFLDACONST (cfg, ins, field);
8352 offset_reg = ins->dreg;
8353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8354 idx_reg = alloc_ireg (cfg);
8355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8358 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8359 array_reg = alloc_ireg (cfg);
8360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8361 offset2_reg = alloc_ireg (cfg);
8362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8363 dreg = alloc_ireg (cfg);
8364 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8366 offset = (gsize)addr & 0x7fffffff;
8367 idx = (offset >> 24) - 1;
8369 array_reg = alloc_ireg (cfg);
8370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8371 dreg = alloc_ireg (cfg);
8372 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8374 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8375 (cfg->compile_aot && is_special_static) ||
8376 (context_used && is_special_static)) {
8377 MonoInst *iargs [2];
8379 g_assert (field->parent);
8380 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8382 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8383 field, MONO_RGCTX_INFO_CLASS_FIELD);
8385 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8387 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8388 } else if (context_used) {
8389 MonoInst *static_data;
8392 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8393 method->klass->name_space, method->klass->name, method->name,
8394 depth, field->offset);
8397 if (mono_class_needs_cctor_run (klass, method))
8398 emit_generic_class_init (cfg, klass);
8401 * The pointer we're computing here is
8403 * super_info.static_data + field->offset
8405 static_data = emit_get_rgctx_klass (cfg, context_used,
8406 klass, MONO_RGCTX_INFO_STATIC_DATA);
8408 if (field->offset == 0) {
8411 int addr_reg = mono_alloc_preg (cfg);
8412 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8414 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8415 MonoInst *iargs [2];
8417 g_assert (field->parent);
8418 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8419 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8420 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8422 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8424 CHECK_TYPELOAD (klass);
8426 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8427 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8428 if (cfg->verbose_level > 2)
8429 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8430 class_inits = g_slist_prepend (class_inits, vtable);
8432 if (cfg->run_cctors) {
8434 /* This makes it so that inlining cannot trigger */
8435 /* .cctors: too many apps depend on them */
8436 /* running with a specific order... */
8437 if (! vtable->initialized)
8439 ex = mono_runtime_class_init_full (vtable, FALSE);
8441 set_exception_object (cfg, ex);
8442 goto exception_exit;
8446 addr = (char*)vtable->data + field->offset;
8448 if (cfg->compile_aot)
8449 EMIT_NEW_SFLDACONST (cfg, ins, field);
8451 EMIT_NEW_PCONST (cfg, ins, addr);
8453 MonoInst *iargs [1];
8454 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8455 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8459 /* Generate IR to do the actual load/store operation */
8461 if (*ip == CEE_LDSFLDA) {
8462 ins->klass = mono_class_from_mono_type (field->type);
8463 ins->type = STACK_PTR;
8465 } else if (*ip == CEE_STSFLD) {
8470 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8471 store->flags |= ins_flag;
8473 gboolean is_const = FALSE;
8474 MonoVTable *vtable = NULL;
8476 if (!context_used) {
8477 vtable = mono_class_vtable (cfg->domain, klass);
8478 CHECK_TYPELOAD (klass);
8480 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8481 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8482 gpointer addr = (char*)vtable->data + field->offset;
8483 int ro_type = field->type->type;
8484 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8485 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8487 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8490 case MONO_TYPE_BOOLEAN:
8492 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8496 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8499 case MONO_TYPE_CHAR:
8501 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8505 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8510 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8514 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8520 case MONO_TYPE_FNPTR:
8521 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8522 type_to_eval_stack_type ((cfg), field->type, *sp);
8525 case MONO_TYPE_STRING:
8526 case MONO_TYPE_OBJECT:
8527 case MONO_TYPE_CLASS:
8528 case MONO_TYPE_SZARRAY:
8529 case MONO_TYPE_ARRAY:
8530 if (!mono_gc_is_moving ()) {
8531 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8532 type_to_eval_stack_type ((cfg), field->type, *sp);
8540 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8545 case MONO_TYPE_VALUETYPE:
8555 CHECK_STACK_OVF (1);
8557 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8558 load->flags |= ins_flag;
8571 token = read32 (ip + 1);
8572 klass = mini_get_class (method, token, generic_context);
8573 CHECK_TYPELOAD (klass);
8574 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8575 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8576 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8577 generic_class_is_reference_type (cfg, klass)) {
8578 /* insert call to write barrier */
8579 emit_write_barrier (cfg, sp [0], sp [1], -1);
8591 const char *data_ptr;
8593 guint32 field_token;
8599 token = read32 (ip + 1);
8601 klass = mini_get_class (method, token, generic_context);
8602 CHECK_TYPELOAD (klass);
8604 if (cfg->generic_sharing_context)
8605 context_used = mono_class_check_context_used (klass);
8607 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8608 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8609 ins->sreg1 = sp [0]->dreg;
8610 ins->type = STACK_I4;
8611 ins->dreg = alloc_ireg (cfg);
8612 MONO_ADD_INS (cfg->cbb, ins);
8613 *sp = mono_decompose_opcode (cfg, ins);
8618 MonoClass *array_class = mono_array_class_get (klass, 1);
8619 /* FIXME: we cannot get a managed
8620 allocator because we can't get the
8621 open generic class's vtable. We
8622 have the same problem in
8623 handle_alloc(). This
8624 needs to be solved so that we can
8625 have managed allocs of shared
8628 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8629 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8631 MonoMethod *managed_alloc = NULL;
8633 /* FIXME: Decompose later to help abcrem */
8636 args [0] = emit_get_rgctx_klass (cfg, context_used,
8637 array_class, MONO_RGCTX_INFO_VTABLE);
8642 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8644 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8646 if (cfg->opt & MONO_OPT_SHARED) {
8647 /* Decompose now to avoid problems with references to the domainvar */
8648 MonoInst *iargs [3];
8650 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8651 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8654 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8656 /* Decompose later since it is needed by abcrem */
8657 MonoClass *array_type = mono_array_class_get (klass, 1);
8658 mono_class_vtable (cfg->domain, array_type);
8659 CHECK_TYPELOAD (array_type);
8661 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8662 ins->dreg = alloc_preg (cfg);
8663 ins->sreg1 = sp [0]->dreg;
8664 ins->inst_newa_class = klass;
8665 ins->type = STACK_OBJ;
8667 MONO_ADD_INS (cfg->cbb, ins);
8668 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8669 cfg->cbb->has_array_access = TRUE;
8671 /* Needed so mono_emit_load_get_addr () gets called */
8672 mono_get_got_var (cfg);
8682 * we inline/optimize the initialization sequence if possible.
8683 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8684 * for small sizes open code the memcpy
8685 * ensure the rva field is big enough
8687 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8688 MonoMethod *memcpy_method = get_memcpy_method ();
8689 MonoInst *iargs [3];
8690 int add_reg = alloc_preg (cfg);
8692 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8693 if (cfg->compile_aot) {
8694 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8696 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8698 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8699 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8708 if (sp [0]->type != STACK_OBJ)
8711 dreg = alloc_preg (cfg);
8712 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8713 ins->dreg = alloc_preg (cfg);
8714 ins->sreg1 = sp [0]->dreg;
8715 ins->type = STACK_I4;
8716 /* This flag will be inherited by the decomposition */
8717 ins->flags |= MONO_INST_FAULT;
8718 MONO_ADD_INS (cfg->cbb, ins);
8719 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8720 cfg->cbb->has_array_access = TRUE;
8728 if (sp [0]->type != STACK_OBJ)
8731 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8733 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8734 CHECK_TYPELOAD (klass);
8735 /* we need to make sure that this array is exactly the type it needs
8736 * to be for correctness. the wrappers are lax with their usage
8737 * so we need to ignore them here
8739 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8740 MonoClass *array_class = mono_array_class_get (klass, 1);
8741 mini_emit_check_array_type (cfg, sp [0], array_class);
8742 CHECK_TYPELOAD (array_class);
8746 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8761 case CEE_LDELEM_REF: {
8767 if (*ip == CEE_LDELEM) {
8769 token = read32 (ip + 1);
8770 klass = mini_get_class (method, token, generic_context);
8771 CHECK_TYPELOAD (klass);
8772 mono_class_init (klass);
8775 klass = array_access_to_klass (*ip);
8777 if (sp [0]->type != STACK_OBJ)
8780 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8782 if (sp [1]->opcode == OP_ICONST) {
8783 int array_reg = sp [0]->dreg;
8784 int index_reg = sp [1]->dreg;
8785 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8787 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8788 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8790 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8791 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8794 if (*ip == CEE_LDELEM)
8807 case CEE_STELEM_REF:
8814 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8816 if (*ip == CEE_STELEM) {
8818 token = read32 (ip + 1);
8819 klass = mini_get_class (method, token, generic_context);
8820 CHECK_TYPELOAD (klass);
8821 mono_class_init (klass);
8824 klass = array_access_to_klass (*ip);
8826 if (sp [0]->type != STACK_OBJ)
8829 /* storing a NULL doesn't need any of the complex checks in stelemref */
8830 if (generic_class_is_reference_type (cfg, klass) &&
8831 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8832 MonoMethod* helper = mono_marshal_get_stelemref ();
8833 MonoInst *iargs [3];
8835 if (sp [0]->type != STACK_OBJ)
8837 if (sp [2]->type != STACK_OBJ)
8844 mono_emit_method_call (cfg, helper, iargs, NULL);
8846 if (sp [1]->opcode == OP_ICONST) {
8847 int array_reg = sp [0]->dreg;
8848 int index_reg = sp [1]->dreg;
8849 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8851 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8852 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8854 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8855 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8859 if (*ip == CEE_STELEM)
8866 case CEE_CKFINITE: {
8870 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8871 ins->sreg1 = sp [0]->dreg;
8872 ins->dreg = alloc_freg (cfg);
8873 ins->type = STACK_R8;
8874 MONO_ADD_INS (bblock, ins);
8876 *sp++ = mono_decompose_opcode (cfg, ins);
8881 case CEE_REFANYVAL: {
8882 MonoInst *src_var, *src;
8884 int klass_reg = alloc_preg (cfg);
8885 int dreg = alloc_preg (cfg);
8888 MONO_INST_NEW (cfg, ins, *ip);
8891 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8892 CHECK_TYPELOAD (klass);
8893 mono_class_init (klass);
8895 if (cfg->generic_sharing_context)
8896 context_used = mono_class_check_context_used (klass);
8899 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8901 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8902 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8903 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8906 MonoInst *klass_ins;
8908 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8909 klass, MONO_RGCTX_INFO_KLASS);
8912 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8913 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8915 mini_emit_class_check (cfg, klass_reg, klass);
8917 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8918 ins->type = STACK_MP;
8923 case CEE_MKREFANY: {
8924 MonoInst *loc, *addr;
8927 MONO_INST_NEW (cfg, ins, *ip);
8930 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8931 CHECK_TYPELOAD (klass);
8932 mono_class_init (klass);
8934 if (cfg->generic_sharing_context)
8935 context_used = mono_class_check_context_used (klass);
8937 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8938 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8941 MonoInst *const_ins;
8942 int type_reg = alloc_preg (cfg);
8944 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8947 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8948 } else if (cfg->compile_aot) {
8949 int const_reg = alloc_preg (cfg);
8950 int type_reg = alloc_preg (cfg);
8952 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8953 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8954 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8955 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8957 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8958 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8962 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8963 ins->type = STACK_VTYPE;
8964 ins->klass = mono_defaults.typed_reference_class;
8971 MonoClass *handle_class;
8973 CHECK_STACK_OVF (1);
8976 n = read32 (ip + 1);
8978 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8979 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8980 handle = mono_method_get_wrapper_data (method, n);
8981 handle_class = mono_method_get_wrapper_data (method, n + 1);
8982 if (handle_class == mono_defaults.typehandle_class)
8983 handle = &((MonoClass*)handle)->byval_arg;
8986 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8990 mono_class_init (handle_class);
8991 if (cfg->generic_sharing_context) {
8992 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8993 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8994 /* This case handles ldtoken
8995 of an open type, like for
8998 } else if (handle_class == mono_defaults.typehandle_class) {
8999 /* If we get a MONO_TYPE_CLASS
9000 then we need to provide the
9002 instantiation of it. */
9003 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9006 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9007 } else if (handle_class == mono_defaults.fieldhandle_class)
9008 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9009 else if (handle_class == mono_defaults.methodhandle_class)
9010 context_used = mono_method_check_context_used (handle);
9012 g_assert_not_reached ();
9015 if ((cfg->opt & MONO_OPT_SHARED) &&
9016 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9017 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9018 MonoInst *addr, *vtvar, *iargs [3];
9019 int method_context_used;
9021 if (cfg->generic_sharing_context)
9022 method_context_used = mono_method_check_context_used (method);
9024 method_context_used = 0;
9026 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9028 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9029 EMIT_NEW_ICONST (cfg, iargs [1], n);
9030 if (method_context_used) {
9031 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9032 method, MONO_RGCTX_INFO_METHOD);
9033 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9035 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9036 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9038 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9040 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9042 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9044 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9045 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9046 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9047 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9048 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9049 MonoClass *tclass = mono_class_from_mono_type (handle);
9051 mono_class_init (tclass);
9053 ins = emit_get_rgctx_klass (cfg, context_used,
9054 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9055 } else if (cfg->compile_aot) {
9056 if (method->wrapper_type) {
9057 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9058 /* Special case for static synchronized wrappers */
9059 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9061 /* FIXME: n is not a normal token */
9062 cfg->disable_aot = TRUE;
9063 EMIT_NEW_PCONST (cfg, ins, NULL);
9066 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9069 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9071 ins->type = STACK_OBJ;
9072 ins->klass = cmethod->klass;
9075 MonoInst *addr, *vtvar;
9077 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9080 if (handle_class == mono_defaults.typehandle_class) {
9081 ins = emit_get_rgctx_klass (cfg, context_used,
9082 mono_class_from_mono_type (handle),
9083 MONO_RGCTX_INFO_TYPE);
9084 } else if (handle_class == mono_defaults.methodhandle_class) {
9085 ins = emit_get_rgctx_method (cfg, context_used,
9086 handle, MONO_RGCTX_INFO_METHOD);
9087 } else if (handle_class == mono_defaults.fieldhandle_class) {
9088 ins = emit_get_rgctx_field (cfg, context_used,
9089 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9091 g_assert_not_reached ();
9093 } else if (cfg->compile_aot) {
9094 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9096 EMIT_NEW_PCONST (cfg, ins, handle);
9098 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9099 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9100 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9110 MONO_INST_NEW (cfg, ins, OP_THROW);
9112 ins->sreg1 = sp [0]->dreg;
9114 bblock->out_of_line = TRUE;
9115 MONO_ADD_INS (bblock, ins);
9116 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9117 MONO_ADD_INS (bblock, ins);
9120 link_bblock (cfg, bblock, end_bblock);
9121 start_new_bblock = 1;
9123 case CEE_ENDFINALLY:
9124 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9125 MONO_ADD_INS (bblock, ins);
9127 start_new_bblock = 1;
9130 * Control will leave the method so empty the stack, otherwise
9131 * the next basic block will start with a nonempty stack.
9133 while (sp != stack_start) {
9141 if (*ip == CEE_LEAVE) {
9143 target = ip + 5 + (gint32)read32(ip + 1);
9146 target = ip + 2 + (signed char)(ip [1]);
9149 /* empty the stack */
9150 while (sp != stack_start) {
9155 * If this leave statement is in a catch block, check for a
9156 * pending exception, and rethrow it if necessary.
9157 * We avoid doing this in runtime invoke wrappers, since those are called
9158 * by native code which expects the wrapper to catch all exceptions.
9160 for (i = 0; i < header->num_clauses; ++i) {
9161 MonoExceptionClause *clause = &header->clauses [i];
9164 * Use <= in the final comparison to handle clauses with multiple
9165 * leave statements, like in bug #78024.
9166 * The ordering of the exception clauses guarantees that we find the
9169 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9171 MonoBasicBlock *dont_throw;
9176 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9179 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9181 NEW_BBLOCK (cfg, dont_throw);
9184 * Currently, we always rethrow the abort exception, despite the
9185 * fact that this is not correct. See thread6.cs for an example.
9186 * But propagating the abort exception is more important than
9187 * getting the semantics right.
9189 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9191 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9193 MONO_START_BB (cfg, dont_throw);
9198 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9200 MonoExceptionClause *clause;
9202 for (tmp = handlers; tmp; tmp = tmp->next) {
9204 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9206 link_bblock (cfg, bblock, tblock);
9207 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9208 ins->inst_target_bb = tblock;
9209 ins->inst_eh_block = clause;
9210 MONO_ADD_INS (bblock, ins);
9211 bblock->has_call_handler = 1;
9212 if (COMPILE_LLVM (cfg)) {
9213 MonoBasicBlock *target_bb;
9216 * Link the finally bblock with the target, since it will
9217 * conceptually branch there.
9218 * FIXME: Have to link the bblock containing the endfinally.
9220 GET_BBLOCK (cfg, target_bb, target);
9221 link_bblock (cfg, tblock, target_bb);
9224 g_list_free (handlers);
9227 MONO_INST_NEW (cfg, ins, OP_BR);
9228 MONO_ADD_INS (bblock, ins);
9229 GET_BBLOCK (cfg, tblock, target);
9230 link_bblock (cfg, bblock, tblock);
9231 ins->inst_target_bb = tblock;
9232 start_new_bblock = 1;
9234 if (*ip == CEE_LEAVE)
9243 * Mono specific opcodes
9245 case MONO_CUSTOM_PREFIX: {
9247 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9251 case CEE_MONO_ICALL: {
9253 MonoJitICallInfo *info;
9255 token = read32 (ip + 2);
9256 func = mono_method_get_wrapper_data (method, token);
9257 info = mono_find_jit_icall_by_addr (func);
9260 CHECK_STACK (info->sig->param_count);
9261 sp -= info->sig->param_count;
9263 ins = mono_emit_jit_icall (cfg, info->func, sp);
9264 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9268 inline_costs += 10 * num_calls++;
9272 case CEE_MONO_LDPTR: {
9275 CHECK_STACK_OVF (1);
9277 token = read32 (ip + 2);
9279 ptr = mono_method_get_wrapper_data (method, token);
9280 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9281 MonoJitICallInfo *callinfo;
9282 const char *icall_name;
9284 icall_name = method->name + strlen ("__icall_wrapper_");
9285 g_assert (icall_name);
9286 callinfo = mono_find_jit_icall_by_name (icall_name);
9287 g_assert (callinfo);
9289 if (ptr == callinfo->func) {
9290 /* Will be transformed into an AOTCONST later */
9291 EMIT_NEW_PCONST (cfg, ins, ptr);
9297 /* FIXME: Generalize this */
9298 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9299 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9304 EMIT_NEW_PCONST (cfg, ins, ptr);
9307 inline_costs += 10 * num_calls++;
9308 /* Can't embed random pointers into AOT code */
9309 cfg->disable_aot = 1;
9312 case CEE_MONO_ICALL_ADDR: {
9313 MonoMethod *cmethod;
9316 CHECK_STACK_OVF (1);
9318 token = read32 (ip + 2);
9320 cmethod = mono_method_get_wrapper_data (method, token);
9322 if (cfg->compile_aot) {
9323 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9325 ptr = mono_lookup_internal_call (cmethod);
9327 EMIT_NEW_PCONST (cfg, ins, ptr);
9333 case CEE_MONO_VTADDR: {
9334 MonoInst *src_var, *src;
9340 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9341 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9346 case CEE_MONO_NEWOBJ: {
9347 MonoInst *iargs [2];
9349 CHECK_STACK_OVF (1);
9351 token = read32 (ip + 2);
9352 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9353 mono_class_init (klass);
9354 NEW_DOMAINCONST (cfg, iargs [0]);
9355 MONO_ADD_INS (cfg->cbb, iargs [0]);
9356 NEW_CLASSCONST (cfg, iargs [1], klass);
9357 MONO_ADD_INS (cfg->cbb, iargs [1]);
9358 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9360 inline_costs += 10 * num_calls++;
9363 case CEE_MONO_OBJADDR:
9366 MONO_INST_NEW (cfg, ins, OP_MOVE);
9367 ins->dreg = alloc_preg (cfg);
9368 ins->sreg1 = sp [0]->dreg;
9369 ins->type = STACK_MP;
9370 MONO_ADD_INS (cfg->cbb, ins);
9374 case CEE_MONO_LDNATIVEOBJ:
9376 * Similar to LDOBJ, but instead load the unmanaged
9377 * representation of the vtype to the stack.
9382 token = read32 (ip + 2);
9383 klass = mono_method_get_wrapper_data (method, token);
9384 g_assert (klass->valuetype);
9385 mono_class_init (klass);
9388 MonoInst *src, *dest, *temp;
9391 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9392 temp->backend.is_pinvoke = 1;
9393 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9394 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9396 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9397 dest->type = STACK_VTYPE;
9398 dest->klass = klass;
9404 case CEE_MONO_RETOBJ: {
9406 * Same as RET, but return the native representation of a vtype
9409 g_assert (cfg->ret);
9410 g_assert (mono_method_signature (method)->pinvoke);
9415 token = read32 (ip + 2);
9416 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9418 if (!cfg->vret_addr) {
9419 g_assert (cfg->ret_var_is_local);
9421 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9423 EMIT_NEW_RETLOADA (cfg, ins);
9425 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9427 if (sp != stack_start)
9430 MONO_INST_NEW (cfg, ins, OP_BR);
9431 ins->inst_target_bb = end_bblock;
9432 MONO_ADD_INS (bblock, ins);
9433 link_bblock (cfg, bblock, end_bblock);
9434 start_new_bblock = 1;
9438 case CEE_MONO_CISINST:
9439 case CEE_MONO_CCASTCLASS: {
9444 token = read32 (ip + 2);
9445 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9446 if (ip [1] == CEE_MONO_CISINST)
9447 ins = handle_cisinst (cfg, klass, sp [0]);
9449 ins = handle_ccastclass (cfg, klass, sp [0]);
9455 case CEE_MONO_SAVE_LMF:
9456 case CEE_MONO_RESTORE_LMF:
9457 #ifdef MONO_ARCH_HAVE_LMF_OPS
9458 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9459 MONO_ADD_INS (bblock, ins);
9460 cfg->need_lmf_area = TRUE;
9464 case CEE_MONO_CLASSCONST:
9465 CHECK_STACK_OVF (1);
9467 token = read32 (ip + 2);
9468 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9471 inline_costs += 10 * num_calls++;
9473 case CEE_MONO_NOT_TAKEN:
9474 bblock->out_of_line = TRUE;
9478 CHECK_STACK_OVF (1);
9480 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9481 ins->dreg = alloc_preg (cfg);
9482 ins->inst_offset = (gint32)read32 (ip + 2);
9483 ins->type = STACK_PTR;
9484 MONO_ADD_INS (bblock, ins);
9488 case CEE_MONO_DYN_CALL: {
9491 /* It would be easier to call a trampoline, but that would put an
9492 * extra frame on the stack, confusing exception handling. So
9493 * implement it inline using an opcode for now.
9496 if (!cfg->dyn_call_var) {
9497 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9498 /* prevent it from being register allocated */
9499 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9502 /* Has to use a call inst since it local regalloc expects it */
9503 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9504 ins = (MonoInst*)call;
9506 ins->sreg1 = sp [0]->dreg;
9507 ins->sreg2 = sp [1]->dreg;
9508 MONO_ADD_INS (bblock, ins);
9510 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9511 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9515 inline_costs += 10 * num_calls++;
9520 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9530 /* somewhat similar to LDTOKEN */
9531 MonoInst *addr, *vtvar;
9532 CHECK_STACK_OVF (1);
9533 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9535 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9536 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9538 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9539 ins->type = STACK_VTYPE;
9540 ins->klass = mono_defaults.argumenthandle_class;
9553 * The following transforms:
9554 * CEE_CEQ into OP_CEQ
9555 * CEE_CGT into OP_CGT
9556 * CEE_CGT_UN into OP_CGT_UN
9557 * CEE_CLT into OP_CLT
9558 * CEE_CLT_UN into OP_CLT_UN
9560 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9562 MONO_INST_NEW (cfg, ins, cmp->opcode);
9564 cmp->sreg1 = sp [0]->dreg;
9565 cmp->sreg2 = sp [1]->dreg;
9566 type_from_op (cmp, sp [0], sp [1]);
9568 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9569 cmp->opcode = OP_LCOMPARE;
9570 else if (sp [0]->type == STACK_R8)
9571 cmp->opcode = OP_FCOMPARE;
9573 cmp->opcode = OP_ICOMPARE;
9574 MONO_ADD_INS (bblock, cmp);
9575 ins->type = STACK_I4;
9576 ins->dreg = alloc_dreg (cfg, ins->type);
9577 type_from_op (ins, sp [0], sp [1]);
9579 if (cmp->opcode == OP_FCOMPARE) {
9581 * The backends expect the fceq opcodes to do the
9584 cmp->opcode = OP_NOP;
9585 ins->sreg1 = cmp->sreg1;
9586 ins->sreg2 = cmp->sreg2;
9588 MONO_ADD_INS (bblock, ins);
9595 MonoMethod *cil_method;
9596 gboolean needs_static_rgctx_invoke;
9598 CHECK_STACK_OVF (1);
9600 n = read32 (ip + 2);
9601 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9602 if (!cmethod || mono_loader_get_last_error ())
9604 mono_class_init (cmethod->klass);
9606 mono_save_token_info (cfg, image, n, cmethod);
9608 if (cfg->generic_sharing_context)
9609 context_used = mono_method_check_context_used (cmethod);
9611 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9613 cil_method = cmethod;
9614 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9615 METHOD_ACCESS_FAILURE;
9617 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9618 if (check_linkdemand (cfg, method, cmethod))
9620 CHECK_CFG_EXCEPTION;
9621 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9622 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9626 * Optimize the common case of ldftn+delegate creation
9628 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9629 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9630 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9632 int invoke_context_used = 0;
9634 invoke = mono_get_delegate_invoke (ctor_method->klass);
9635 if (!invoke || !mono_method_signature (invoke))
9638 if (cfg->generic_sharing_context)
9639 invoke_context_used = mono_method_check_context_used (invoke);
9641 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9642 /* FIXME: SGEN support */
9643 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9644 MonoInst *target_ins;
9647 if (cfg->verbose_level > 3)
9648 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9649 target_ins = sp [-1];
9651 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9652 CHECK_CFG_EXCEPTION;
9661 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9662 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9666 inline_costs += 10 * num_calls++;
9669 case CEE_LDVIRTFTN: {
9674 n = read32 (ip + 2);
9675 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9676 if (!cmethod || mono_loader_get_last_error ())
9678 mono_class_init (cmethod->klass);
9680 if (cfg->generic_sharing_context)
9681 context_used = mono_method_check_context_used (cmethod);
9683 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9684 if (check_linkdemand (cfg, method, cmethod))
9686 CHECK_CFG_EXCEPTION;
9687 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9688 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9694 args [1] = emit_get_rgctx_method (cfg, context_used,
9695 cmethod, MONO_RGCTX_INFO_METHOD);
9698 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9700 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9703 inline_costs += 10 * num_calls++;
9707 CHECK_STACK_OVF (1);
9709 n = read16 (ip + 2);
9711 EMIT_NEW_ARGLOAD (cfg, ins, n);
9716 CHECK_STACK_OVF (1);
9718 n = read16 (ip + 2);
9720 NEW_ARGLOADA (cfg, ins, n);
9721 MONO_ADD_INS (cfg->cbb, ins);
9729 n = read16 (ip + 2);
9731 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9733 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9737 CHECK_STACK_OVF (1);
9739 n = read16 (ip + 2);
9741 EMIT_NEW_LOCLOAD (cfg, ins, n);
9746 unsigned char *tmp_ip;
9747 CHECK_STACK_OVF (1);
9749 n = read16 (ip + 2);
9752 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9758 EMIT_NEW_LOCLOADA (cfg, ins, n);
9767 n = read16 (ip + 2);
9769 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9771 emit_stloc_ir (cfg, sp, header, n);
9778 if (sp != stack_start)
9780 if (cfg->method != method)
9782 * Inlining this into a loop in a parent could lead to
9783 * stack overflows which is different behavior than the
9784 * non-inlined case, thus disable inlining in this case.
9786 goto inline_failure;
9788 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9789 ins->dreg = alloc_preg (cfg);
9790 ins->sreg1 = sp [0]->dreg;
9791 ins->type = STACK_PTR;
9792 MONO_ADD_INS (cfg->cbb, ins);
9794 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9796 ins->flags |= MONO_INST_INIT;
9801 case CEE_ENDFILTER: {
9802 MonoExceptionClause *clause, *nearest;
9803 int cc, nearest_num;
9807 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9809 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9810 ins->sreg1 = (*sp)->dreg;
9811 MONO_ADD_INS (bblock, ins);
9812 start_new_bblock = 1;
9817 for (cc = 0; cc < header->num_clauses; ++cc) {
9818 clause = &header->clauses [cc];
9819 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9820 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9821 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9827 if ((ip - header->code) != nearest->handler_offset)
9832 case CEE_UNALIGNED_:
9833 ins_flag |= MONO_INST_UNALIGNED;
9834 /* FIXME: record alignment? we can assume 1 for now */
9839 ins_flag |= MONO_INST_VOLATILE;
9843 ins_flag |= MONO_INST_TAILCALL;
9844 cfg->flags |= MONO_CFG_HAS_TAIL;
9845 /* Can't inline tail calls at this time */
9846 inline_costs += 100000;
9853 token = read32 (ip + 2);
9854 klass = mini_get_class (method, token, generic_context);
9855 CHECK_TYPELOAD (klass);
9856 if (generic_class_is_reference_type (cfg, klass))
9857 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9859 mini_emit_initobj (cfg, *sp, NULL, klass);
9863 case CEE_CONSTRAINED_:
9865 token = read32 (ip + 2);
9866 if (method->wrapper_type != MONO_WRAPPER_NONE)
9867 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9869 constrained_call = mono_class_get_full (image, token, generic_context);
9870 CHECK_TYPELOAD (constrained_call);
9875 MonoInst *iargs [3];
9879 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9880 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9881 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9882 /* emit_memset only works when val == 0 */
9883 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9888 if (ip [1] == CEE_CPBLK) {
9889 MonoMethod *memcpy_method = get_memcpy_method ();
9890 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9892 MonoMethod *memset_method = get_memset_method ();
9893 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9903 ins_flag |= MONO_INST_NOTYPECHECK;
9905 ins_flag |= MONO_INST_NORANGECHECK;
9906 /* we ignore the no-nullcheck for now since we
9907 * really do it explicitly only when doing callvirt->call
9913 int handler_offset = -1;
9915 for (i = 0; i < header->num_clauses; ++i) {
9916 MonoExceptionClause *clause = &header->clauses [i];
9917 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9918 handler_offset = clause->handler_offset;
9923 bblock->flags |= BB_EXCEPTION_UNSAFE;
9925 g_assert (handler_offset != -1);
9927 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9928 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9929 ins->sreg1 = load->dreg;
9930 MONO_ADD_INS (bblock, ins);
9932 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9933 MONO_ADD_INS (bblock, ins);
9936 link_bblock (cfg, bblock, end_bblock);
9937 start_new_bblock = 1;
9945 CHECK_STACK_OVF (1);
9947 token = read32 (ip + 2);
9948 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9949 MonoType *type = mono_type_create_from_typespec (image, token);
9950 token = mono_type_size (type, &ialign);
9952 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9953 CHECK_TYPELOAD (klass);
9954 mono_class_init (klass);
9955 token = mono_class_value_size (klass, &align);
9957 EMIT_NEW_ICONST (cfg, ins, token);
9962 case CEE_REFANYTYPE: {
9963 MonoInst *src_var, *src;
9969 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9971 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9972 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9973 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9991 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10001 g_warning ("opcode 0x%02x not handled", *ip);
10005 if (start_new_bblock != 1)
10008 bblock->cil_length = ip - bblock->cil_code;
10009 bblock->next_bb = end_bblock;
10011 if (cfg->method == method && cfg->domainvar) {
10013 MonoInst *get_domain;
10015 cfg->cbb = init_localsbb;
10017 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10018 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10021 get_domain->dreg = alloc_preg (cfg);
10022 MONO_ADD_INS (cfg->cbb, get_domain);
10024 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10025 MONO_ADD_INS (cfg->cbb, store);
10028 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10029 if (cfg->compile_aot)
10030 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10031 mono_get_got_var (cfg);
10034 if (cfg->method == method && cfg->got_var)
10035 mono_emit_load_got_addr (cfg);
10040 cfg->cbb = init_localsbb;
10042 for (i = 0; i < header->num_locals; ++i) {
10043 MonoType *ptype = header->locals [i];
10044 int t = ptype->type;
10045 dreg = cfg->locals [i]->dreg;
10047 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10048 t = mono_class_enum_basetype (ptype->data.klass)->type;
10049 if (ptype->byref) {
10050 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10051 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10052 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10053 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10054 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10055 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10056 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10057 ins->type = STACK_R8;
10058 ins->inst_p0 = (void*)&r8_0;
10059 ins->dreg = alloc_dreg (cfg, STACK_R8);
10060 MONO_ADD_INS (init_localsbb, ins);
10061 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10062 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10063 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10064 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10066 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10071 if (cfg->init_ref_vars && cfg->method == method) {
10072 /* Emit initialization for ref vars */
10073 // FIXME: Avoid duplication initialization for IL locals.
10074 for (i = 0; i < cfg->num_varinfo; ++i) {
10075 MonoInst *ins = cfg->varinfo [i];
10077 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10078 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10082 /* Add a sequence point for method entry/exit events */
10084 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10085 MONO_ADD_INS (init_localsbb, ins);
10086 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10087 MONO_ADD_INS (cfg->bb_exit, ins);
10092 if (cfg->method == method) {
10093 MonoBasicBlock *bb;
10094 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10095 bb->region = mono_find_block_region (cfg, bb->real_offset);
10097 mono_create_spvar_for_region (cfg, bb->region);
10098 if (cfg->verbose_level > 2)
10099 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10103 g_slist_free (class_inits);
10104 dont_inline = g_list_remove (dont_inline, method);
10106 if (inline_costs < 0) {
10109 /* Method is too large */
10110 mname = mono_method_full_name (method, TRUE);
10111 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10112 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10114 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10115 mono_basic_block_free (original_bb);
10119 if ((cfg->verbose_level > 2) && (cfg->method == method))
10120 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10122 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10123 mono_basic_block_free (original_bb);
10124 return inline_costs;
10127 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10134 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10138 set_exception_type_from_invalid_il (cfg, method, ip);
10142 g_slist_free (class_inits);
10143 mono_basic_block_free (original_bb);
10144 dont_inline = g_list_remove (dont_inline, method);
10145 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the immediate-source counterpart of a register-source
 * store-membase opcode, used when the value being stored is known to be
 * a constant.  Aborts for opcodes without an immediate form.
 */
10150 store_membase_reg_to_store_membase_imm (int opcode)
10153 case OP_STORE_MEMBASE_REG:
10154 return OP_STORE_MEMBASE_IMM;
10155 case OP_STOREI1_MEMBASE_REG:
10156 return OP_STOREI1_MEMBASE_IMM;
10157 case OP_STOREI2_MEMBASE_REG:
10158 return OP_STOREI2_MEMBASE_IMM;
10159 case OP_STOREI4_MEMBASE_REG:
10160 return OP_STOREI4_MEMBASE_IMM;
10161 case OP_STOREI8_MEMBASE_REG:
10162 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for this store opcode */
10164 g_assert_not_reached ();
10170 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate form, so a
 * constant operand can be folded directly into the instruction instead of
 * being loaded into a vreg first.  Opcodes with no immediate form map to
 * -1 (handled in the elided default path).
 */
10173 mono_op_to_op_imm (int opcode)
/* 32 bit ALU ops */
10177 return OP_IADD_IMM;
10179 return OP_ISUB_IMM;
10181 return OP_IDIV_IMM;
10183 return OP_IDIV_UN_IMM;
10185 return OP_IREM_IMM;
10187 return OP_IREM_UN_IMM;
10189 return OP_IMUL_IMM;
10191 return OP_IAND_IMM;
10195 return OP_IXOR_IMM;
10197 return OP_ISHL_IMM;
10199 return OP_ISHR_IMM;
10201 return OP_ISHR_UN_IMM;
/* 64 bit ALU ops */
10204 return OP_LADD_IMM;
10206 return OP_LSUB_IMM;
10208 return OP_LAND_IMM;
10212 return OP_LXOR_IMM;
10214 return OP_LSHL_IMM;
10216 return OP_LSHR_IMM;
10218 return OP_LSHR_UN_IMM;
/* Compares */
10221 return OP_COMPARE_IMM;
10223 return OP_ICOMPARE_IMM;
10225 return OP_LCOMPARE_IMM;
/* Stores */
10227 case OP_STORE_MEMBASE_REG:
10228 return OP_STORE_MEMBASE_IMM;
10229 case OP_STOREI1_MEMBASE_REG:
10230 return OP_STOREI1_MEMBASE_IMM;
10231 case OP_STOREI2_MEMBASE_REG:
10232 return OP_STOREI2_MEMBASE_IMM;
10233 case OP_STOREI4_MEMBASE_REG:
10234 return OP_STOREI4_MEMBASE_IMM;
/* Architecture specific immediate forms */
10236 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10238 return OP_X86_PUSH_IMM;
10239 case OP_X86_COMPARE_MEMBASE_REG:
10240 return OP_X86_COMPARE_MEMBASE_IMM;
10242 #if defined(TARGET_AMD64)
10243 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10244 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant address becomes a direct call */
10246 case OP_VOIDCALL_REG:
10247 return OP_VOIDCALL;
10255 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the corresponding typed load-membase IR
 * opcode.  LDIND_I and LDIND_REF both load a pointer-sized value, hence
 * both map to OP_LOAD_MEMBASE.  Aborts on an unexpected opcode.
 */
10262 ldind_to_load_membase (int opcode)
10266 return OP_LOADI1_MEMBASE;
10268 return OP_LOADU1_MEMBASE;
10270 return OP_LOADI2_MEMBASE;
10272 return OP_LOADU2_MEMBASE;
10274 return OP_LOADI4_MEMBASE;
10276 return OP_LOADU4_MEMBASE;
10278 return OP_LOAD_MEMBASE;
10279 case CEE_LDIND_REF:
10280 return OP_LOAD_MEMBASE;
10282 return OP_LOADI8_MEMBASE;
10284 return OP_LOADR4_MEMBASE;
10286 return OP_LOADR8_MEMBASE;
10288 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the corresponding typed store-membase IR
 * opcode.  STIND_REF stores a pointer-sized value (OP_STORE_MEMBASE_REG);
 * any required GC write barrier is emitted elsewhere.  Aborts on an
 * unexpected opcode.
 */
10295 stind_to_store_membase (int opcode)
10299 return OP_STOREI1_MEMBASE_REG;
10301 return OP_STOREI2_MEMBASE_REG;
10303 return OP_STOREI4_MEMBASE_REG;
10305 case CEE_STIND_REF:
10306 return OP_STORE_MEMBASE_REG;
10308 return OP_STOREI8_MEMBASE_REG;
10310 return OP_STORER4_MEMBASE_REG;
10312 return OP_STORER8_MEMBASE_REG;
10314 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-membase (base register + offset) opcode to the absolute
 * address LOAD_MEM form, for targets which support loads from a constant
 * address.  Only implemented on x86/amd64; elsewhere (and for opcodes
 * without a MEM form) the elided fallthrough path returns a sentinel.
 */
10321 mono_load_membase_to_load_mem (int opcode)
10323 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10324 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10326 case OP_LOAD_MEMBASE:
10327 return OP_LOAD_MEM;
10328 case OP_LOADU1_MEMBASE:
10329 return OP_LOADU1_MEM;
10330 case OP_LOADU2_MEMBASE:
10331 return OP_LOADU2_MEM;
10332 case OP_LOADI4_MEMBASE:
10333 return OP_LOADI4_MEM;
10334 case OP_LOADU4_MEMBASE:
10335 return OP_LOADU4_MEM;
/* 64 bit loads only have a MEM form on 64 bit targets */
10336 #if SIZEOF_REGISTER == 8
10337 case OP_LOADI8_MEMBASE:
10338 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   For a store of the form [base+offset] = ALU-op, return the x86/amd64
 * read-modify-write opcode which performs the ALU operation directly on
 * memory, so a separate load/op/store sequence can be folded into one
 * instruction.  STORE_OPCODE restricts the folding to store widths the
 * target can handle; unsupported combinations fall through to the elided
 * -1 default.
 */
10347 op_to_op_dest_membase (int store_opcode, int opcode)
10349 #if defined(TARGET_X86)
/* Only pointer/int32 sized stores can be folded on x86 */
10350 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10355 return OP_X86_ADD_MEMBASE_REG;
10357 return OP_X86_SUB_MEMBASE_REG;
10359 return OP_X86_AND_MEMBASE_REG;
10361 return OP_X86_OR_MEMBASE_REG;
10363 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-operand variants */
10366 return OP_X86_ADD_MEMBASE_IMM;
10369 return OP_X86_SUB_MEMBASE_IMM;
10372 return OP_X86_AND_MEMBASE_IMM;
10375 return OP_X86_OR_MEMBASE_IMM;
10378 return OP_X86_XOR_MEMBASE_IMM;
10384 #if defined(TARGET_AMD64)
/* amd64 additionally folds 64 bit stores */
10385 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10390 return OP_X86_ADD_MEMBASE_REG;
10392 return OP_X86_SUB_MEMBASE_REG;
10394 return OP_X86_AND_MEMBASE_REG;
10396 return OP_X86_OR_MEMBASE_REG;
10398 return OP_X86_XOR_MEMBASE_REG;
10400 return OP_X86_ADD_MEMBASE_IMM;
10402 return OP_X86_SUB_MEMBASE_IMM;
10404 return OP_X86_AND_MEMBASE_IMM;
10406 return OP_X86_OR_MEMBASE_IMM;
10408 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit register-operand variants */
10410 return OP_AMD64_ADD_MEMBASE_REG;
10412 return OP_AMD64_SUB_MEMBASE_REG;
10414 return OP_AMD64_AND_MEMBASE_REG;
10416 return OP_AMD64_OR_MEMBASE_REG;
10418 return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit immediate-operand variants */
10421 return OP_AMD64_ADD_MEMBASE_IMM;
10424 return OP_AMD64_SUB_MEMBASE_IMM;
10427 return OP_AMD64_AND_MEMBASE_IMM;
10430 return OP_AMD64_OR_MEMBASE_IMM;
10433 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result (ceq/cne style) opcode whose single-byte result
 * is immediately stored to memory into one x86 SETcc-to-membase
 * instruction.  Only byte-sized stores qualify; everything else falls
 * through to the elided -1 default.
 */
10443 op_to_op_store_membase (int store_opcode, int opcode)
10445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10448 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10449 return OP_X86_SETEQ_MEMBASE;
10451 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10452 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the FIRST source operand of OPCODE into a
 * memory-operand form of the instruction (x86/amd64), eliminating the
 * separate load.  LOAD_OPCODE determines the width of the folded access;
 * mismatched widths fall through to the elided -1 default.
 */
10460 op_to_op_src1_membase (int load_opcode, int opcode)
10463 /* FIXME: This has sign extension issues */
10465 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10466 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer/int32 sized loads can be folded on x86 */
10469 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10474 return OP_X86_PUSH_MEMBASE;
10475 case OP_COMPARE_IMM:
10476 case OP_ICOMPARE_IMM:
10477 return OP_X86_COMPARE_MEMBASE_IMM;
10480 return OP_X86_COMPARE_MEMBASE_REG;
10484 #ifdef TARGET_AMD64
10485 /* FIXME: This has sign extension issues */
10487 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10488 return OP_X86_COMPARE_MEMBASE8_IMM;
10493 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10494 return OP_X86_PUSH_MEMBASE;
/* Deliberately disabled: immediates wider than 32 bits can't be encoded */
10496 /* FIXME: This only works for 32 bit immediates
10497 case OP_COMPARE_IMM:
10498 case OP_LCOMPARE_IMM:
10499 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10500 return OP_AMD64_COMPARE_MEMBASE_IMM;
10502 case OP_ICOMPARE_IMM:
10503 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10504 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10508 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10509 return OP_AMD64_COMPARE_MEMBASE_REG;
10512 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10513 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the SECOND source operand of OPCODE into a
 * reg,membase form of the instruction (x86/amd64), eliminating the
 * separate load.  On amd64, 32 bit loads fold into the x86-style opcodes
 * and 64 bit loads into the AMD64-specific ones; unsupported widths fall
 * through to the elided -1 default.
 */
10522 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32 sized loads can be folded */
10525 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10531 return OP_X86_COMPARE_REG_MEMBASE;
10533 return OP_X86_ADD_REG_MEMBASE;
10535 return OP_X86_SUB_REG_MEMBASE;
10537 return OP_X86_AND_REG_MEMBASE;
10539 return OP_X86_OR_REG_MEMBASE;
10541 return OP_X86_XOR_REG_MEMBASE;
10545 #ifdef TARGET_AMD64
10546 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10549 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10551 return OP_X86_ADD_REG_MEMBASE;
10553 return OP_X86_SUB_REG_MEMBASE;
10555 return OP_X86_AND_REG_MEMBASE;
10557 return OP_X86_OR_REG_MEMBASE;
10559 return OP_X86_XOR_REG_MEMBASE;
10561 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10565 return OP_AMD64_COMPARE_REG_MEMBASE;
10567 return OP_AMD64_ADD_REG_MEMBASE;
10569 return OP_AMD64_SUB_REG_MEMBASE;
10571 return OP_AMD64_AND_REG_MEMBASE;
10573 return OP_AMD64_OR_REG_MEMBASE;
10575 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which the
 * current architecture emulates in software (long shifts on 32 bit
 * targets without native long-shift ops, and mul/div where emulated),
 * since the emulation path cannot take an immediate operand.
 */
10584 mono_op_to_op_imm_noemul (int opcode)
10587 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10593 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10601 return mono_op_to_op_imm (opcode);
10605 #ifndef DISABLE_JIT
10608 * mono_handle_global_vregs:
10610 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Two-way pass over all bblocks:
 * 1) any vreg referenced from more than one bblock is promoted to a
 *    'global' variable (a MonoInst the register allocator can spill);
 * 2) any variable referenced from only one bblock is demoted back to a
 *    local vreg, and dead entries are compacted out of the
 *    varinfo/vars tables.
 *   The vreg_to_bb array tracks, per vreg, the single block (+1, since
 * block 0 is valid) that uses it, or -1 once a second block is seen.
 */
10614 mono_handle_global_vregs (MonoCompile *cfg)
10616 gint32 *vreg_to_bb;
10617 MonoBasicBlock *bb;
/* FIXME(review): size expression looks off: sizeof (gint32*) uses the
 * pointer size rather than sizeof (gint32), and the '+ 1' adds one byte
 * instead of one element -- presumably meant
 * sizeof (gint32) * (cfg->next_vreg + 1).  Harmless over-allocation on
 * 64 bit, but worth confirming upstream. */
10620 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10622 #ifdef MONO_ARCH_SIMD_INTRINSICS
10623 if (cfg->uses_simd_intrinsics)
10624 mono_simd_simplify_indirection (cfg);
10627 /* Find local vregs used in more than one bb */
10628 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10629 MonoInst *ins = bb->code;
10630 int block_num = bb->block_num;
10632 if (cfg->verbose_level > 2)
10633 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10636 for (; ins; ins = ins->next) {
10637 const char *spec = INS_INFO (ins->opcode);
10638 int regtype = 0, regindex;
10641 if (G_UNLIKELY (cfg->verbose_level > 2))
10642 mono_print_ins (ins);
/* By this point only machine IR opcodes should remain */
10644 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn */
10646 for (regindex = 0; regindex < 4; regindex ++) {
10649 if (regindex == 0) {
10650 regtype = spec [MONO_INST_DEST];
10651 if (regtype == ' ')
10654 } else if (regindex == 1) {
10655 regtype = spec [MONO_INST_SRC1];
10656 if (regtype == ' ')
10659 } else if (regindex == 2) {
10660 regtype = spec [MONO_INST_SRC2];
10661 if (regtype == ' ')
10664 } else if (regindex == 3) {
10665 regtype = spec [MONO_INST_SRC3];
10666 if (regtype == ' ')
10671 #if SIZEOF_REGISTER == 4
10672 /* In the LLVM case, the long opcodes are not decomposed */
10673 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10675 * Since some instructions reference the original long vreg,
10676 * and some reference the two component vregs, it is quite hard
10677 * to determine when it needs to be global. So be conservative.
10679 if (!get_vreg_to_inst (cfg, vreg)) {
10680 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10682 if (cfg->verbose_level > 2)
10683 printf ("LONG VREG R%d made global.\n", vreg);
10687 * Make the component vregs volatile since the optimizations can
10688 * get confused otherwise.
10690 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10691 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10695 g_assert (vreg != -1);
10697 prev_bb = vreg_to_bb [vreg];
10698 if (prev_bb == 0) {
10699 /* 0 is a valid block num */
10700 vreg_to_bb [vreg] = block_num + 1;
10701 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; skip them */
10702 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10705 if (!get_vreg_to_inst (cfg, vreg)) {
10706 if (G_UNLIKELY (cfg->verbose_level > 2))
10707 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for the vreg */
10711 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10714 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10717 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10720 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10723 g_assert_not_reached ();
10727 /* Flag as having been used in more than one bb */
10728 vreg_to_bb [vreg] = -1;
10734 /* If a variable is used in only one bblock, convert it into a local vreg */
10735 for (i = 0; i < cfg->num_varinfo; i++) {
10736 MonoInst *var = cfg->varinfo [i];
10737 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10739 switch (var->type) {
10745 #if SIZEOF_REGISTER == 8
10748 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10749 /* Enabling this screws up the fp stack on x86 */
10752 /* Arguments are implicitly global */
10753 /* Putting R4 vars into registers doesn't work currently */
10754 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10756 * Make that the variable's liveness interval doesn't contain a call, since
10757 * that would cause the lvreg to be spilled, making the whole optimization
10760 /* This is too slow for JIT compilation */
10762 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10764 int def_index, call_index, ins_index;
10765 gboolean spilled = FALSE;
10770 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10771 const char *spec = INS_INFO (ins->opcode);
10773 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10774 def_index = ins_index;
/* FIXME(review): the second clause duplicates the first (SRC1/sreg1
 * twice); it was presumably meant to test SRC2/sreg2, so a use of the
 * variable as the second source after a call never marks it spilled.
 * Confirm against upstream before relying on this check. */
10776 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10777 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10778 if (call_index > def_index) {
10784 if (MONO_IS_CALL (ins))
10785 call_index = ins_index;
10795 if (G_UNLIKELY (cfg->verbose_level > 2))
10796 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the vreg lives on as a purely local vreg */
10797 var->flags |= MONO_INST_IS_DEAD;
10798 cfg->vreg_to_inst [var->dreg] = NULL;
10805 * Compress the varinfo and vars tables so the liveness computation is faster and
10806 * takes up less space.
10809 for (i = 0; i < cfg->num_varinfo; ++i) {
10810 MonoInst *var = cfg->varinfo [i];
10811 if (pos < i && cfg->locals_start == i)
10812 cfg->locals_start = pos;
10813 if (!(var->flags & MONO_INST_IS_DEAD)) {
10815 cfg->varinfo [pos] = cfg->varinfo [i];
10816 cfg->varinfo [pos]->inst_c0 = pos;
10817 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10818 cfg->vars [pos].idx = pos;
10819 #if SIZEOF_REGISTER == 4
10820 if (cfg->varinfo [pos]->type == STACK_I8) {
10821 /* Modify the two component vars too */
10824 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10825 var1->inst_c0 = pos;
10826 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10827 var1->inst_c0 = pos;
10834 cfg->num_varinfo = pos;
10835 if (cfg->locals_start > cfg->num_varinfo)
10836 cfg->locals_start = cfg->num_varinfo;
10840 * mono_spill_global_vars:
10842 * Generate spill code for variables which are not allocated to registers,
10843 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10844 * code is generated which could be optimized by the local optimization passes.
10847 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10849 MonoBasicBlock *bb;
10851 int orig_next_vreg;
10852 guint32 *vreg_to_lvreg;
10854 guint32 i, lvregs_len;
10855 gboolean dest_has_lvreg = FALSE;
10856 guint32 stacktypes [128];
10857 MonoInst **live_range_start, **live_range_end;
10858 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10860 *need_local_opts = FALSE;
10862 memset (spec2, 0, sizeof (spec2));
10864 /* FIXME: Move this function to mini.c */
10865 stacktypes ['i'] = STACK_PTR;
10866 stacktypes ['l'] = STACK_I8;
10867 stacktypes ['f'] = STACK_R8;
10868 #ifdef MONO_ARCH_SIMD_INTRINSICS
10869 stacktypes ['x'] = STACK_VTYPE;
10872 #if SIZEOF_REGISTER == 4
10873 /* Create MonoInsts for longs */
10874 for (i = 0; i < cfg->num_varinfo; i++) {
10875 MonoInst *ins = cfg->varinfo [i];
10877 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10878 switch (ins->type) {
10883 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10886 g_assert (ins->opcode == OP_REGOFFSET);
10888 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10890 tree->opcode = OP_REGOFFSET;
10891 tree->inst_basereg = ins->inst_basereg;
10892 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10894 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10896 tree->opcode = OP_REGOFFSET;
10897 tree->inst_basereg = ins->inst_basereg;
10898 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10908 /* FIXME: widening and truncation */
10911 * As an optimization, when a variable allocated to the stack is first loaded into
10912 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10913 * the variable again.
10915 orig_next_vreg = cfg->next_vreg;
10916 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10917 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10921 * These arrays contain the first and last instructions accessing a given
10923 * Since we emit bblocks in the same order we process them here, and we
10924 * don't split live ranges, these will precisely describe the live range of
10925 * the variable, i.e. the instruction range where a valid value can be found
10926 * in the variables location.
10927 * The live range is computed using the liveness info computed by the liveness pass.
10928 * We can't use vmv->range, since that is an abstract live range, and we need
10929 * one which is instruction precise.
10930 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10932 /* FIXME: Only do this if debugging info is requested */
10933 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10934 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10935 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10936 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10938 /* Add spill loads/stores */
10939 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10942 if (cfg->verbose_level > 2)
10943 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10945 /* Clear vreg_to_lvreg array */
10946 for (i = 0; i < lvregs_len; i++)
10947 vreg_to_lvreg [lvregs [i]] = 0;
10951 MONO_BB_FOR_EACH_INS (bb, ins) {
10952 const char *spec = INS_INFO (ins->opcode);
10953 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10954 gboolean store, no_lvreg;
10955 int sregs [MONO_MAX_SRC_REGS];
10957 if (G_UNLIKELY (cfg->verbose_level > 2))
10958 mono_print_ins (ins);
10960 if (ins->opcode == OP_NOP)
10964 * We handle LDADDR here as well, since it can only be decomposed
10965 * when variable addresses are known.
10967 if (ins->opcode == OP_LDADDR) {
10968 MonoInst *var = ins->inst_p0;
10970 if (var->opcode == OP_VTARG_ADDR) {
10971 /* Happens on SPARC/S390 where vtypes are passed by reference */
10972 MonoInst *vtaddr = var->inst_left;
10973 if (vtaddr->opcode == OP_REGVAR) {
10974 ins->opcode = OP_MOVE;
10975 ins->sreg1 = vtaddr->dreg;
10977 else if (var->inst_left->opcode == OP_REGOFFSET) {
10978 ins->opcode = OP_LOAD_MEMBASE;
10979 ins->inst_basereg = vtaddr->inst_basereg;
10980 ins->inst_offset = vtaddr->inst_offset;
10984 g_assert (var->opcode == OP_REGOFFSET);
10986 ins->opcode = OP_ADD_IMM;
10987 ins->sreg1 = var->inst_basereg;
10988 ins->inst_imm = var->inst_offset;
10991 *need_local_opts = TRUE;
10992 spec = INS_INFO (ins->opcode);
10995 if (ins->opcode < MONO_CEE_LAST) {
10996 mono_print_ins (ins);
10997 g_assert_not_reached ();
11001 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11005 if (MONO_IS_STORE_MEMBASE (ins)) {
11006 tmp_reg = ins->dreg;
11007 ins->dreg = ins->sreg2;
11008 ins->sreg2 = tmp_reg;
11011 spec2 [MONO_INST_DEST] = ' ';
11012 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11013 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11014 spec2 [MONO_INST_SRC3] = ' ';
11016 } else if (MONO_IS_STORE_MEMINDEX (ins))
11017 g_assert_not_reached ();
11022 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11023 printf ("\t %.3s %d", spec, ins->dreg);
11024 num_sregs = mono_inst_get_src_registers (ins, sregs);
11025 for (srcindex = 0; srcindex < 3; ++srcindex)
11026 printf (" %d", sregs [srcindex]);
11033 regtype = spec [MONO_INST_DEST];
11034 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11037 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11038 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11039 MonoInst *store_ins;
11041 MonoInst *def_ins = ins;
11042 int dreg = ins->dreg; /* The original vreg */
11044 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11046 if (var->opcode == OP_REGVAR) {
11047 ins->dreg = var->dreg;
11048 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11050 * Instead of emitting a load+store, use a _membase opcode.
11052 g_assert (var->opcode == OP_REGOFFSET);
11053 if (ins->opcode == OP_MOVE) {
11057 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11058 ins->inst_basereg = var->inst_basereg;
11059 ins->inst_offset = var->inst_offset;
11062 spec = INS_INFO (ins->opcode);
11066 g_assert (var->opcode == OP_REGOFFSET);
11068 prev_dreg = ins->dreg;
11070 /* Invalidate any previous lvreg for this vreg */
11071 vreg_to_lvreg [ins->dreg] = 0;
11075 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11077 store_opcode = OP_STOREI8_MEMBASE_REG;
11080 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11082 if (regtype == 'l') {
11083 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11084 mono_bblock_insert_after_ins (bb, ins, store_ins);
11085 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11086 mono_bblock_insert_after_ins (bb, ins, store_ins);
11087 def_ins = store_ins;
11090 g_assert (store_opcode != OP_STOREV_MEMBASE);
11092 /* Try to fuse the store into the instruction itself */
11093 /* FIXME: Add more instructions */
11094 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11095 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11096 ins->inst_imm = ins->inst_c0;
11097 ins->inst_destbasereg = var->inst_basereg;
11098 ins->inst_offset = var->inst_offset;
11099 spec = INS_INFO (ins->opcode);
11100 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11101 ins->opcode = store_opcode;
11102 ins->inst_destbasereg = var->inst_basereg;
11103 ins->inst_offset = var->inst_offset;
11107 tmp_reg = ins->dreg;
11108 ins->dreg = ins->sreg2;
11109 ins->sreg2 = tmp_reg;
11112 spec2 [MONO_INST_DEST] = ' ';
11113 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11114 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11115 spec2 [MONO_INST_SRC3] = ' ';
11117 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11118 // FIXME: The backends expect the base reg to be in inst_basereg
11119 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11121 ins->inst_basereg = var->inst_basereg;
11122 ins->inst_offset = var->inst_offset;
11123 spec = INS_INFO (ins->opcode);
11125 /* printf ("INS: "); mono_print_ins (ins); */
11126 /* Create a store instruction */
11127 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11129 /* Insert it after the instruction */
11130 mono_bblock_insert_after_ins (bb, ins, store_ins);
11132 def_ins = store_ins;
11135 * We can't assign ins->dreg to var->dreg here, since the
11136 * sregs could use it. So set a flag, and do it after
11139 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11140 dest_has_lvreg = TRUE;
11145 if (def_ins && !live_range_start [dreg]) {
11146 live_range_start [dreg] = def_ins;
11147 live_range_start_bb [dreg] = bb;
11154 num_sregs = mono_inst_get_src_registers (ins, sregs);
11155 for (srcindex = 0; srcindex < 3; ++srcindex) {
11156 regtype = spec [MONO_INST_SRC1 + srcindex];
11157 sreg = sregs [srcindex];
11159 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11160 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11161 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11162 MonoInst *use_ins = ins;
11163 MonoInst *load_ins;
11164 guint32 load_opcode;
11166 if (var->opcode == OP_REGVAR) {
11167 sregs [srcindex] = var->dreg;
11168 //mono_inst_set_src_registers (ins, sregs);
11169 live_range_end [sreg] = use_ins;
11170 live_range_end_bb [sreg] = bb;
11174 g_assert (var->opcode == OP_REGOFFSET);
11176 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11178 g_assert (load_opcode != OP_LOADV_MEMBASE);
11180 if (vreg_to_lvreg [sreg]) {
11181 g_assert (vreg_to_lvreg [sreg] != -1);
11183 /* The variable is already loaded to an lvreg */
11184 if (G_UNLIKELY (cfg->verbose_level > 2))
11185 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11186 sregs [srcindex] = vreg_to_lvreg [sreg];
11187 //mono_inst_set_src_registers (ins, sregs);
11191 /* Try to fuse the load into the instruction */
11192 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11193 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11194 sregs [0] = var->inst_basereg;
11195 //mono_inst_set_src_registers (ins, sregs);
11196 ins->inst_offset = var->inst_offset;
11197 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11198 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11199 sregs [1] = var->inst_basereg;
11200 //mono_inst_set_src_registers (ins, sregs);
11201 ins->inst_offset = var->inst_offset;
11203 if (MONO_IS_REAL_MOVE (ins)) {
11204 ins->opcode = OP_NOP;
11207 //printf ("%d ", srcindex); mono_print_ins (ins);
11209 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11211 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11212 if (var->dreg == prev_dreg) {
11214 * sreg refers to the value loaded by the load
11215 * emitted below, but we need to use ins->dreg
11216 * since it refers to the store emitted earlier.
11220 g_assert (sreg != -1);
11221 vreg_to_lvreg [var->dreg] = sreg;
11222 g_assert (lvregs_len < 1024);
11223 lvregs [lvregs_len ++] = var->dreg;
11227 sregs [srcindex] = sreg;
11228 //mono_inst_set_src_registers (ins, sregs);
11230 if (regtype == 'l') {
11231 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11232 mono_bblock_insert_before_ins (bb, ins, load_ins);
11233 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11234 mono_bblock_insert_before_ins (bb, ins, load_ins);
11235 use_ins = load_ins;
11238 #if SIZEOF_REGISTER == 4
11239 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11241 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11242 mono_bblock_insert_before_ins (bb, ins, load_ins);
11243 use_ins = load_ins;
11247 if (var->dreg < orig_next_vreg) {
11248 live_range_end [var->dreg] = use_ins;
11249 live_range_end_bb [var->dreg] = bb;
11253 mono_inst_set_src_registers (ins, sregs);
11255 if (dest_has_lvreg) {
11256 g_assert (ins->dreg != -1);
11257 vreg_to_lvreg [prev_dreg] = ins->dreg;
11258 g_assert (lvregs_len < 1024);
11259 lvregs [lvregs_len ++] = prev_dreg;
11260 dest_has_lvreg = FALSE;
11264 tmp_reg = ins->dreg;
11265 ins->dreg = ins->sreg2;
11266 ins->sreg2 = tmp_reg;
11269 if (MONO_IS_CALL (ins)) {
11270 /* Clear vreg_to_lvreg array */
11271 for (i = 0; i < lvregs_len; i++)
11272 vreg_to_lvreg [lvregs [i]] = 0;
11274 } else if (ins->opcode == OP_NOP) {
11276 MONO_INST_NULLIFY_SREGS (ins);
11279 if (cfg->verbose_level > 2)
11280 mono_print_ins_index (1, ins);
11283 /* Extend the live range based on the liveness info */
11284 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11285 for (i = 0; i < cfg->num_varinfo; i ++) {
11286 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11288 if (vreg_is_volatile (cfg, vi->vreg))
11289 /* The liveness info is incomplete */
11292 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11293 /* Live from at least the first ins of this bb */
11294 live_range_start [vi->vreg] = bb->code;
11295 live_range_start_bb [vi->vreg] = bb;
11298 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11299 /* Live at least until the last ins of this bb */
11300 live_range_end [vi->vreg] = bb->last_ins;
11301 live_range_end_bb [vi->vreg] = bb;
11307 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11309 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11310 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11312 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11313 for (i = 0; i < cfg->num_varinfo; ++i) {
11314 int vreg = MONO_VARINFO (cfg, i)->vreg;
11317 if (live_range_start [vreg]) {
11318 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11320 ins->inst_c1 = vreg;
11321 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11323 if (live_range_end [vreg]) {
11324 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11326 ins->inst_c1 = vreg;
11327 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11328 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11330 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11336 g_free (live_range_start);
11337 g_free (live_range_end);
11338 g_free (live_range_start_bb);
11339 g_free (live_range_end_bb);
11344 * - use 'iadd' instead of 'int_add'
11345 * - handling ovf opcodes: decompose in method_to_ir.
11346 * - unify iregs/fregs
11347 * -> partly done, the missing parts are:
11348 * - a more complete unification would involve unifying the hregs as well, so
11349 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11350 * would no longer map to the machine hregs, so the code generators would need to
11351 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11352 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11353 * fp/non-fp branches speeds it up by about 15%.
11354 * - use sext/zext opcodes instead of shifts
11356 * - get rid of TEMPLOADs if possible and use vregs instead
11357 * - clean up usage of OP_P/OP_ opcodes
11358 * - cleanup usage of DUMMY_USE
11359 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11361 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11362 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11363 * - make sure handle_stack_args () is called before the branch is emitted
11364 * - when the new IR is done, get rid of all unused stuff
11365 * - COMPARE/BEQ as separate instructions or unify them ?
11366 * - keeping them separate allows specialized compare instructions like
11367 * compare_imm, compare_membase
11368 * - most back ends unify fp compare+branch, fp compare+ceq
11369 * - integrate mono_save_args into inline_method
11370 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11371 * - handle long shift opts on 32 bit platforms somehow: they require
11372 * 3 sregs (2 for arg1 and 1 for arg2)
11373 * - make byref a 'normal' type.
11374 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11375 * variable if needed.
11376 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11377 * like inline_method.
11378 * - remove inlining restrictions
11379 * - fix LNEG and enable cfold of INEG
11380 * - generalize x86 optimizations like ldelema as a peephole optimization
11381 * - add store_mem_imm for amd64
11382 * - optimize the loading of the interruption flag in the managed->native wrappers
11383 * - avoid special handling of OP_NOP in passes
11384 * - move code inserting instructions into one function/macro.
11385 * - try a coalescing phase after liveness analysis
11386 * - add float -> vreg conversion + local optimizations on !x86
11387 * - figure out how to handle decomposed branches during optimizations, ie.
11388 * compare+branch, op_jump_table+op_br etc.
11389 * - promote RuntimeXHandles to vregs
11390 * - vtype cleanups:
11391 * - add a NEW_VARLOADA_VREG macro
11392 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11393 * accessing vtype fields.
11394 * - get rid of I8CONST on 64 bit platforms
11395 * - dealing with the increase in code size due to branches created during opcode
11397 * - use extended basic blocks
11398 * - all parts of the JIT
11399 * - handle_global_vregs () && local regalloc
11400 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11401 * - sources of increase in code size:
11404 * - isinst and castclass
11405 * - lvregs not allocated to global registers even if used multiple times
11406 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11408 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11409 * - add all micro optimizations from the old JIT
11410 * - put tree optimizations into the deadce pass
11411 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11412 * specific function.
11413 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11414 * fcompare + branchCC.
11415 * - create a helper function for allocating a stack slot, taking into account
11416 * MONO_CFG_HAS_SPILLUP.
11418 * - merge the ia64 switch changes.
11419 * - optimize mono_regstate2_alloc_int/float.
11420 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11421 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11422 * parts of the tree could be separated by other instructions, killing the tree
11423 * arguments, or stores killing loads etc. Also, should we fold loads into other
11424 * instructions if the result of the load is used multiple times ?
11425 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11426 * - LAST MERGE: 108395.
11427 * - when returning vtypes in registers, generate IR and append it to the end of the
11428 * last bb instead of doing it in the epilog.
11429 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11437 - When to decompose opcodes:
11438 - earlier: this makes some optimizations hard to implement, since the low level IR
11439 no longer contains the necessary information. But it is easier to do.
11440 - later: harder to implement, enables more optimizations.
11441 - Branches inside bblocks:
11442 - created when decomposing complex opcodes.
11443 - branches to another bblock: harmless, but not tracked by the branch
11444 optimizations, so need to branch to a label at the start of the bblock.
11445 - branches to inside the same bblock: very problematic, trips up the local
11446 reg allocator. Can be fixed by splitting the current bblock, but that is a
11447 complex operation, since some local vregs can become global vregs etc.
11448 - Local/global vregs:
11449 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11450 local register allocator.
11451 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11452 structure, created by mono_create_var (). Assigned to hregs or the stack by
11453 the global register allocator.
11454 - When to do optimizations like alu->alu_imm:
11455 - earlier -> saves work later on since the IR will be smaller/simpler
11456 - later -> can work on more instructions
11457 - Handling of valuetypes:
11458 - When a vtype is pushed on the stack, a new temporary is created, an
11459 instruction computing its address (LDADDR) is emitted and pushed on
11460 the stack. Need to optimize cases when the vtype is used immediately as in
11461 argument passing, stloc etc.
11462 - Instead of the to_end stuff in the old JIT, simply call the function handling
11463 the values on the stack before emitting the last instruction of the bb.
11466 #endif /* DISABLE_JIT */