2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* Tunables for the inliner: per-branch cost and max IL length of an inlinee. */
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee.
 * NOTE(review): this listing is missing intermediate lines (original line
 * numbers jump); the macro terminators (`} while (0)` etc.) are not visible.
 * Verify bodies against the upstream file before relying on them. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation if an exception was already recorded on the cfg. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on the cfg (with a formatted message that
 * names both methods) and jump to the function-local exception_exit label. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE but for an inaccessible field. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Mark generic sharing as failed for this method/opcode (verbose trace at
 * level > 2) and jump to exception_exit. Only acts when a
 * generic_sharing_context is present on the cfg. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
/* True when 'ins' is an OP_MOVE whose source register is arg 0 of a method
 * that has a 'this' parameter. */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode translation helpers defined later in
 * this file (ldind/stind CIL opcodes -> *_MEMBASE machine opcodes). */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Map a generic opcode to its immediate-operand variant (the _noemul form
 * presumably skips opcodes that are software-emulated — TODO confirm). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
/* IR emission helpers shared with other mini/*.c files. */
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Shared signatures for runtime trampolines; defined in another
 * translation unit (hence extern). */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/* Instruction metadata tables, generated by expanding mini-ops.h with
 * different definitions of MINI_OP/MINI_OP3.
 * NOTE(review): the surrounding array declarations/#undef lines are missing
 * from this listing (original line numbers jump) — verify against upstream. */
124 * Instruction metadata
/* First expansion: emit dest/src1/src2(/src3) register-class characters. */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Second expansion: compute, per opcode, the index of the last used source
 * register + 1 (0, 1, 2 or 3). */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize a MonoMethodVar; 0xffff marks "no first use recorded yet". */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the first three entries of 'regs' into the instruction's three
 * source-register slots. (Return type / braces not visible in this listing.) */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Thin public wrappers over the static alloc_*reg helpers: allocate a fresh
 * virtual integer / float / pointer-sized register, or a destination register
 * appropriate for the given eval-stack type. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/* Pick the move opcode used to copy a value of 'type' between registers.
 * NOTE(review): many case labels and the return statements are missing from
 * this listing; only the visible control flow is documented here. */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying basetype (loops back into the switch
 * in the upstream code — the goto is not visible here). */
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are reduced to their container class and re-examined. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR presumably — only valid under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: its IN and OUT edges (block number and dfn)
 * followed by every instruction in the block. */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/* On unverifiable IL: trap into the debugger when break_on_unverified is set,
 * otherwise jump to the function-local 'unverified' label. */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset 'ip', bounds-checking ip against the method body first. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh pointer register. */
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen an I4 second operand to pointer width via
 * OP_SEXT_I4 before a mixed PTR/I4 binary op; no-op on 32-bit targets. */
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop the two top-of-stack values, build the binary op, infer its stack type
 * via type_from_op (), widen if needed, allocate a dreg, append the ins to the
 * current bblock and push the (possibly decomposed) result. */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary analogue of ADD_BINOP: one source operand, same type inference,
 * emission and decomposition. */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Two-way conditional branch: emit OP_COMPARE plus the branch ins, wire the
 * true edge to 'target' and the false edge to next_block (or to the bblock at
 * 'ip' when next_block is NULL), flushing any stack items to temps first.
 * start_new_bblock = 1/2 distinguishes the two fall-through situations —
 * TODO confirm exact meaning against the main IL loop (not visible here). */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/* Adds the edge from->to to both adjacency lists, skipping duplicates.
 * Edge arrays are grown by reallocating from the cfg mempool (old arrays are
 * leaked into the mempool, which is freed wholesale with the compilation).
 * NOTE(review): the IL%04x vs IL_%04x spelling in the debug printfs below is
 * inconsistent — cosmetic only, present upstream. */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the same duplicate check and append for the in-edge list of 'to'. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper for the static link_bblock () above. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags.
 * Filter ranges are checked first since they are not covered by
 * MONO_OFFSET_IN_HANDLER. */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not a handler: region is the clause
 * flags alone. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) every clause of kind 'type' whose protected range
 * contains 'ip' but not 'target' — i.e. the handlers a branch from ip to
 * target would leave. */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/* Get-or-create the per-region stack-pointer variable, cached in the
 * cfg->spvars hash keyed by region token. */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable for a handler at 'offset'; NULL if none. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the object-typed exception variable for a handler at
 * 'offset', cached in cfg->exvars; mirrors mono_create_spvar_for_region. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from the metadata type.
 * Byref types become STACK_MP; enums and generic instances are reduced and
 * re-dispatched (the goto back into the switch is not visible in this
 * listing — original line numbers jump). */
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
/* Type variables only appear under generic sharing; treated as object refs. */
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of an arithmetic binop, indexed
 * [src1->type][src2->type]; STACK_INV marks an illegal combination. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Row for the unary-negation result table (declaration line not visible). */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor etc.). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, non-zero = allowed (the 2/3/4 codes
 * presumably encode restricted comparisons — TODO confirm in type_from_op). */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops: shift amount must be I4 or PTR. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode delta added to the generic CIL opcode, indexed by
 * the (already computed) result stack type. */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
/* Central opcode/type inference: for each generic opcode, consult the
 * validation tables above, set ins->type, and specialize ins->opcode by
 * adding the per-type delta from the *_op_map tables.
 * NOTE(review): many case labels and break statements are missing from this
 * listing (original line numbers jump) — do not infer fall-through behavior
 * from adjacency here. */
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick L/F/I variant from the operand width. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
/* Immediate compare has a single source, hence src1 indexed twice. */
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compares: only table code 1 is accepted (& 1). */
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: result is R8; source picks the I/L variant. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u (presumably): pointer-sized result; on 64-bit an I8 source needs
 * no widening, hence the #if below. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: R8 results are invalid for *.ovf opcodes. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default their klass to object when none is known. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
/* Row of a type-conversion table (its declaration line is not visible in
 * this listing). */
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Verify that the stack types of 'args' are compatible with 'sig'
 * (byref-ness, reference kinds, float widths). Fragmentary here: the return
 * statements between checks are not visible. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable as an int-sized local. */
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates cfg->got_var on architectures that need an explicit GOT
 * variable; the non-AOT early return's value is not visible in this listing. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Lazily creates the runtime-generic-context variable; only valid under
 * generic sharing (asserted). Marked INDIRECT to force stack allocation. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a MonoType*. STACK_MP
 * (presumably, label not visible) uses the klass's this_arg; errors out on
 * anything unhandled. */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse-direction helper: metadata type -> STACK_* constant, after
 * stripping enum wrappers via mono_type_get_underlying_type ().
 * NOTE(review): the return statements between the case-label groups are
 * missing from this listing. */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/* Map a CIL ldelem/stelem opcode to the element MonoClass it accesses
 * (case labels are partially missing in this listing — only LDELEM_REF /
 * STELEM_REF are visible). */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a local variable to spill stack slot 'slot' of type ins->type into.
 * Integer-like types are shared through the cfg->intvars cache keyed by
 * (type, slot); other types get a fresh variable each time. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Under AOT compilation, remember the (image, token) pair for 'key' in
 * cfg->token_info_hash so the AOT compiler can resolve it later. Skipped
 * when a generic context is active or the token has no table. */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): original line numbers jump inside this function — several
 * statements (loop closers, 'continue's, the bindex increments) are not
 * visible in this listing. */
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with live stack items: allocate (or adopt a successor's)
 * out_stack of 'count' variables. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this block's out_stack to every successor's in_stack; a depth
 * mismatch at a join point marks the method unverifiable. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
/* Emit the stores of the live stack values into the shared temps, and
 * replace the stack entries with the temp variables. */
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/* Loads into "intf_reg" the interface offset entry for "klass", read from the
 * per-interface table located at negative offsets before the vtable in "vtable_reg". */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: interface_id is only known at load time, so materialize it through an
 * ADJUSTED_IID patch and compute the slot address at runtime. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant; load directly at the
 * (interface_id + 1) slot before the vtable pointer. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emits code which sets "intf_bit_reg" to a nonzero value iff the bit for
 * klass->interface_id is set in the interface bitmap found at
 * base_reg + offset. Two code paths exist depending on whether the bitmap
 * is stored compressed (COMPRESSED_INTERFACE_BITMAP). */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit lookup to the
 * mono_class_interface_match icall at runtime. */
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: interface_id is a patched constant, so the byte index (iid >> 3)
 * and the bit mask (1 << (iid & 7)) must both be computed at runtime. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: performs the bitmap check against MonoClass.interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: performs the bitmap check against MonoVTable.interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
/* On failure: branches to "false_target" when one is supplied, otherwise
 * throws InvalidCastException via a conditional exception. */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: the interface id comes from an IID patch instead of an immediate. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then do the range check. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then do the range check. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emits the isinst fast path: checks whether the runtime class in "klass_reg"
 * has "klass" among its supertypes, using the supertypes array indexed by
 * klass->idepth - 1. Branches to "true_target" on a match; depth check failure
 * branches to "false_target". "klass_ins", when non-NULL, supplies the class
 * to compare against as a runtime value (shared generic code). */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need an explicit idepth
 * range check before indexing the supertypes array. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare the loaded supertype against klass: from a runtime value, an AOT
 * class constant, or an immediate pointer, in that order of preference. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class instruction. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emits an interface cast check against the vtable in "vtable_reg":
 * first the max-iid range check, then the interface bitmap test.
 * With a "true_target" this behaves like isinst (branch on success);
 * without one it behaves like castclass (throw InvalidCastException). */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but reads max_interface_id and the
 * interface bitmap from the MonoClass instead of the MonoVTable. */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emits an exact class identity check: compares the class in "klass_reg"
 * against "klass" (or against the runtime value "klass_inst" when non-NULL)
 * and throws InvalidCastException on mismatch. */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
/* AOT: compare against a patched class constant rather than an immediate. */
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compares the class in "klass_reg" against "klass" and branches to "target"
 * using "branch_op" (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing. */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through this. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emits a full castclass check for the object in "obj_reg" whose class is in
 * "klass_reg". Array classes check rank and element class (recursing for
 * arrays of arrays); non-array classes check the supertypes chain. Enum
 * special-casing is needed because enums are assignment-compatible with
 * their underlying representation in arrays. Throws InvalidCastException on
 * failure; "object_is_null" is the block to branch to when the check is
 * satisfied early. */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* klass_inst (shared-generic runtime class) is not supported on the array path. */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
/* Target is object[]: any non-enum reference element type matches; only
 * enums (whose parent chain reaches Enum) must be rejected. */
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
/* SZARRAYs have a NULL bounds pointer; a non-NULL one means a
 * multi-dimensional/bounded array, which must not cast to T[]. */
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
/* Non-array path: verify klass appears in the object's supertypes array,
 * throwing (rather than branching) on a failed depth check. */
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emits inline code to set "size" bytes at destreg + offset to "val".
 * Small aligned sizes use a single store-immediate; otherwise the value is
 * put in a register and stored in descending power-of-two chunks, falling
 * back to byte stores when unaligned access is not available. */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported for the multi-byte immediate paths below. */
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: load the fill value into a register once, then emit a
 * sequence of register stores. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if neccesary */
/* Unaligned prefix: emit byte stores until alignment is reached. */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emits an inline, fully-unrolled copy of "size" bytes from srcreg + soffset
 * to destreg + doffset, using the widest loads/stores "align" permits
 * (8/4/2/1 bytes, with 8-byte chunks only on 64-bit targets that allow
 * unaligned access). */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if neccesary */
/* Unaligned prefix: byte-wise copy until alignment is reached. */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for whatever remains. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Maps a return type to the matching call opcode family, where "calli"
 * selects the indirect (_REG) form and "virt" the virtual (_VIRT) form.
 * Families: plain CALL for integers/pointers/references, LCALL for 64-bit,
 * FCALL for floating point, VCALL for value types, VOIDCALL for void.
 * Enums and generic instances are unwrapped and re-dispatched. */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type parameters to their basic type first. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
1817 if (type->data.klass->enumtype) {
/* Enums dispatch on their underlying integer type. */
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open definition. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Unwrap enums etc. to the underlying type before the stack-type switch. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
/* Small integer targets accept I4 and native-pointer stack entries. */
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
/* Value types require both the stack kind and the exact class to match. */
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* The "this" argument (args [0]) must be an object, managed pointer, or
 * native pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* handle_enum: enums and generic instances loop back here after unwrapping. */
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps a *CALLVIRT opcode to the corresponding direct-call opcode.
 * Asserts on anything outside the known CALLVIRT family. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode, used
 * when the call target is loaded from a vtable/IMT slot.
 * Asserts on anything outside the known CALLVIRT family. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/* Emits the hidden IMT argument for an interface call: either the supplied
 * runtime "imt_arg", or a constant pointing at call->method (AOT patch or
 * immediate). The value is passed in MONO_ARCH_IMT_REG when the architecture
 * defines one, otherwise via mono_arch_emit_imt_argument. */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
/* LLVM path: always place the method pointer into a vreg and record it in
 * call->imt_arg_reg so the LLVM backend can wire it up. */
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2116 #ifdef MONO_ARCH_IMT_REG
/* Non-LLVM path with a dedicated IMT register. */
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: let the architecture backend decide how to pass it. */
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo from the mempool "mp" describing a patch of the
 * given type/target at IL offset "ip". Memory is owned by the mempool. */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/* Creates a MonoCallInst for a call with signature "sig" and arguments
 * "args". "calli"/"virtual"/"tail" select the opcode family. Handles vtype
 * returns (via cfg->vret_addr or an OP_OUTARG_VTRETADDR temp), soft-float
 * r4 argument conversion, and dispatches argument marshalling to the LLVM
 * or native architecture backend. The call is NOT added to a bblock here;
 * callers do that. */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Struct return, first form: reuse the caller-provided vret address var. */
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Struct return, second form: allocate a local temp to receive the value. */
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Hand argument marshalling to the appropriate backend. */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* Track the worst-case outgoing-parameter area and that this method calls. */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emits an indirect call through the address in "addr" with the given
 * signature and arguments; appends the call to the current bblock and
 * returns it as a MonoInst*. */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/* Attaches the rgctx (runtime generic context) argument to "call": via the
 * dedicated MONO_ARCH_RGCTX_REG when the architecture has one, otherwise by
 * recording the vreg in call->rgctx_arg_reg for the backend. */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call variant that additionally passes an rgctx argument.
 * The rgctx value is moved into its own vreg before emitting the call. */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations: rgctx slot fetch helpers defined later in this file. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Emits a managed call to "method". Handles, in order: string ctor return
 * type fixup, remoting (methods on MarshalByRef/object that might be proxies),
 * the delegate Invoke fast path, non-virtual / sealed direct dispatch, and
 * finally virtual dispatch through the vtable or IMT ("imt_arg" carries the
 * interface method when the caller resolved it at runtime). "this" being
 * non-NULL is what makes the call virtual. Returns the call instruction. */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Calls on MarshalByRef (or object) instances may hit a transparent proxy,
 * unless the call is virtual or "this" is statically known to be local. */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
/* Shared generic + remoting: resolve the remoting-invoke wrapper address
 * through the rgctx and call it indirectly. */
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl directly. */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with a fault check doubling as
 * the null check) and call through the resolved slot. */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: negative offset from the vtable start. */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
/* No IMT: look up the interface offset table instead. */
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods on classes also pass the resolved method via IMT. */
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes "vtable_arg" as
 * the rgctx argument (moved to its own vreg before the call is emitted). */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call with the method's own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native function "func" with the given signature
 * and appends it to the current bblock. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emits a call to the registered JIT icall identified by its address "func";
 * the icall's wrapper and signature are looked up from the icall table. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The MonoJumpInfo pointer itself is registered in cfg->abs_patches so the
 * ABS patch resolver can recognize it as a patch, not a real address. */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   If the call INS (with signature FSIG) is a pinvoke or LLVM call
 * returning a small, non-byref integer type, emit an explicit widening
 * conversion of the result, because native code might return narrow
 * integers without initializing the upper bits of the register.
 * Returns the (possibly replaced) result instruction.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Select the widening op matching the (sub-word) load opcode of the
 * return type; types that need no widening fall through. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
/* The widened value keeps the stack type of the original result */
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the (cached) corlib memcpy helper, looked up on System.String
 * (mono_defaults.string_class) with 3 parameters. Aborts if the installed
 * corlib is too old to contain it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   OR into *WB_BITMAP one bit per pointer-sized slot of KLASS (placed at
 * byte OFFSET from the destination start) for every slot that holds an
 * object reference. Recurses into embedded value types that themselves
 * contain references. Static fields are skipped.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
/* Only instance fields contribute to the copied layout */
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the MonoObject header; strip it */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference slots must be pointer-aligned for the bitmap to be valid */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, GC-write-barrier-aware copy of a valuetype of class
 * KLASS (SIZE bytes, ALIGN alignment); iargs [0]/[1] hold dest/src.
 * Small copies are unrolled with a write barrier call after each
 * reference store; larger copies are delegated to the
 * mono_gc_wbarrier_value_copy_bitmap icall. Bails out (returns without
 * emitting the copy) when the alignment is below pointer size or the
 * type is too large for the 32-slot bitmap.
 */
2594 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2596 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2597 unsigned need_wb = 0;
2602 /*types with references can't have alignment smaller than sizeof(void*) */
2603 if (align < SIZEOF_VOID_P)
2606 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2607 if (size > 32 * SIZEOF_VOID_P)
2610 create_write_barrier_bitmap (klass, &need_wb, 0);
2612 /* We don't unroll more than 5 stores to avoid code bloat. */
2613 if (size > 5 * SIZEOF_VOID_P) {
2614 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2615 size += (SIZEOF_VOID_P - 1);
2616 size &= ~(SIZEOF_VOID_P - 1);
/* Slow path: pass size and the reference bitmap to the GC icall */
2618 EMIT_NEW_ICONST (cfg, iargs [2], size);
2619 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2620 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2624 destreg = iargs [0]->dreg;
2625 srcreg = iargs [1]->dreg;
/* Fast path: unrolled word copy, dest_ptr_reg walks the destination */
2628 dest_ptr_reg = alloc_preg (cfg);
2629 tmp_reg = alloc_preg (cfg);
2632 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2634 while (size >= SIZEOF_VOID_P) {
2635 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Bit set in the bitmap => this slot is a reference; call the barrier */
2638 if (need_wb & 0x1) {
2639 MonoInst *dummy_use;
2641 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2642 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg live across the barrier call */
2644 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2645 dummy_use->sreg1 = dest_ptr_reg;
2646 MONO_ADD_INS (cfg->cbb, dummy_use);
2650 offset += SIZEOF_VOID_P;
2651 size -= SIZEOF_VOID_P;
2654 /*tmp += sizeof (void*)*/
2655 if (size >= SIZEOF_VOID_P) {
2656 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2657 MONO_ADD_INS (cfg->cbb, iargs [0]);
/* Copy the sub-word tail in progressively smaller chunks (4/2/1 bytes) */
2661 /* Those cannot be references since size < sizeof (void*) */
2663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2664 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2687 * Emit code to copy a valuetype of type @klass whose address is stored in
2688 * @src->dreg to memory whose address is stored at @dest->dreg.
2691 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2693 MonoInst *iargs [4];
2696 MonoMethod *memcpy_method;
2700 * This check breaks with spilled vars... need to handle it during verification anyway.
2701 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* @native selects the marshalled (native) layout size vs. managed size */
2705 n = mono_class_native_size (klass, &align);
2707 n = mono_class_value_size (klass, &align);
2709 /* if native is true there should be no references in the struct */
2710 if (cfg->gen_write_barriers && klass->has_references && !native) {
2711 /* Avoid barriers when storing to the stack */
2712 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2713 (dest->opcode == OP_LDADDR))) {
2714 int context_used = 0;
2719 if (cfg->generic_sharing_context)
2720 context_used = mono_class_check_context_used (klass);
2722 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2723 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2725 } else if (context_used) {
/* Shared generic code: fetch the class from the rgctx at runtime */
2726 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2728 if (cfg->compile_aot) {
2729 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2731 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2732 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy performed by the runtime */
2736 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small sizes, helper call otherwise */
2741 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2742 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2743 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2747 EMIT_NEW_ICONST (cfg, iargs [2], n);
2749 memcpy_method = get_memcpy_method ();
2750 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the (cached) corlib memset helper, looked up on System.String
 * (mono_defaults.string_class) with 3 parameters. Aborts if the installed
 * corlib is too old to contain it.
 */
2755 get_memset_method (void)
2757 static MonoMethod *memset_method = NULL;
2758 if (!memset_method) {
2759 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2761 g_error ("Old corlib found. Install a new one");
2763 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS at the address
 * in DEST->dreg. Small types get an inline memset; larger ones call the
 * corlib memset helper with (dest, 0, size).
 */
2767 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2769 MonoInst *iargs [3];
2772 MonoMethod *memset_method;
2774 /* FIXME: Optimize this for the case when dest is an LDADDR */
2776 mono_class_init (klass);
2777 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words */
2779 if (n <= sizeof (gpointer) * 5) {
2780 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2783 memset_method = get_memset_method ();
2785 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2786 EMIT_NEW_ICONST (cfg, iargs [2], n);
2787 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD. The source
 * depends on how the generic context is used (CONTEXT_USED) and on the
 * kind of method: the method's MRGCTX variable, the vtable variable (for
 * static/valuetype methods, possibly indirected through the MRGCTX's
 * class_vtable), or the vtable loaded from 'this'. Only valid when
 * compiling shared code (asserted below).
 */
2792 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2794 MonoInst *this = NULL;
2796 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods that don't use the method context
 * get the context via 'this' (argument 0) */
2798 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2799 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2800 !method->klass->valuetype)
2801 EMIT_NEW_ARGLOAD (cfg, this, 0);
2803 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2804 MonoInst *mrgctx_loc, *mrgctx_var;
/* Method context in use => method must be inflated with a method_inst */
2807 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2809 mrgctx_loc = mono_get_vtable_var (cfg);
2810 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2813 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2814 MonoInst *vtable_loc, *vtable_var;
2818 vtable_loc = mono_get_vtable_var (cfg);
2819 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an MRGCTX here; load its class_vtable */
2821 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2822 MonoInst *mrgctx_var = vtable_var;
2825 vtable_reg = alloc_preg (cfg);
2826 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable))
2827 vtable_var->type = STACK_PTR;
/* Default: load the vtable from the 'this' object */
2833 int vtable_reg, res_reg;
2835 vtable_reg = alloc_preg (cfg);
2836 res_reg = alloc_preg (cfg);
2837 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) and fill a MonoJumpInfoRgctxEntry
 * describing an rgctx slot lookup: the method it belongs to, whether the
 * lookup goes through the MRGCTX, the patch describing the looked-up item
 * (PATCH_TYPE/PATCH_DATA) and the kind of info requested (INFO_TYPE).
 */
2842 static MonoJumpInfoRgctxEntry *
2843 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2845 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2846 res->method = method;
2847 res->in_mrgctx = in_mrgctx;
2848 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2849 res->data->type = patch_type;
2850 res->data->data.target = patch_data;
2851 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX and the
 * slot description ENTRY (resolved via a MONO_PATCH_INFO_RGCTX_FETCH patch).
 */
2856 static inline MonoInst*
2857 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2859 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for KLASS from the runtime
 * generic context of the current method.
 */
2863 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2864 MonoClass *klass, int rgctx_type)
2866 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2867 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2869 return emit_rgctx_fetch (cfg, rgctx, entry);
2873 * emit_get_rgctx_method:
2875 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2876 * normal constants, else emit a load from the rgctx.
2879 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2880 MonoMethod *cmethod, int rgctx_type)
/* Non-shared case: the requested info can be emitted as a constant */
2882 if (!context_used) {
2885 switch (rgctx_type) {
2886 case MONO_RGCTX_INFO_METHOD:
2887 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2889 case MONO_RGCTX_INFO_METHOD_RGCTX:
2890 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx info types are not expected here */
2893 g_assert_not_reached ();
2896 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2897 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2899 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
2904 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2905 MonoClassField *field, int rgctx_type)
2907 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2908 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2910 return emit_rgctx_fetch (cfg, rgctx, entry);
2914 * On return the caller must check @klass for load errors.
/* emit_generic_class_init:
 *   Emit a call to the generic class init trampoline for KLASS. The vtable
 * argument is either fetched from the rgctx (shared code) or emitted as a
 * vtable constant; on architectures with a dedicated vtable register it is
 * passed in MONO_ARCH_VTABLE_REG. */
2917 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2919 MonoInst *vtable_arg;
2921 int context_used = 0;
2923 if (cfg->generic_sharing_context)
2924 context_used = mono_class_check_context_used (klass);
2927 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2928 klass, MONO_RGCTX_INFO_VTABLE);
2930 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2934 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different signature */
2937 if (COMPILE_LLVM (cfg))
2938 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2940 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2941 #ifdef MONO_ARCH_VTABLE_REG
2942 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2943 cfg->uses_vtable_reg = TRUE;
2950 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type:
 *   Emit IR that throws ArrayTypeMismatchException unless OBJ's runtime
 * type matches ARRAY_CLASS. The comparison strategy depends on the
 * compilation mode: class comparison (MONO_OPT_SHARED), rgctx-loaded
 * vtable (shared generic code), or a direct vtable compare. */
2953 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2955 int vtable_reg = alloc_preg (cfg);
2956 int context_used = 0;
2958 if (cfg->generic_sharing_context)
2959 context_used = mono_class_check_context_used (array_class);
2961 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code: compare classes instead of vtables */
2963 if (cfg->opt & MONO_OPT_SHARED) {
2964 int class_reg = alloc_preg (cfg);
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2966 if (cfg->compile_aot) {
/* AOT cannot embed a raw class pointer; load it via a patch */
2967 int klass_reg = alloc_preg (cfg);
2968 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2969 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2973 } else if (context_used) {
2974 MonoInst *vtable_ins;
2976 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2977 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2979 if (cfg->compile_aot) {
2983 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2985 vt_reg = alloc_preg (cfg);
2986 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2987 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2990 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2996 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit IR that records the source class
 * (read from OBJ_REG's vtable) and the target class KLASS into the
 * JIT TLS data (class_cast_from/class_cast_to) so a failing cast can
 * report both types.
 */
3000 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3002 if (mini_get_debug_options ()->better_cast_details) {
3003 int to_klass_reg = alloc_preg (cfg);
3004 int vtable_reg = alloc_preg (cfg);
3005 int klass_reg = alloc_preg (cfg);
3006 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without the TLS intrinsic can't support cast details */
3009 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3013 MONO_ADD_INS (cfg->cbb, tls_get);
3014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3017 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3018 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3019 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast info in
 * the JIT TLS data after a successful cast (only the 'from' field needs
 * clearing).
 */
3024 reset_cast_details (MonoCompile *cfg)
3026 /* Reset the variables holding the cast details */
3027 if (mini_get_debug_options ()->better_cast_details) {
3028 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3030 MONO_ADD_INS (cfg->cbb, tls_get);
3031 /* It is enough to reset the from field */
3032 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3037 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3038 * generic code is generated.
3041 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling the managed Nullable.Unbox method */
3043 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3046 MonoInst *rgctx, *addr;
3048 /* FIXME: What if the class is shared? We might not
3049 have to get the address of the method from the
/* Shared code: fetch the target address and rgctx at runtime, then
 * emit an indirect (calli) call */
3051 addr = emit_get_rgctx_method (cfg, context_used, method,
3052 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3054 rgctx = emit_get_rgctx (cfg, method, context_used);
3056 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3058 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in sp [0]: verify its runtime element class
 * matches KLASS (throwing InvalidCastException otherwise) and return an
 * instruction producing the address of the boxed value (object pointer +
 * sizeof (MonoObject)).
 */
3063 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3067 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3068 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3069 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3070 int rank_reg = alloc_dreg (cfg ,STACK_I4);
/* The vtable load also performs the null check (FAULT variant) */
3072 obj_reg = sp [0]->dreg;
3073 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3076 /* FIXME: generics */
3077 g_assert (klass->rank == 0);
/* The object must not be an array */
3080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3081 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx */
3087 MonoInst *element_class;
3089 /* This assertion is from the unboxcast insn */
3090 g_assert (klass->rank == 0);
3092 element_class = emit_get_rgctx_klass (cfg, context_used,
3093 klass->element_class, MONO_RGCTX_INFO_KLASS);
3095 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3096 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3098 save_cast_details (cfg, klass->element_class, obj_reg);
3099 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3100 reset_cast_details (cfg);
/* Result: address of the unboxed value, just past the object header */
3103 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3104 MONO_ADD_INS (cfg->cbb, add);
3105 add->type = STACK_MP;
3112 * Returns NULL and set the cfg exception on error.
/* handle_alloc:
 *   Emit IR that allocates an object of class KLASS, choosing among the
 * GC's managed allocator, domain-aware mono_object_new, the
 * vtable-specific mono_object_new_specific, an AOT mscorlib helper, or
 * the allocation function returned by mono_class_get_allocation_ftn ().
 * FOR_BOX selects the boxing variant of the managed allocator. */
3115 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3117 MonoInst *iargs [2];
/* Shared generic code path: the class/vtable comes from the rgctx */
3123 MonoInst *iargs [2];
3126 FIXME: we cannot get managed_alloc here because we can't get
3127 the class's vtable (because it's not a closed class)
3129 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3130 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3133 if (cfg->opt & MONO_OPT_SHARED)
3134 rgctx_info = MONO_RGCTX_INFO_KLASS;
3136 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3137 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3139 if (cfg->opt & MONO_OPT_SHARED) {
3140 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3142 alloc_ftn = mono_object_new;
3145 alloc_ftn = mono_object_new_specific;
3148 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below */
3151 if (cfg->opt & MONO_OPT_SHARED) {
3152 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3153 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3155 alloc_ftn = mono_object_new;
3156 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3157 /* This happens often in argument checking code, eg. throw new FooException... */
3158 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3159 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3160 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3162 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3163 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TypeLoadException on the cfg */
3167 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3168 cfg->exception_ptr = klass;
3172 #ifndef MONO_CROSS_COMPILE
3173 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3176 if (managed_alloc) {
3177 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3178 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3180 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size (in pointer words) first */
3182 guint32 lw = vtable->klass->instance_size;
3183 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3184 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3185 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3188 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3192 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3196 * Returns NULL and set the cfg exception on error.
/* handle_box:
 *   Emit IR that boxes VAL into a new object of class KLASS. Nullable<T>
 * is boxed through the managed Nullable.Box method; everything else is
 * allocated with handle_alloc () and the value is stored just past the
 * object header. */
3199 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3201 MonoInst *alloc, *ins;
3203 if (mono_class_is_nullable (klass)) {
3204 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3207 /* FIXME: What if the class is shared? We might not
3208 have to get the method address from the RGCTX. */
3209 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3210 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3211 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3213 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3215 return mono_emit_method_call (cfg, method, &val, NULL);
3219 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header */
3223 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3228 // FIXME: This doesn't work yet (class libs tests fail?)
/* Decides whether an isinst/castclass against 'klass' must take the slow
 * icall path instead of inline checks. Note the leading TRUE: because of
 * the FIXME above, the macro currently answers "complex" for every class,
 * so the inline fast paths guarded by it are effectively disabled. */
3229 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3232 * Returns NULL and set the cfg exception on error.
/* handle_castclass:
 *   Emit IR for the 'castclass' opcode: check that SRC (or null) is an
 * instance of KLASS, throwing InvalidCastException otherwise. Shared
 * generic code or "complex" classes go through the mono_object_castclass
 * icall; the inline path below handles interfaces and plain classes. */
3235 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3237 MonoBasicBlock *is_null_bb;
3238 int obj_reg = src->dreg;
3239 int vtable_reg = alloc_preg (cfg);
3240 MonoInst *klass_inst = NULL;
3245 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3246 klass, MONO_RGCTX_INFO_KLASS);
3248 if (is_complex_isinst (klass)) {
3249 /* Complex case, handle by an icall */
3255 args [1] = klass_inst;
3257 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3259 /* Simple case, handled by the code below */
/* null always passes castclass: branch straight to the end */
3263 NEW_BBLOCK (cfg, is_null_bb);
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3268 save_cast_details (cfg, klass, obj_reg);
3270 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3272 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3274 int klass_reg = alloc_preg (cfg);
3276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single compare */
3278 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3279 /* the remoting code is broken, access the class for now */
3280 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 cfg->exception_ptr = klass;
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3295 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3299 MONO_START_BB (cfg, is_null_bb);
3301 reset_cast_details (cfg);
3307 * Returns NULL and set the cfg exception on error.
/* handle_isinst:
 *   Emit IR for the 'isinst' opcode: the result register holds SRC if it
 * is an instance of KLASS (or null), NULL otherwise. "Complex" classes
 * (and shared generic code) go through the mono_object_isinst icall; the
 * inline path builds a small CFG of type checks. */
3310 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3313 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3314 int obj_reg = src->dreg;
3315 int vtable_reg = alloc_preg (cfg);
3316 int res_reg = alloc_preg (cfg);
3317 MonoInst *klass_inst = NULL;
3320 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3322 if (is_complex_isinst (klass)) {
3325 /* Complex case, handle by an icall */
3331 args [1] = klass_inst;
3333 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3335 /* Simple case, the code below can handle it */
3339 NEW_BBLOCK (cfg, is_null_bb);
3340 NEW_BBLOCK (cfg, false_bb);
3341 NEW_BBLOCK (cfg, end_bb);
3343 /* Do the assignment at the beginning, so the other assignment can be if converted */
3344 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3345 ins->type = STACK_OBJ;
/* null input -> result is the input (null), skip the checks */
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3353 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3354 g_assert (!context_used);
3355 /* the is_null_bb target simply copies the input register to the output */
3356 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3358 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then check the element (cast) class */
3361 int rank_reg = alloc_preg (cfg);
3362 int eclass_reg = alloc_preg (cfg);
3364 g_assert (!context_used);
3365 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3366 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types with non-trivial assignability rules
 * (object, enum base class, System.Enum, interface elements) */
3370 if (klass->cast_class == mono_defaults.object_class) {
3371 int parent_reg = alloc_preg (cfg);
3372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3373 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3374 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3376 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3377 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3378 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3380 } else if (klass->cast_class == mono_defaults.enum_class) {
3381 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3383 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3384 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3386 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3387 /* Check that the object is a vector too */
3388 int bounds_reg = alloc_preg (cfg);
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3394 /* the is_null_bb target simply copies the input register to the output */
3395 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3397 } else if (mono_class_is_nullable (klass)) {
3398 g_assert (!context_used);
3399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3400 /* the is_null_bb target simply copies the input register to the output */
3401 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-generic classes: a single vtable/class compare suffices */
3403 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3404 g_assert (!context_used);
3405 /* the remoting code is broken, access the class for now */
3406 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3407 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3409 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3410 cfg->exception_ptr = klass;
3413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3422 /* the is_null_bb target simply copies the input register to the output */
3423 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the check failed, result is NULL */
3428 MONO_START_BB (cfg, false_bb);
3430 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3433 MONO_START_BB (cfg, is_null_bb);
3435 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see the tri-state result
 * described below): proxies whose real type cannot be determined locally
 * yield 2 so the caller can fall back to a remote check.
 */
3441 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3443 /* This opcode takes as input an object reference and a class, and returns:
3444 0) if the object is an instance of the class,
3445 1) if the object is not instance of the class,
3446 2) if the object is a proxy whose type cannot be determined */
3449 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3450 int obj_reg = src->dreg;
3451 int dreg = alloc_ireg (cfg);
3453 int klass_reg = alloc_preg (cfg);
3455 NEW_BBLOCK (cfg, true_bb);
3456 NEW_BBLOCK (cfg, false_bb);
3457 NEW_BBLOCK (cfg, false2_bb);
3458 NEW_BBLOCK (cfg, end_bb);
3459 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3464 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3465 NEW_BBLOCK (cfg, interface_fail_bb);
3467 tmp_reg = alloc_preg (cfg);
3468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3469 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: the answer depends on whether the object is
 * a transparent proxy with custom type info */
3470 MONO_START_BB (cfg, interface_fail_bb);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3473 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3475 tmp_reg = alloc_preg (cfg);
3476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface class: distinguish proxy and non-proxy objects */
3480 tmp_reg = alloc_preg (cfg);
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3484 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class */
3485 tmp_reg = alloc_preg (cfg);
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3489 tmp_reg = alloc_preg (cfg);
3490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3494 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3497 MONO_START_BB (cfg, no_proxy_bb);
3499 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = undetermined proxy, 0 = match */
3502 MONO_START_BB (cfg, false_bb);
3504 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3507 MONO_START_BB (cfg, false2_bb);
3509 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3512 MONO_START_BB (cfg, true_bb);
3514 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3516 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack */
3519 MONO_INST_NEW (cfg, ins, OP_ICONST);
3521 ins->type = STACK_I4;
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwise */

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* A null reference always casts successfully */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Not an interface implementor: only transparent proxies may still pass */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* Proxy case: test against the remote class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Non-proxy object: a failed cast throws InvalidCastException */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
 * Returns NULL and sets the cfg exception on error.
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE, 0);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			/* Generic sharing: the slot comes from the rgctx */
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3679 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3681 MonoJitICallInfo *info;
3683 /* Need to register the icall so it gets an icall wrapper */
3684 info = mono_get_array_new_va_icall (rank);
3686 cfg->flags |= MONO_CFG_HAS_VARARGS;
3688 /* mono_array_new_va () needs a vararg calling convention */
3689 cfg->disable_llvm = TRUE;
3691 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3692 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/* Materialize the GOT address into the got_var at method entry (PIC code). */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
3729 static int inline_limit;
3730 static gboolean inline_limit_inited;
3733 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3735 MonoMethodHeaderSummary header;
3737 #ifdef MONO_ARCH_SOFT_FLOAT
3738 MonoMethodSignature *sig = mono_method_signature (method);
3742 if (cfg->generic_sharing_context)
3745 if (cfg->inline_depth > 10)
3748 #ifdef MONO_ARCH_HAVE_LMF_OPS
3749 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3750 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3751 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3756 if (!mono_method_get_header_summary (method, &header))
3759 /*runtime, icall and pinvoke are checked by summary call*/
3760 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3761 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3762 (method->klass->marshalbyref) ||
3766 /* also consider num_locals? */
3767 /* Do the size check early to avoid creating vtables */
3768 if (!inline_limit_inited) {
3769 if (getenv ("MONO_INLINELIMIT"))
3770 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3772 inline_limit = INLINE_LENGTH_LIMIT;
3773 inline_limit_inited = TRUE;
3775 if (header.code_size >= inline_limit)
3779 * if we can initialize the class of the method right away, we do,
3780 * otherwise we don't allow inlining if the class needs initialization,
3781 * since it would mean inserting a call to mono_runtime_class_init()
3782 * inside the inlined code
3784 if (!(cfg->opt & MONO_OPT_SHARED)) {
3785 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3786 if (cfg->run_cctors && method->klass->has_cctor) {
3787 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3788 if (!method->klass->runtime_info)
3789 /* No vtable created yet */
3791 vtable = mono_class_vtable (cfg->domain, method->klass);
3794 /* This makes so that inline cannot trigger */
3795 /* .cctors: too many apps depend on them */
3796 /* running with a specific order... */
3797 if (! vtable->initialized)
3799 mono_runtime_class_init (vtable);
3801 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3802 if (!method->klass->runtime_info)
3803 /* No vtable created yet */
3805 vtable = mono_class_vtable (cfg->domain, method->klass);
3808 if (!vtable->initialized)
3813 * If we're compiling for shared code
3814 * the cctor will need to be run at aot method load time, for example,
3815 * or at the end of the compilation of the inlining method.
3817 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3822 * CAS - do not inline methods with declarative security
3823 * Note: this has to be before any possible return TRUE;
3825 if (mono_method_has_declsec (method))
3828 #ifdef MONO_ARCH_SOFT_FLOAT
3830 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3832 for (i = 0; i < sig->param_count; ++i)
3833 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3841 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3843 if (vtable->initialized && !cfg->compile_aot)
3846 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3849 if (!mono_class_needs_cctor_run (vtable->klass, method))
3852 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3853 /* The initialization is already done before the method is called */
/* Emit the address of element INDEX of the 1-dimensional array ARR of
 * element type KLASS; BCHECK controls whether a bounds check is emitted. */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Fast path: fold the scaling into an LEA for power-of-two element sizes */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	add_reg = alloc_preg (cfg);

	/* addr = arr + index * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
3916 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/* Emit the address of element [INDEX1, INDEX2] of a 2-dimensional array,
 * including lower-bound adjustment and range checks on both dimensions. */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* dimension 1: realidx1 = index1 - lower_bound; require realidx1 < length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* dimension 2: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
3976 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3980 MonoMethod *addr_method;
3983 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3986 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3988 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3989 /* emit_ldelema_2 depends on OP_LMUL */
3990 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3991 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3995 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3996 addr_method = mono_marshal_get_array_address (rank, element_size);
3997 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4002 static MonoBreakPolicy
4003 always_insert_breakpoint (MonoMethod *method)
4005 return MONO_BREAK_POLICY_ALWAYS;
4008 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4011 * mono_set_break_policy:
4012 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4015 * (both break IL instructions and Debugger.Break () method calls), for example
4016 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4017 * untrusted or semi-trusted code.
4019 * @policy_callback will be called every time a break point instruction needs to
4020 * be inserted with the method argument being the method that calls Debugger.Break()
4021 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4022 * if it wants the breakpoint to not be effective in the given method.
4023 * #MONO_BREAK_POLICY_ALWAYS is the default.
4026 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4028 if (policy_callback)
4029 break_policy_func = policy_callback;
4031 break_policy_func = always_insert_breakpoint;
4035 should_insert_brekpoint (MonoMethod *method) {
4036 switch (break_policy_func (method)) {
4037 case MONO_BREAK_POLICY_ALWAYS:
4039 case MONO_BREAK_POLICY_NEVER:
4041 case MONO_BREAK_POLICY_ON_DBG:
4042 return mono_debug_using_mono_debugger ();
4044 g_warning ("Incorrect value returned from break policy callback");
4049 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4051 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4053 MonoInst *addr, *store, *load;
4054 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4056 /* the bounds check is already done by the callers */
4057 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4059 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4060 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4063 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4069 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4071 MonoInst *ins = NULL;
4072 #ifdef MONO_ARCH_SIMD_INTRINSICS
4073 if (cfg->opt & MONO_OPT_SIMD) {
4074 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Emit an inline intrinsic implementation for CMETHOD (String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment, Math and SIMD methods). Returns the resulting instruction,
 * or NULL/the arch-specific fallback when no intrinsic applies.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_preg (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			/* (obj >> 3) * 2654435761u: address-based hash; invalid with a moving GC */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			if (cfg->gen_write_barriers && is_ref) {
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
			if (cfg->gen_write_barriers && is_ref) {
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
4522 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations.  Currently
 * only String.InternalAllocateStr is handled: when allocation profiling is
 * off and the GC provides a managed allocator, the icall is replaced by a
 * call to that allocator, passing the System.String vtable and the length.
 *   Returns the call instruction on redirection (the tail of this function,
 * outside this excerpt, presumably returns NULL otherwise — confirm).
 */
4525 inline static MonoInst*
4526 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4527 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4529 if (method->klass == mono_defaults.string_class) {
4530 /* managed string allocation support */
4531 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4532 MonoInst *iargs [2];
4533 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4534 MonoMethod *managed_alloc = NULL;
4536 g_assert (vtable); /* Should not fail since it is System.String */
/* the managed allocator is only looked up when not cross-compiling */
4537 #ifndef MONO_CROSS_COMPILE
4538 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4542 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4543 iargs [1] = args [0];
4544 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Spill the call arguments in SP (one stack entry per argument, including
 * an implicit `this' when sig->hasthis) into freshly created local variables
 * and record them in cfg->args, so the inlined callee's IR can refer to them
 * as ordinary arguments.
 */
4551 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4553 MonoInst *store, *temp;
4556 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for the `this' slot the type is taken from the stack entry itself */
4557 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4560 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4561 * would be different than the MonoInst's used to represent arguments, and
4562 * the ldelema implementation can't deal with that.
4563 * Solution: When ldelema is used on an inline argument, create a var for
4564 * it, emit ldelema on that var, and emit the saving code below in
4565 * inline_method () if needed.
4567 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4568 cfg->args [i] = temp;
4569 /* This uses cfg->args [i] which is set by the preceding line */
4570 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4571 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when nonzero, inlining can be restricted by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_method_name_limit helpers
 * below, compiled under the matching #if).
 */
4576 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4577 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4579 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter: allow inlining of CALLED_METHOD only when its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  An unset variable caches an empty limit, which
 * skips the prefix check (the fallthrough return is outside this excerpt).
 */
4581 check_inline_called_method_name_limit (MonoMethod *called_method)
4584 static char *limit = NULL;
/* read and cache the environment variable on first use.
 * NOTE(review): the raw getenv() pointer is cached; POSIX allows later
 * getenv/setenv calls to invalidate it — acceptable for a debug-only knob. */
4586 if (limit == NULL) {
4587 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4589 if (limit_string != NULL)
4590 limit = limit_string;
4592 limit = (char *) "";
4595 if (limit [0] != '\0') {
4596 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match against the callee's full name */
4598 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4599 g_free (called_method_name);
4601 //return (strncmp_result <= 0);
4602 return (strncmp_result == 0);
4609 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug filter: allow inlining into CALLER_METHOD only when its full name
 * starts with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ()
 * above, but keyed on the caller rather than the callee.
 */
4611 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4614 static char *limit = NULL;
/* read and cache the environment variable on first use (see the NOTE on
 * getenv pointer lifetime in the called-method variant above) */
4616 if (limit == NULL) {
4617 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4618 if (limit_string != NULL) {
4619 limit = limit_string;
4621 limit = (char *) "";
4625 if (limit [0] != '\0') {
4626 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match against the caller's full name */
4628 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4629 g_free (caller_method_name);
4631 //return (strncmp_result <= 0);
4632 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD (with signature FSIG and arguments SP) into the
 * current IR stream at IP.  All the pieces of CFG state that the recursive
 * mono_method_to_ir () call overwrites (locals, args, bblock maps, generic
 * context, current method, ...) are saved into prev_* locals up front and
 * restored afterwards, so the caller's translation can continue unaffected.
 *   If the callee translated within the cost budget (or INLINE_ALLWAYS is
 * set), the freshly created bblocks are linked/merged into the caller;
 * otherwise cfg->cbb is reset so the new bblocks are discarded.
 */
4640 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4641 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4643 MonoInst *ins, *rvar = NULL;
4644 MonoMethodHeader *cheader;
4645 MonoBasicBlock *ebblock, *sbblock;
4647 MonoMethod *prev_inlined_method;
4648 MonoInst **prev_locals, **prev_args;
4649 MonoType **prev_arg_types;
4650 guint prev_real_offset;
4651 GHashTable *prev_cbb_hash;
4652 MonoBasicBlock **prev_cil_offset_to_bb;
4653 MonoBasicBlock *prev_cbb;
4654 unsigned char* prev_cil_start;
4655 guint32 prev_cil_offset_to_bb_len;
4656 MonoMethod *prev_current_method;
4657 MonoGenericContext *prev_generic_context;
4658 gboolean ret_var_set, prev_ret_var_set;
4660 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional debug filters on callee/caller names, see the helpers above */
4662 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4663 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4666 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4667 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4671 if (cfg->verbose_level > 2)
4672 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4674 if (!cmethod->inline_info) {
4675 mono_jit_stats.inlineable_methods++;
4676 cmethod->inline_info = 1;
4679 /* allocate local variables */
4680 cheader = mono_method_get_header (cmethod);
4682 if (cheader == NULL || mono_loader_get_last_error ()) {
4684 mono_metadata_free_mh (cheader);
4685 mono_loader_clear_error ();
4689 /* allocate space to store the return value */
4690 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4691 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in a fresh locals array for the callee */
4695 prev_locals = cfg->locals;
4696 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4697 for (i = 0; i < cheader->num_locals; ++i)
4698 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4700 /* allocate start and end blocks */
4701 /* This is needed so if the inline is aborted, we can clean up */
4702 NEW_BBLOCK (cfg, sbblock);
4703 sbblock->real_offset = real_offset;
4705 NEW_BBLOCK (cfg, ebblock);
4706 ebblock->block_num = cfg->num_bblocks++;
4707 ebblock->real_offset = real_offset;
/* save the remaining cfg state the recursive call will clobber */
4709 prev_args = cfg->args;
4710 prev_arg_types = cfg->arg_types;
4711 prev_inlined_method = cfg->inlined_method;
4712 cfg->inlined_method = cmethod;
4713 cfg->ret_var_set = FALSE;
4714 cfg->inline_depth ++;
4715 prev_real_offset = cfg->real_offset;
4716 prev_cbb_hash = cfg->cbb_hash;
4717 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4718 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4719 prev_cil_start = cfg->cil_start;
4720 prev_cbb = cfg->cbb;
4721 prev_current_method = cfg->current_method;
4722 prev_generic_context = cfg->generic_context;
4723 prev_ret_var_set = cfg->ret_var_set;
/* recursively translate the callee's IL between sbblock and ebblock */
4725 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4727 ret_var_set = cfg->ret_var_set;
/* restore the caller's cfg state */
4729 cfg->inlined_method = prev_inlined_method;
4730 cfg->real_offset = prev_real_offset;
4731 cfg->cbb_hash = prev_cbb_hash;
4732 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4733 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4734 cfg->cil_start = prev_cil_start;
4735 cfg->locals = prev_locals;
4736 cfg->args = prev_args;
4737 cfg->arg_types = prev_arg_types;
4738 cfg->current_method = prev_current_method;
4739 cfg->generic_context = prev_generic_context;
4740 cfg->ret_var_set = prev_ret_var_set;
4741 cfg->inline_depth --;
/* accept the inline when the callee translated within the cost budget */
4743 if ((costs >= 0 && costs < 60) || inline_allways) {
4744 if (cfg->verbose_level > 2)
4745 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4747 mono_jit_stats.inlined_methods++;
4749 /* always add some code to avoid block split failures */
4750 MONO_INST_NEW (cfg, ins, OP_NOP);
4751 MONO_ADD_INS (prev_cbb, ins);
4753 prev_cbb->next_bb = sbblock;
4754 link_bblock (cfg, prev_cbb, sbblock);
4757 * Get rid of the begin and end bblocks if possible to aid local
4760 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4762 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4763 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4765 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4766 MonoBasicBlock *prev = ebblock->in_bb [0];
4767 mono_merge_basic_blocks (cfg, prev, ebblock);
4769 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4770 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4771 cfg->cbb = prev_cbb;
4779 * If the inlined method contains only a throw, then the ret var is not
4780 * set, so set it to a dummy value.
4783 static double r8_0 = 0.0;
4785 switch (rvar->type) {
4787 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4790 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4795 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4798 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4799 ins->type = STACK_R8;
4800 ins->inst_p0 = (void*)&r8_0;
4801 ins->dreg = rvar->dreg;
4802 MONO_ADD_INS (cfg->cbb, ins);
4805 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4808 g_assert_not_reached ();
/* reload the (possibly dummy) return value for the caller's stack */
4812 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4815 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: the inline was too costly or translation failed */
4818 if (cfg->verbose_level > 2)
4819 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4820 cfg->exception_type = MONO_EXCEPTION_NONE;
4821 mono_loader_clear_error ();
4823 /* This gets rid of the newly added bblocks */
4824 cfg->cbb = prev_cbb;
4826 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4831 * Some of these comments may well be out-of-date.
4832 * Design decisions: we do a single pass over the IL code (and we do bblock
4833 * splitting/merging in the few cases when it's required: a back jump to an IL
4834 * address that was not already seen as bblock starting point).
4835 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4836 * Complex operations are decomposed in simpler ones right away. We need to let the
4837 * arch-specific code peek and poke inside this process somehow (except when the
4838 * optimizations can take advantage of the full semantic info of coarse opcodes).
4839 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4840 * MonoInst->opcode initially is the IL opcode or some simplification of that
4841 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4842 * opcode with value bigger than OP_LAST.
4843 * At this point the IR can be handed over to an interpreter, a dumb code generator
4844 * or to the optimizing code generator that will translate it to SSA form.
4846 * Profiling directed optimizations.
4847 * We may compile by default with few or no optimizations and instrument the code
4848 * or the user may indicate what methods to optimize the most either in a config file
4849 * or through repeated runs where the compiler applies offline the optimizations to
4850 * each method and then decides if it was worth it.
4853 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4854 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4855 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4856 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4857 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4858 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4859 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4860 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4862 /* offset from br.s -> br like opcodes */
4863 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the IL address IP still belongs to basic block BB,
 * i.e. no other basic block starts at that offset in cfg->cil_offset_to_bb.
 */
4866 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4868 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4870 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan pass over the IL stream [START, END): walk every opcode,
 * decode its operand kind, and create basic blocks (GET_BBLOCK) at each
 * branch target and at the instruction following a branch/switch, so the
 * main translation loop already knows where bblocks begin.  Basic blocks
 * that contain a `throw' are flagged out_of_line so later passes can move
 * them off the hot path.
 */
4874 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4876 unsigned char *ip = start;
4877 unsigned char *target;
4880 MonoBasicBlock *bblock;
4881 const MonoOpcode *opcode;
4884 cli_addr = ip - start;
4885 i = mono_opcode_value ((const guint8 **)&ip, end);
4888 opcode = &mono_opcodes [i];
/* advance ip past the operand according to the operand kind */
4889 switch (opcode->argument) {
4890 case MonoInlineNone:
4893 case MonoInlineString:
4894 case MonoInlineType:
4895 case MonoInlineField:
4896 case MonoInlineMethod:
4899 case MonoShortInlineR:
4906 case MonoShortInlineVar:
4907 case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next opcode */
4910 case MonoShortInlineBrTarget:
4911 target = start + cli_addr + 2 + (signed char)ip [1];
4912 GET_BBLOCK (cfg, bblock, target);
4915 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte signed displacement relative to the next opcode */
4917 case MonoInlineBrTarget:
4918 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4919 GET_BBLOCK (cfg, bblock, target);
4922 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, each relative to the end of the table */
4924 case MonoInlineSwitch: {
4925 guint32 n = read32 (ip + 1);
4928 cli_addr += 5 + 4 * n;
4929 target = start + cli_addr;
4930 GET_BBLOCK (cfg, bblock, target);
4932 for (j = 0; j < n; ++j) {
4933 target = start + cli_addr + (gint32)read32 (ip);
4934 GET_BBLOCK (cfg, bblock, target);
4944 g_assert_not_reached ();
4947 if (i == CEE_THROW) {
4948 unsigned char *bb_start = ip - 1;
4950 /* Find the start of the bblock containing the throw */
4952 while ((bb_start >= start) && !bblock) {
4953 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4957 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data; otherwise it is looked up in
 * the method's image.  "allow_open" means open constructed types are not
 * rejected here (see mini_get_method () below for the checked variant).
 */
4966 static inline MonoMethod *
4967 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4971 if (m->wrapper_type != MONO_WRAPPER_NONE)
4972 return mono_method_get_wrapper_data (m, token);
4974 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the handling
 * branch is outside this excerpt).
 */
4979 static inline MonoMethod *
4980 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4982 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4984 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrapper methods, otherwise a full lookup in the method's image.  The
 * resolved class is initialized before being returned.
 */
4990 static inline MonoClass*
4991 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4995 if (method->wrapper_type != MONO_WRAPPER_NONE)
4996 klass = mono_method_get_wrapper_data (method, token);
4998 klass = mono_class_get_full (method->klass->image, token, context);
5000 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS link demands).
 *   For an ECMA link demand, code throwing a SecurityException is emitted
 * in place of the call; for other failures the pending result is recorded
 * in cfg->exception_type/exception_data for later reporting.
 */
5009 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5013 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5017 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5018 if (result == MONO_JIT_SECURITY_OK)
5021 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5022 /* Generate code to throw a SecurityException before the actual call/link */
5023 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5026 NEW_ICONST (cfg, args [0], 4);
5027 NEW_METHODCONST (cfg, args [1], caller);
5028 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5029 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5030 /* don't hide previous results */
5031 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5032 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the 1-argument SecurityManager.ThrowException method, resolving
 * it lazily into a static cache on first use.
 *   NOTE(review): the lazy initialization is not obviously thread-safe —
 * confirm callers hold the appropriate JIT lock.
 */
5040 throw_exception (void)
5042 static MonoMethod *method = NULL;
5045 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5046 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-built exception object EX at runtime, by
 * calling SecurityManager.ThrowException with EX as a pointer constant.
 */
5053 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5055 MonoMethod *thrower = throw_exception ();
5058 EMIT_NEW_PCONST (cfg, args [0], ex);
5059 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
5067 get_original_method (MonoMethod *method)
5069 if (method->wrapper_type == MONO_WRAPPER_NONE)
5072 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5073 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5076 /* in other cases we need to find the original method */
5077 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * is not allowed to access FIELD, emit code that throws the returned
 * exception at the current point in the IR.
 */
5081 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5082 MonoBasicBlock *bblock, unsigned char *ip)
5084 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5085 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5087 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * is not allowed to call CALLEE, emit code that throws the returned
 * exception at the current point in the IR.
 */
5091 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5092 MonoBasicBlock *bblock, unsigned char *ip)
5094 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5095 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5097 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *   On success *OUT_SIZE and *OUT_FIELD_TOKEN are filled in; for AOT
 * compilation the field RVA is returned instead of a direct data pointer
 * so the lookup can happen at load time.
 */
5105 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* expected IL shape:
5108 * newarr[System.Int32]
5110 * ldtoken field valuetype ...
5111 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
 */
5113 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5114 guint32 token = read32 (ip + 7);
5115 guint32 field_token = read32 (ip + 2);
5116 guint32 field_index = field_token & 0xffffff;
5118 const char *data_ptr;
5120 MonoMethod *cmethod;
5121 MonoClass *dummy_class;
5122 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5128 *out_field_token = field_token;
5130 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the call must really be RuntimeHelpers.InitializeArray from corlib */
5133 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose in-image layout matches the runtime layout */
5135 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5136 case MONO_TYPE_BOOLEAN:
5140 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5141 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5142 case MONO_TYPE_CHAR:
5152 return NULL; /* stupid ARM FP swapped format */
/* refuse when the requested size exceeds the actual field size */
5162 if (size > mono_type_size (field->type, &dummy_align))
5165 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5166 if (!method->klass->image->dynamic) {
5167 field_index = read32 (ip + 2) & 0xffffff;
5168 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5169 data_ptr = mono_image_rva_map (method->klass->image, rva);
5170 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5171 /* for aot code we do the lookup on load */
5172 if (aot && data_ptr)
5173 return GUINT_TO_POINTER (rva);
5175 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: read the data directly from the field */
5177 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message that names
 * METHOD and disassembles the offending instruction at IP (or notes an
 * empty method body).  The header is queued on cfg->headers_to_free.
 */
5185 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5187 char *method_fname = mono_method_full_name (method, TRUE);
5189 MonoMethodHeader *header = mono_method_get_header (method);
5191 if (header->code_size == 0)
5192 method_code = g_strdup ("method body is empty.");
5194 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5195 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5196 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5197 g_free (method_fname);
5198 g_free (method_code);
5199 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG to be raised when
 * compilation finishes.  cfg->exception_ptr is registered as a GC root
 * since it now holds a managed object reference.
 */
5203 set_exception_object (MonoCompile *cfg, MonoException *exception)
5205 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5206 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5207 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE when KLASS is a reference type, resolving type variables
 * through the generic sharing context first when one is active.
 */
5211 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5215 if (cfg->generic_sharing_context)
5216 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5218 type = &klass->byval_arg;
5219 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the top-of-stack value *SP into local N.  When the
 * value is a freshly emitted constant and the store would be a plain
 * register move, the move is elided by retargeting the constant's dreg
 * directly to the local's register.
 */
5223 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5226 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5227 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5228 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5229 /* Optimize reg-reg moves away */
5231 * Can't optimize other opcodes, since sp[0] might point to
5232 * the last ins of a decomposed opcode.
5234 sp [0]->dreg = (cfg)->locals [n]->dreg;
5236 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: when the ldloca is immediately followed by an initobj on that
 * local, replace the pair with a direct zero/NULL store to the local and
 * skip the address-taking entirely.  SIZE distinguishes the short and long
 * ldloca encodings (the short-form read is outside this excerpt).
 */
5244 static inline unsigned char *
5245 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5254 local = read16 (ip + 2);
/* peek ahead for an initobj in the same basic block */
5258 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5259 gboolean skip = FALSE;
5261 /* From the INITOBJ case */
5262 token = read32 (ip + 2);
5263 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5264 CHECK_TYPELOAD (klass);
/* reference types (including shared generic instances resolved to a
 * reference type) are initialized with NULL, value types with a vzero */
5265 if (generic_class_is_reference_type (cfg, klass)) {
5266 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5267 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5268 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5269 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5270 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return TRUE when CLASS is System.Exception or derives from it, by
 * walking the ->parent chain (the loop tail is outside this excerpt).
 */
5283 is_exception_class (MonoClass *class)
5286 if (class == mono_defaults.exception_class)
5288 class = class->parent;
5294 * mono_method_to_ir:
5296 * Translate the .net IL into linear IR.
5299 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5300 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5301 guint inline_offset, gboolean is_virtual_call)
5304 MonoInst *ins, **sp, **stack_start;
5305 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5306 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5307 MonoMethod *cmethod, *method_definition;
5308 MonoInst **arg_array;
5309 MonoMethodHeader *header;
5311 guint32 token, ins_flag;
5313 MonoClass *constrained_call = NULL;
5314 unsigned char *ip, *end, *target, *err_pos;
5315 static double r8_0 = 0.0;
5316 MonoMethodSignature *sig;
5317 MonoGenericContext *generic_context = NULL;
5318 MonoGenericContainer *generic_container = NULL;
5319 MonoType **param_types;
5320 int i, n, start_new_bblock, dreg;
5321 int num_calls = 0, inline_costs = 0;
5322 int breakpoint_id = 0;
5324 MonoBoolean security, pinvoke;
5325 MonoSecurityManager* secman = NULL;
5326 MonoDeclSecurityActions actions;
5327 GSList *class_inits = NULL;
5328 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5330 gboolean init_locals, seq_points, skip_dead_blocks;
5332 /* serialization and xdomain stuff may need access to private fields and methods */
5333 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5334 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5335 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5336 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5337 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5338 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5340 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5342 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5343 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5344 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5345 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5347 image = method->klass->image;
5348 header = mono_method_get_header (method);
5350 MonoLoaderError *error;
5352 if ((error = mono_loader_get_last_error ())) {
5353 cfg->exception_type = error->exception_type;
5355 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5356 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5358 goto exception_exit;
5360 generic_container = mono_method_get_generic_container (method);
5361 sig = mono_method_signature (method);
5362 num_args = sig->hasthis + sig->param_count;
5363 ip = (unsigned char*)header->code;
5364 cfg->cil_start = ip;
5365 end = ip + header->code_size;
5366 mono_jit_stats.cil_code_size += header->code_size;
5367 init_locals = header->init_locals;
5369 seq_points = cfg->gen_seq_points && cfg->method == method;
5372 * Methods without init_locals set could cause asserts in various passes
5377 method_definition = method;
5378 while (method_definition->is_inflated) {
5379 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5380 method_definition = imethod->declaring;
5383 /* SkipVerification is not allowed if core-clr is enabled */
5384 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5386 dont_verify_stloc = TRUE;
5389 if (!dont_verify && mini_method_verify (cfg, method_definition))
5390 goto exception_exit;
5392 if (mono_debug_using_mono_debugger ())
5393 cfg->keep_cil_nops = TRUE;
5395 if (sig->is_inflated)
5396 generic_context = mono_method_get_context (method);
5397 else if (generic_container)
5398 generic_context = &generic_container->context;
5399 cfg->generic_context = generic_context;
5401 if (!cfg->generic_sharing_context)
5402 g_assert (!sig->has_type_parameters);
5404 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5405 g_assert (method->is_inflated);
5406 g_assert (mono_method_get_context (method)->method_inst);
5408 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5409 g_assert (sig->generic_param_count);
5411 if (cfg->method == method) {
5412 cfg->real_offset = 0;
5414 cfg->real_offset = inline_offset;
5417 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5418 cfg->cil_offset_to_bb_len = header->code_size;
5420 cfg->current_method = method;
5422 if (cfg->verbose_level > 2)
5423 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5425 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5427 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5428 for (n = 0; n < sig->param_count; ++n)
5429 param_types [n + sig->hasthis] = sig->params [n];
5430 cfg->arg_types = param_types;
5432 dont_inline = g_list_prepend (dont_inline, method);
5433 if (cfg->method == method) {
5435 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5436 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5439 NEW_BBLOCK (cfg, start_bblock);
5440 cfg->bb_entry = start_bblock;
5441 start_bblock->cil_code = NULL;
5442 start_bblock->cil_length = 0;
5445 NEW_BBLOCK (cfg, end_bblock);
5446 cfg->bb_exit = end_bblock;
5447 end_bblock->cil_code = NULL;
5448 end_bblock->cil_length = 0;
5449 g_assert (cfg->num_bblocks == 2);
5451 arg_array = cfg->args;
5453 if (header->num_clauses) {
5454 cfg->spvars = g_hash_table_new (NULL, NULL);
5455 cfg->exvars = g_hash_table_new (NULL, NULL);
5457 /* handle exception clauses */
5458 for (i = 0; i < header->num_clauses; ++i) {
5459 MonoBasicBlock *try_bb;
5460 MonoExceptionClause *clause = &header->clauses [i];
5461 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5462 try_bb->real_offset = clause->try_offset;
5463 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5464 tblock->real_offset = clause->handler_offset;
5465 tblock->flags |= BB_EXCEPTION_HANDLER;
5467 link_bblock (cfg, try_bb, tblock);
5469 if (*(ip + clause->handler_offset) == CEE_POP)
5470 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5472 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5473 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5474 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5475 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5476 MONO_ADD_INS (tblock, ins);
5478 /* todo: is a fault block unsafe to optimize? */
5479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5480 tblock->flags |= BB_EXCEPTION_UNSAFE;
5484 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5486 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5488 /* catch and filter blocks get the exception object on the stack */
5489 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5490 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5491 MonoInst *dummy_use;
5493 /* mostly like handle_stack_args (), but just sets the input args */
5494 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5495 tblock->in_scount = 1;
5496 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5497 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5500 * Add a dummy use for the exvar so its liveness info will be
5504 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5506 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5507 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5508 tblock->flags |= BB_EXCEPTION_HANDLER;
5509 tblock->real_offset = clause->data.filter_offset;
5510 tblock->in_scount = 1;
5511 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5512 /* The filter block shares the exvar with the handler block */
5513 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5514 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5515 MONO_ADD_INS (tblock, ins);
5519 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5520 clause->data.catch_class &&
5521 cfg->generic_sharing_context &&
5522 mono_class_check_context_used (clause->data.catch_class)) {
5524 * In shared generic code with catch
5525 * clauses containing type variables
5526 * the exception handling code has to
5527 * be able to get to the rgctx.
5528 * Therefore we have to make sure that
5529 * the vtable/mrgctx argument (for
5530 * static or generic methods) or the
5531 * "this" argument (for non-static
5532 * methods) are live.
5534 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5535 mini_method_get_context (method)->method_inst ||
5536 method->klass->valuetype) {
5537 mono_get_vtable_var (cfg);
5539 MonoInst *dummy_use;
5541 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5546 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5547 cfg->cbb = start_bblock;
5548 cfg->args = arg_array;
5549 mono_save_args (cfg, sig, inline_args);
5552 /* FIRST CODE BLOCK */
5553 NEW_BBLOCK (cfg, bblock);
5554 bblock->cil_code = ip;
5558 ADD_BBLOCK (cfg, bblock);
5560 if (cfg->method == method) {
5561 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5562 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5563 MONO_INST_NEW (cfg, ins, OP_BREAK);
5564 MONO_ADD_INS (bblock, ins);
5568 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5569 secman = mono_security_manager_get_methods ();
5571 security = (secman && mono_method_has_declsec (method));
5572 /* at this point having security doesn't mean we have any code to generate */
5573 if (security && (cfg->method == method)) {
5574 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5575 * And we do not want to enter the next section (with allocation) if we
5576 * have nothing to generate */
5577 security = mono_declsec_get_demands (method, &actions);
5580 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5581 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5583 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5584 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5585 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5587 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5588 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5592 mono_custom_attrs_free (custom);
5595 custom = mono_custom_attrs_from_class (wrapped->klass);
5596 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5600 mono_custom_attrs_free (custom);
5603 /* not a P/Invoke after all */
5608 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5609 /* we use a separate basic block for the initialization code */
5610 NEW_BBLOCK (cfg, init_localsbb);
5611 cfg->bb_init = init_localsbb;
5612 init_localsbb->real_offset = cfg->real_offset;
5613 start_bblock->next_bb = init_localsbb;
5614 init_localsbb->next_bb = bblock;
5615 link_bblock (cfg, start_bblock, init_localsbb);
5616 link_bblock (cfg, init_localsbb, bblock);
5618 cfg->cbb = init_localsbb;
5620 start_bblock->next_bb = bblock;
5621 link_bblock (cfg, start_bblock, bblock);
5624 /* at this point we know, if security is TRUE, that some code needs to be generated */
5625 if (security && (cfg->method == method)) {
5628 mono_jit_stats.cas_demand_generation++;
5630 if (actions.demand.blob) {
5631 /* Add code for SecurityAction.Demand */
5632 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5633 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5634 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5635 mono_emit_method_call (cfg, secman->demand, args, NULL);
5637 if (actions.noncasdemand.blob) {
5638 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5639 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5640 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5641 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5642 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5643 mono_emit_method_call (cfg, secman->demand, args, NULL);
5645 if (actions.demandchoice.blob) {
5646 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5647 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5648 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5649 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5650 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5654 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5656 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5659 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5660 /* check if this is native code, e.g. an icall or a p/invoke */
5661 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5662 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5664 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5665 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5667 /* if this is a native call then it can only be JITted from platform code */
5668 if ((icall || pinvk) && method->klass && method->klass->image) {
5669 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5670 MonoException *ex = icall ? mono_get_exception_security () :
5671 mono_get_exception_method_access ();
5672 emit_throw_exception (cfg, ex);
5679 if (header->code_size == 0)
5682 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5687 if (cfg->method == method)
5688 mono_debug_init_method (cfg, bblock, breakpoint_id);
5690 for (n = 0; n < header->num_locals; ++n) {
5691 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5696 /* We force the vtable variable here for all shared methods
5697 for the possibility that they might show up in a stack
5698 trace where their exact instantiation is needed. */
5699 if (cfg->generic_sharing_context && method == cfg->method) {
5700 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5701 mini_method_get_context (method)->method_inst ||
5702 method->klass->valuetype) {
5703 mono_get_vtable_var (cfg);
5705 /* FIXME: Is there a better way to do this?
5706 We need the variable live for the duration
5707 of the whole method. */
5708 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5712 /* add a check for this != NULL to inlined methods */
5713 if (is_virtual_call) {
5716 NEW_ARGLOAD (cfg, arg_ins, 0);
5717 MONO_ADD_INS (cfg->cbb, arg_ins);
5718 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5721 skip_dead_blocks = !dont_verify;
5722 if (skip_dead_blocks) {
5723 original_bb = bb = mono_basic_block_split (method, &error);
5724 if (!mono_error_ok (&error)) {
5725 mono_error_cleanup (&error);
5731 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5732 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5735 start_new_bblock = 0;
5738 if (cfg->method == method)
5739 cfg->real_offset = ip - header->code;
5741 cfg->real_offset = inline_offset;
5746 if (start_new_bblock) {
5747 bblock->cil_length = ip - bblock->cil_code;
5748 if (start_new_bblock == 2) {
5749 g_assert (ip == tblock->cil_code);
5751 GET_BBLOCK (cfg, tblock, ip);
5753 bblock->next_bb = tblock;
5756 start_new_bblock = 0;
5757 for (i = 0; i < bblock->in_scount; ++i) {
5758 if (cfg->verbose_level > 3)
5759 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5760 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5764 g_slist_free (class_inits);
5767 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5768 link_bblock (cfg, bblock, tblock);
5769 if (sp != stack_start) {
5770 handle_stack_args (cfg, stack_start, sp - stack_start);
5772 CHECK_UNVERIFIABLE (cfg);
5774 bblock->next_bb = tblock;
5777 for (i = 0; i < bblock->in_scount; ++i) {
5778 if (cfg->verbose_level > 3)
5779 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5780 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5783 g_slist_free (class_inits);
5788 if (skip_dead_blocks) {
5789 int ip_offset = ip - header->code;
5791 if (ip_offset == bb->end)
5795 int op_size = mono_opcode_size (ip, end);
5796 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5798 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5800 if (ip_offset + op_size == bb->end) {
5801 MONO_INST_NEW (cfg, ins, OP_NOP);
5802 MONO_ADD_INS (bblock, ins);
5803 start_new_bblock = 1;
5811 * Sequence points are points where the debugger can place a breakpoint.
5812 * Currently, we generate these automatically at points where the IL
5815 if (seq_points && sp == stack_start) {
5816 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5817 MONO_ADD_INS (cfg->cbb, ins);
5820 bblock->real_offset = cfg->real_offset;
5822 if ((cfg->method == method) && cfg->coverage_info) {
5823 guint32 cil_offset = ip - header->code;
5824 cfg->coverage_info->data [cil_offset].cil_code = ip;
5826 /* TODO: Use an increment here */
5827 #if defined(TARGET_X86)
5828 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5829 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5831 MONO_ADD_INS (cfg->cbb, ins);
5833 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5834 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5838 if (cfg->verbose_level > 3)
5839 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5843 if (cfg->keep_cil_nops)
5844 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5846 MONO_INST_NEW (cfg, ins, OP_NOP);
5848 MONO_ADD_INS (bblock, ins);
5851 if (should_insert_brekpoint (cfg->method))
5852 MONO_INST_NEW (cfg, ins, OP_BREAK);
5854 MONO_INST_NEW (cfg, ins, OP_NOP);
5856 MONO_ADD_INS (bblock, ins);
5862 CHECK_STACK_OVF (1);
5863 n = (*ip)-CEE_LDARG_0;
5865 EMIT_NEW_ARGLOAD (cfg, ins, n);
5873 CHECK_STACK_OVF (1);
5874 n = (*ip)-CEE_LDLOC_0;
5876 EMIT_NEW_LOCLOAD (cfg, ins, n);
5885 n = (*ip)-CEE_STLOC_0;
5888 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5890 emit_stloc_ir (cfg, sp, header, n);
5897 CHECK_STACK_OVF (1);
5900 EMIT_NEW_ARGLOAD (cfg, ins, n);
5906 CHECK_STACK_OVF (1);
5909 NEW_ARGLOADA (cfg, ins, n);
5910 MONO_ADD_INS (cfg->cbb, ins);
5920 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5922 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5927 CHECK_STACK_OVF (1);
5930 EMIT_NEW_LOCLOAD (cfg, ins, n);
5934 case CEE_LDLOCA_S: {
5935 unsigned char *tmp_ip;
5937 CHECK_STACK_OVF (1);
5938 CHECK_LOCAL (ip [1]);
5940 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5946 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5955 CHECK_LOCAL (ip [1]);
5956 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5958 emit_stloc_ir (cfg, sp, header, ip [1]);
5963 CHECK_STACK_OVF (1);
5964 EMIT_NEW_PCONST (cfg, ins, NULL);
5965 ins->type = STACK_OBJ;
5970 CHECK_STACK_OVF (1);
5971 EMIT_NEW_ICONST (cfg, ins, -1);
5984 CHECK_STACK_OVF (1);
5985 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5991 CHECK_STACK_OVF (1);
5993 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5999 CHECK_STACK_OVF (1);
6000 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6006 CHECK_STACK_OVF (1);
6007 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6008 ins->type = STACK_I8;
6009 ins->dreg = alloc_dreg (cfg, STACK_I8);
6011 ins->inst_l = (gint64)read64 (ip);
6012 MONO_ADD_INS (bblock, ins);
6018 gboolean use_aotconst = FALSE;
6020 #ifdef TARGET_POWERPC
6021 /* FIXME: Clean this up */
6022 if (cfg->compile_aot)
6023 use_aotconst = TRUE;
6026 /* FIXME: we should really allocate this only late in the compilation process */
6027 f = mono_domain_alloc (cfg->domain, sizeof (float));
6029 CHECK_STACK_OVF (1);
6035 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6037 dreg = alloc_freg (cfg);
6038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6039 ins->type = STACK_R8;
6041 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6042 ins->type = STACK_R8;
6043 ins->dreg = alloc_dreg (cfg, STACK_R8);
6045 MONO_ADD_INS (bblock, ins);
6055 gboolean use_aotconst = FALSE;
6057 #ifdef TARGET_POWERPC
6058 /* FIXME: Clean this up */
6059 if (cfg->compile_aot)
6060 use_aotconst = TRUE;
6063 /* FIXME: we should really allocate this only late in the compilation process */
6064 d = mono_domain_alloc (cfg->domain, sizeof (double));
6066 CHECK_STACK_OVF (1);
6072 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6074 dreg = alloc_freg (cfg);
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6076 ins->type = STACK_R8;
6078 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6079 ins->type = STACK_R8;
6080 ins->dreg = alloc_dreg (cfg, STACK_R8);
6082 MONO_ADD_INS (bblock, ins);
6091 MonoInst *temp, *store;
6093 CHECK_STACK_OVF (1);
6097 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6098 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6100 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6103 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6116 if (sp [0]->type == STACK_R8)
6117 /* we need to pop the value from the x86 FP stack */
6118 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6127 if (stack_start != sp)
6129 token = read32 (ip + 1);
6130 /* FIXME: check the signature matches */
6131 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6136 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6137 GENERIC_SHARING_FAILURE (CEE_JMP);
6139 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6140 CHECK_CFG_EXCEPTION;
6142 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6144 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6147 /* Handle tail calls similarly to calls */
6148 n = fsig->param_count + fsig->hasthis;
6150 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6151 call->method = cmethod;
6152 call->tail_call = TRUE;
6153 call->signature = mono_method_signature (cmethod);
6154 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6155 call->inst.inst_p0 = cmethod;
6156 for (i = 0; i < n; ++i)
6157 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6159 mono_arch_emit_call (cfg, call);
6160 MONO_ADD_INS (bblock, (MonoInst*)call);
6163 for (i = 0; i < num_args; ++i)
6164 /* Prevent arguments from being optimized away */
6165 arg_array [i]->flags |= MONO_INST_VOLATILE;
6167 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6168 ins = (MonoInst*)call;
6169 ins->inst_p0 = cmethod;
6170 MONO_ADD_INS (bblock, ins);
6174 start_new_bblock = 1;
6179 case CEE_CALLVIRT: {
6180 MonoInst *addr = NULL;
6181 MonoMethodSignature *fsig = NULL;
6183 int virtual = *ip == CEE_CALLVIRT;
6184 int calli = *ip == CEE_CALLI;
6185 gboolean pass_imt_from_rgctx = FALSE;
6186 MonoInst *imt_arg = NULL;
6187 gboolean pass_vtable = FALSE;
6188 gboolean pass_mrgctx = FALSE;
6189 MonoInst *vtable_arg = NULL;
6190 gboolean check_this = FALSE;
6191 gboolean supported_tail_call = FALSE;
6194 token = read32 (ip + 1);
6201 if (method->wrapper_type != MONO_WRAPPER_NONE)
6202 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6204 fsig = mono_metadata_parse_signature (image, token);
6206 n = fsig->param_count + fsig->hasthis;
6208 if (method->dynamic && fsig->pinvoke) {
6212 * This is a call through a function pointer using a pinvoke
6213 * signature. Have to create a wrapper and call that instead.
6214 * FIXME: This is very slow, need to create a wrapper at JIT time
6215 * instead based on the signature.
6217 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6218 EMIT_NEW_PCONST (cfg, args [1], fsig);
6220 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6223 MonoMethod *cil_method;
6225 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6226 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6227 cil_method = cmethod;
6228 } else if (constrained_call) {
6229 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6231 * This is needed since get_method_constrained can't find
6232 * the method in klass representing a type var.
6233 * The type var is guaranteed to be a reference type in this
6236 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6237 cil_method = cmethod;
6238 g_assert (!cmethod->klass->valuetype);
6240 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6243 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6244 cil_method = cmethod;
6249 if (!dont_verify && !cfg->skip_visibility) {
6250 MonoMethod *target_method = cil_method;
6251 if (method->is_inflated) {
6252 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6254 if (!mono_method_can_access_method (method_definition, target_method) &&
6255 !mono_method_can_access_method (method, cil_method))
6256 METHOD_ACCESS_FAILURE;
6259 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6260 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6262 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6263 /* MS.NET seems to silently convert this to a callvirt */
6268 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6269 * converts to a callvirt.
6271 * tests/bug-515884.il is an example of this behavior
6273 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6274 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6275 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6279 if (!cmethod->klass->inited)
6280 if (!mono_class_init (cmethod->klass))
6283 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6284 mini_class_is_system_array (cmethod->klass)) {
6285 array_rank = cmethod->klass->rank;
6286 fsig = mono_method_signature (cmethod);
6288 fsig = mono_method_signature (cmethod);
6293 if (fsig->pinvoke) {
6294 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6295 check_for_pending_exc, FALSE);
6296 fsig = mono_method_signature (wrapper);
6297 } else if (constrained_call) {
6298 fsig = mono_method_signature (cmethod);
6300 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6304 mono_save_token_info (cfg, image, token, cil_method);
6306 n = fsig->param_count + fsig->hasthis;
6308 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6309 if (check_linkdemand (cfg, method, cmethod))
6311 CHECK_CFG_EXCEPTION;
6314 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6315 g_assert_not_reached ();
6318 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6321 if (!cfg->generic_sharing_context && cmethod)
6322 g_assert (!mono_method_check_context_used (cmethod));
6326 //g_assert (!virtual || fsig->hasthis);
6330 if (constrained_call) {
6332 * We have the `constrained.' prefix opcode.
6334 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6336 * The type parameter is instantiated as a valuetype,
6337 * but that type doesn't override the method we're
6338 * calling, so we need to box `this'.
6340 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6341 ins->klass = constrained_call;
6342 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6343 CHECK_CFG_EXCEPTION;
6344 } else if (!constrained_call->valuetype) {
6345 int dreg = alloc_preg (cfg);
6348 * The type parameter is instantiated as a reference
6349 * type. We have a managed pointer on the stack, so
6350 * we need to dereference it here.
6352 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6353 ins->type = STACK_OBJ;
6355 } else if (cmethod->klass->valuetype)
6357 constrained_call = NULL;
6360 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6364 * If the callee is a shared method, then its static cctor
6365 * might not get called after the call was patched.
6367 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6368 emit_generic_class_init (cfg, cmethod->klass);
6369 CHECK_TYPELOAD (cmethod->klass);
6372 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6373 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6374 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6375 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6376 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6379 * Pass vtable iff target method might
6380 * be shared, which means that sharing
6381 * is enabled for its class and its
6382 * context is sharable (and it's not a
6385 if (sharing_enabled && context_sharable &&
6386 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6390 if (cmethod && mini_method_get_context (cmethod) &&
6391 mini_method_get_context (cmethod)->method_inst) {
6392 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6393 MonoGenericContext *context = mini_method_get_context (cmethod);
6394 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6396 g_assert (!pass_vtable);
6398 if (sharing_enabled && context_sharable)
6402 if (cfg->generic_sharing_context && cmethod) {
6403 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6405 context_used = mono_method_check_context_used (cmethod);
6407 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6408 /* Generic method interface
6409 calls are resolved via a
6410 helper function and don't
6412 if (!cmethod_context || !cmethod_context->method_inst)
6413 pass_imt_from_rgctx = TRUE;
6417 * If a shared method calls another
6418 * shared method then the caller must
6419 * have a generic sharing context
6420 * because the magic trampoline
6421 * requires it. FIXME: We shouldn't
6422 * have to force the vtable/mrgctx
6423 * variable here. Instead there
6424 * should be a flag in the cfg to
6425 * request a generic sharing context.
6428 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6429 mono_get_vtable_var (cfg);
6434 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6436 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6438 CHECK_TYPELOAD (cmethod->klass);
6439 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6444 g_assert (!vtable_arg);
6446 if (!cfg->compile_aot) {
6448 * emit_get_rgctx_method () calls mono_class_vtable () so check
6449 * for type load errors before.
6451 mono_class_setup_vtable (cmethod->klass);
6452 CHECK_TYPELOAD (cmethod->klass);
6455 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6457 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6458 MONO_METHOD_IS_FINAL (cmethod)) {
6465 if (pass_imt_from_rgctx) {
6466 g_assert (!pass_vtable);
6469 imt_arg = emit_get_rgctx_method (cfg, context_used,
6470 cmethod, MONO_RGCTX_INFO_METHOD);
6474 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6476 /* Calling virtual generic methods */
6477 if (cmethod && virtual &&
6478 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6479 !(MONO_METHOD_IS_FINAL (cmethod) &&
6480 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6481 mono_method_signature (cmethod)->generic_param_count) {
6482 MonoInst *this_temp, *this_arg_temp, *store;
6483 MonoInst *iargs [4];
6485 g_assert (mono_method_signature (cmethod)->is_inflated);
6487 /* Prevent inlining of methods that contain indirect calls */
6490 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6491 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6492 g_assert (!imt_arg);
6494 g_assert (cmethod->is_inflated);
6495 imt_arg = emit_get_rgctx_method (cfg, context_used,
6496 cmethod, MONO_RGCTX_INFO_METHOD);
6497 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6501 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6502 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6503 MONO_ADD_INS (bblock, store);
6505 /* FIXME: This should be a managed pointer */
6506 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6508 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6509 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6510 cmethod, MONO_RGCTX_INFO_METHOD);
6511 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6512 addr = mono_emit_jit_icall (cfg,
6513 mono_helper_compile_generic_method, iargs);
6515 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6517 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6520 if (!MONO_TYPE_IS_VOID (fsig->ret))
6521 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6523 CHECK_CFG_EXCEPTION;
6530 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6531 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6533 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6537 /* FIXME: runtime generic context pointer for jumps? */
6538 /* FIXME: handle this for generic sharing eventually */
6539 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6542 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6545 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6546 /* Handle tail calls similarly to calls */
6547 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6549 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6550 call->tail_call = TRUE;
6551 call->method = cmethod;
6552 call->signature = mono_method_signature (cmethod);
6555 * We implement tail calls by storing the actual arguments into the
6556 * argument variables, then emitting a CEE_JMP.
6558 for (i = 0; i < n; ++i) {
6559 /* Prevent argument from being register allocated */
6560 arg_array [i]->flags |= MONO_INST_VOLATILE;
6561 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6565 ins = (MonoInst*)call;
6566 ins->inst_p0 = cmethod;
6567 ins->inst_p1 = arg_array [0];
6568 MONO_ADD_INS (bblock, ins);
6569 link_bblock (cfg, bblock, end_bblock);
6570 start_new_bblock = 1;
6572 CHECK_CFG_EXCEPTION;
6574 /* skip CEE_RET as well */
6580 /* Conversion to a JIT intrinsic */
6581 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6583 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6584 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6589 CHECK_CFG_EXCEPTION;
6597 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6598 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6599 mono_method_check_inlining (cfg, cmethod) &&
6600 !g_list_find (dont_inline, cmethod)) {
6602 gboolean allways = FALSE;
6604 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6605 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6606 /* Prevent inlining of methods that call wrappers */
6608 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6612 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6614 cfg->real_offset += 5;
6617 if (!MONO_TYPE_IS_VOID (fsig->ret))
6618 /* *sp is already set by inline_method */
6621 inline_costs += costs;
6627 inline_costs += 10 * num_calls++;
6629 /* Tail recursion elimination */
6630 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6631 gboolean has_vtargs = FALSE;
6634 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6637 /* keep it simple */
6638 for (i = fsig->param_count - 1; i >= 0; i--) {
6639 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6644 for (i = 0; i < n; ++i)
6645 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6646 MONO_INST_NEW (cfg, ins, OP_BR);
6647 MONO_ADD_INS (bblock, ins);
6648 tblock = start_bblock->out_bb [0];
6649 link_bblock (cfg, bblock, tblock);
6650 ins->inst_target_bb = tblock;
6651 start_new_bblock = 1;
6653 /* skip the CEE_RET, too */
6654 if (ip_in_bb (cfg, bblock, ip + 5))
6664 /* Generic sharing */
6665 /* FIXME: only do this for generic methods if
6666 they are not shared! */
6667 if (context_used && !imt_arg && !array_rank &&
6668 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6669 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6670 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6671 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6674 g_assert (cfg->generic_sharing_context && cmethod);
6678 * We are compiling a call to a
6679 * generic method from shared code,
6680 * which means that we have to look up
6681 * the method in the rgctx and do an
6684 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6687 /* Indirect calls */
6689 g_assert (!imt_arg);
6691 if (*ip == CEE_CALL)
6692 g_assert (context_used);
6693 else if (*ip == CEE_CALLI)
6694 g_assert (!vtable_arg);
6696 /* FIXME: what the hell is this??? */
6697 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6698 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6700 /* Prevent inlining of methods with indirect calls */
6705 int rgctx_reg = mono_alloc_preg (cfg);
6707 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6708 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6709 call = (MonoCallInst*)ins;
6710 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6712 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6714 * Instead of emitting an indirect call, emit a direct call
6715 * with the contents of the aotconst as the patch info.
6717 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6719 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6720 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6723 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6726 if (!MONO_TYPE_IS_VOID (fsig->ret))
6727 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6729 CHECK_CFG_EXCEPTION;
6740 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6741 if (sp [fsig->param_count]->type == STACK_OBJ) {
6742 MonoInst *iargs [2];
6745 iargs [1] = sp [fsig->param_count];
6747 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6750 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6751 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6752 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6753 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6755 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6758 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6759 if (!cmethod->klass->element_class->valuetype && !readonly)
6760 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6761 CHECK_TYPELOAD (cmethod->klass);
6764 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6767 g_assert_not_reached ();
6770 CHECK_CFG_EXCEPTION;
6777 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6779 if (!MONO_TYPE_IS_VOID (fsig->ret))
6780 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6782 CHECK_CFG_EXCEPTION;
6792 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6794 } else if (imt_arg) {
6795 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6797 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6800 if (!MONO_TYPE_IS_VOID (fsig->ret))
6801 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6803 CHECK_CFG_EXCEPTION;
6810 if (cfg->method != method) {
6811 /* return from inlined method */
6813 * If in_count == 0, that means the ret is unreachable due to
6814 * being preceded by a throw. In that case, inline_method () will
6815 * handle setting the return value
6816 * (test case: test_0_inline_throw ()).
6818 if (return_var && cfg->cbb->in_count) {
6822 //g_assert (returnvar != -1);
6823 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6824 cfg->ret_var_set = TRUE;
6828 MonoType *ret_type = mono_method_signature (method)->ret;
6832 * Place a seq point here too even though the IL stack is not
6833 * empty, so a step over on
6836 * will work correctly.
6838 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6839 MONO_ADD_INS (cfg->cbb, ins);
6842 g_assert (!return_var);
6845 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6848 if (!cfg->vret_addr) {
6851 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6853 EMIT_NEW_RETLOADA (cfg, ret_addr);
6855 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6856 ins->klass = mono_class_from_mono_type (ret_type);
6859 #ifdef MONO_ARCH_SOFT_FLOAT
6860 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6861 MonoInst *iargs [1];
6865 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6866 mono_arch_emit_setret (cfg, method, conv);
6868 mono_arch_emit_setret (cfg, method, *sp);
6871 mono_arch_emit_setret (cfg, method, *sp);
6876 if (sp != stack_start)
6878 MONO_INST_NEW (cfg, ins, OP_BR);
6880 ins->inst_target_bb = end_bblock;
6881 MONO_ADD_INS (bblock, ins);
6882 link_bblock (cfg, bblock, end_bblock);
6883 start_new_bblock = 1;
6887 MONO_INST_NEW (cfg, ins, OP_BR);
6889 target = ip + 1 + (signed char)(*ip);
6891 GET_BBLOCK (cfg, tblock, target);
6892 link_bblock (cfg, bblock, tblock);
6893 ins->inst_target_bb = tblock;
6894 if (sp != stack_start) {
6895 handle_stack_args (cfg, stack_start, sp - stack_start);
6897 CHECK_UNVERIFIABLE (cfg);
6899 MONO_ADD_INS (bblock, ins);
6900 start_new_bblock = 1;
6901 inline_costs += BRANCH_COST;
6915 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6917 target = ip + 1 + *(signed char*)ip;
6923 inline_costs += BRANCH_COST;
6927 MONO_INST_NEW (cfg, ins, OP_BR);
6930 target = ip + 4 + (gint32)read32(ip);
6932 GET_BBLOCK (cfg, tblock, target);
6933 link_bblock (cfg, bblock, tblock);
6934 ins->inst_target_bb = tblock;
6935 if (sp != stack_start) {
6936 handle_stack_args (cfg, stack_start, sp - stack_start);
6938 CHECK_UNVERIFIABLE (cfg);
6941 MONO_ADD_INS (bblock, ins);
6943 start_new_bblock = 1;
6944 inline_costs += BRANCH_COST;
6951 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6952 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6953 guint32 opsize = is_short ? 1 : 4;
6955 CHECK_OPSIZE (opsize);
6957 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6960 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6965 GET_BBLOCK (cfg, tblock, target);
6966 link_bblock (cfg, bblock, tblock);
6967 GET_BBLOCK (cfg, tblock, ip);
6968 link_bblock (cfg, bblock, tblock);
6970 if (sp != stack_start) {
6971 handle_stack_args (cfg, stack_start, sp - stack_start);
6972 CHECK_UNVERIFIABLE (cfg);
6975 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6976 cmp->sreg1 = sp [0]->dreg;
6977 type_from_op (cmp, sp [0], NULL);
6980 #if SIZEOF_REGISTER == 4
6981 if (cmp->opcode == OP_LCOMPARE_IMM) {
6982 /* Convert it to OP_LCOMPARE */
6983 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6984 ins->type = STACK_I8;
6985 ins->dreg = alloc_dreg (cfg, STACK_I8);
6987 MONO_ADD_INS (bblock, ins);
6988 cmp->opcode = OP_LCOMPARE;
6989 cmp->sreg2 = ins->dreg;
6992 MONO_ADD_INS (bblock, cmp);
6994 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6995 type_from_op (ins, sp [0], NULL);
6996 MONO_ADD_INS (bblock, ins);
6997 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6998 GET_BBLOCK (cfg, tblock, target);
6999 ins->inst_true_bb = tblock;
7000 GET_BBLOCK (cfg, tblock, ip);
7001 ins->inst_false_bb = tblock;
7002 start_new_bblock = 2;
7005 inline_costs += BRANCH_COST;
7020 MONO_INST_NEW (cfg, ins, *ip);
7022 target = ip + 4 + (gint32)read32(ip);
7028 inline_costs += BRANCH_COST;
7032 MonoBasicBlock **targets;
7033 MonoBasicBlock *default_bblock;
7034 MonoJumpInfoBBTable *table;
7035 int offset_reg = alloc_preg (cfg);
7036 int target_reg = alloc_preg (cfg);
7037 int table_reg = alloc_preg (cfg);
7038 int sum_reg = alloc_preg (cfg);
7039 gboolean use_op_switch;
7043 n = read32 (ip + 1);
7046 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7050 CHECK_OPSIZE (n * sizeof (guint32));
7051 target = ip + n * sizeof (guint32);
7053 GET_BBLOCK (cfg, default_bblock, target);
7055 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7056 for (i = 0; i < n; ++i) {
7057 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7058 targets [i] = tblock;
7062 if (sp != stack_start) {
7064 * Link the current bb with the targets as well, so handle_stack_args
7065 * will set their in_stack correctly.
7067 link_bblock (cfg, bblock, default_bblock);
7068 for (i = 0; i < n; ++i)
7069 link_bblock (cfg, bblock, targets [i]);
7071 handle_stack_args (cfg, stack_start, sp - stack_start);
7073 CHECK_UNVERIFIABLE (cfg);
7076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7077 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7080 for (i = 0; i < n; ++i)
7081 link_bblock (cfg, bblock, targets [i]);
7083 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7084 table->table = targets;
7085 table->table_size = n;
7087 use_op_switch = FALSE;
7089 /* ARM implements SWITCH statements differently */
7090 /* FIXME: Make it use the generic implementation */
7091 if (!cfg->compile_aot)
7092 use_op_switch = TRUE;
7095 if (COMPILE_LLVM (cfg))
7096 use_op_switch = TRUE;
7098 cfg->cbb->has_jump_table = 1;
7100 if (use_op_switch) {
7101 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7102 ins->sreg1 = src1->dreg;
7103 ins->inst_p0 = table;
7104 ins->inst_many_bb = targets;
7105 ins->klass = GUINT_TO_POINTER (n);
7106 MONO_ADD_INS (cfg->cbb, ins);
7108 if (sizeof (gpointer) == 8)
7109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7111 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7113 #if SIZEOF_REGISTER == 8
7114 /* The upper word might not be zero, and we add it to a 64 bit address later */
7115 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7118 if (cfg->compile_aot) {
7119 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7121 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7122 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7123 ins->inst_p0 = table;
7124 ins->dreg = table_reg;
7125 MONO_ADD_INS (cfg->cbb, ins);
7128 /* FIXME: Use load_memindex */
7129 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7131 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7133 start_new_bblock = 1;
7134 inline_costs += (BRANCH_COST * 2);
7154 dreg = alloc_freg (cfg);
7157 dreg = alloc_lreg (cfg);
7160 dreg = alloc_preg (cfg);
7163 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7164 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7165 ins->flags |= ins_flag;
7167 MONO_ADD_INS (bblock, ins);
7182 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7183 ins->flags |= ins_flag;
7185 MONO_ADD_INS (bblock, ins);
7187 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7188 MonoInst *dummy_use;
7189 /* insert call to write barrier */
7190 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7191 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7192 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7202 MONO_INST_NEW (cfg, ins, (*ip));
7204 ins->sreg1 = sp [0]->dreg;
7205 ins->sreg2 = sp [1]->dreg;
7206 type_from_op (ins, sp [0], sp [1]);
7208 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7210 /* Use the immediate opcodes if possible */
7211 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7212 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7213 if (imm_opcode != -1) {
7214 ins->opcode = imm_opcode;
7215 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7218 sp [1]->opcode = OP_NOP;
7222 MONO_ADD_INS ((cfg)->cbb, (ins));
7224 *sp++ = mono_decompose_opcode (cfg, ins);
7241 MONO_INST_NEW (cfg, ins, (*ip));
7243 ins->sreg1 = sp [0]->dreg;
7244 ins->sreg2 = sp [1]->dreg;
7245 type_from_op (ins, sp [0], sp [1]);
7247 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7248 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7250 /* FIXME: Pass opcode to is_inst_imm */
7252 /* Use the immediate opcodes if possible */
7253 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7256 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7257 if (imm_opcode != -1) {
7258 ins->opcode = imm_opcode;
7259 if (sp [1]->opcode == OP_I8CONST) {
7260 #if SIZEOF_REGISTER == 8
7261 ins->inst_imm = sp [1]->inst_l;
7263 ins->inst_ls_word = sp [1]->inst_ls_word;
7264 ins->inst_ms_word = sp [1]->inst_ms_word;
7268 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7271 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7272 if (sp [1]->next == NULL)
7273 sp [1]->opcode = OP_NOP;
7276 MONO_ADD_INS ((cfg)->cbb, (ins));
7278 *sp++ = mono_decompose_opcode (cfg, ins);
7291 case CEE_CONV_OVF_I8:
7292 case CEE_CONV_OVF_U8:
7296 /* Special case this earlier so we have long constants in the IR */
7297 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7298 int data = sp [-1]->inst_c0;
7299 sp [-1]->opcode = OP_I8CONST;
7300 sp [-1]->type = STACK_I8;
7301 #if SIZEOF_REGISTER == 8
7302 if ((*ip) == CEE_CONV_U8)
7303 sp [-1]->inst_c0 = (guint32)data;
7305 sp [-1]->inst_c0 = data;
7307 sp [-1]->inst_ls_word = data;
7308 if ((*ip) == CEE_CONV_U8)
7309 sp [-1]->inst_ms_word = 0;
7311 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7313 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7320 case CEE_CONV_OVF_I4:
7321 case CEE_CONV_OVF_I1:
7322 case CEE_CONV_OVF_I2:
7323 case CEE_CONV_OVF_I:
7324 case CEE_CONV_OVF_U:
7327 if (sp [-1]->type == STACK_R8) {
7328 ADD_UNOP (CEE_CONV_OVF_I8);
7335 case CEE_CONV_OVF_U1:
7336 case CEE_CONV_OVF_U2:
7337 case CEE_CONV_OVF_U4:
7340 if (sp [-1]->type == STACK_R8) {
7341 ADD_UNOP (CEE_CONV_OVF_U8);
7348 case CEE_CONV_OVF_I1_UN:
7349 case CEE_CONV_OVF_I2_UN:
7350 case CEE_CONV_OVF_I4_UN:
7351 case CEE_CONV_OVF_I8_UN:
7352 case CEE_CONV_OVF_U1_UN:
7353 case CEE_CONV_OVF_U2_UN:
7354 case CEE_CONV_OVF_U4_UN:
7355 case CEE_CONV_OVF_U8_UN:
7356 case CEE_CONV_OVF_I_UN:
7357 case CEE_CONV_OVF_U_UN:
7364 CHECK_CFG_EXCEPTION;
7368 case CEE_ADD_OVF_UN:
7370 case CEE_MUL_OVF_UN:
7372 case CEE_SUB_OVF_UN:
7380 token = read32 (ip + 1);
7381 klass = mini_get_class (method, token, generic_context);
7382 CHECK_TYPELOAD (klass);
7384 if (generic_class_is_reference_type (cfg, klass)) {
7385 MonoInst *store, *load;
7386 int dreg = alloc_preg (cfg);
7388 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7389 load->flags |= ins_flag;
7390 MONO_ADD_INS (cfg->cbb, load);
7392 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7393 store->flags |= ins_flag;
7394 MONO_ADD_INS (cfg->cbb, store);
7396 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7397 MonoInst *dummy_use;
7398 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7399 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7400 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7403 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7415 token = read32 (ip + 1);
7416 klass = mini_get_class (method, token, generic_context);
7417 CHECK_TYPELOAD (klass);
7419 /* Optimize the common ldobj+stloc combination */
7429 loc_index = ip [5] - CEE_STLOC_0;
7436 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7437 CHECK_LOCAL (loc_index);
7439 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7440 ins->dreg = cfg->locals [loc_index]->dreg;
7446 /* Optimize the ldobj+stobj combination */
7447 /* The reference case ends up being a load+store anyway */
7448 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7453 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7460 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7469 CHECK_STACK_OVF (1);
7471 n = read32 (ip + 1);
7473 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7474 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7475 ins->type = STACK_OBJ;
7478 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7479 MonoInst *iargs [1];
7481 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7482 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7484 if (cfg->opt & MONO_OPT_SHARED) {
7485 MonoInst *iargs [3];
7487 if (cfg->compile_aot) {
7488 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7490 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7491 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7492 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7493 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7494 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7496 if (bblock->out_of_line) {
7497 MonoInst *iargs [2];
7499 if (image == mono_defaults.corlib) {
7501 * Avoid relocations in AOT and save some space by using a
7502 * version of helper_ldstr specialized to mscorlib.
7504 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7505 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7507 /* Avoid creating the string object */
7508 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7509 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7510 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7514 if (cfg->compile_aot) {
7515 NEW_LDSTRCONST (cfg, ins, image, n);
7517 MONO_ADD_INS (bblock, ins);
7520 NEW_PCONST (cfg, ins, NULL);
7521 ins->type = STACK_OBJ;
7522 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7524 MONO_ADD_INS (bblock, ins);
7533 MonoInst *iargs [2];
7534 MonoMethodSignature *fsig;
7537 MonoInst *vtable_arg = NULL;
7540 token = read32 (ip + 1);
7541 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7544 fsig = mono_method_get_signature (cmethod, image, token);
7548 mono_save_token_info (cfg, image, token, cmethod);
7550 if (!mono_class_init (cmethod->klass))
7553 if (cfg->generic_sharing_context)
7554 context_used = mono_method_check_context_used (cmethod);
7556 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7557 if (check_linkdemand (cfg, method, cmethod))
7559 CHECK_CFG_EXCEPTION;
7560 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7561 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7564 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7565 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7566 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7567 mono_class_vtable (cfg->domain, cmethod->klass);
7568 CHECK_TYPELOAD (cmethod->klass);
7570 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7571 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7574 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7575 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7577 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7579 CHECK_TYPELOAD (cmethod->klass);
7580 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7585 n = fsig->param_count;
7589 * Generate smaller code for the common newobj <exception> instruction in
7590 * argument checking code.
7592 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7593 is_exception_class (cmethod->klass) && n <= 2 &&
7594 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7595 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7596 MonoInst *iargs [3];
7598 g_assert (!vtable_arg);
7602 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7605 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7609 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7614 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7617 g_assert_not_reached ();
7625 /* move the args to allow room for 'this' in the first position */
7631 /* check_call_signature () requires sp[0] to be set */
7632 this_ins.type = STACK_OBJ;
7634 if (check_call_signature (cfg, fsig, sp))
7639 if (mini_class_is_system_array (cmethod->klass)) {
7640 g_assert (!vtable_arg);
7642 *sp = emit_get_rgctx_method (cfg, context_used,
7643 cmethod, MONO_RGCTX_INFO_METHOD);
7645 /* Avoid varargs in the common case */
7646 if (fsig->param_count == 1)
7647 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7648 else if (fsig->param_count == 2)
7649 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7650 else if (fsig->param_count == 3)
7651 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7653 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7654 } else if (cmethod->string_ctor) {
7655 g_assert (!context_used);
7656 g_assert (!vtable_arg);
7657 /* we simply pass a null pointer */
7658 EMIT_NEW_PCONST (cfg, *sp, NULL);
7659 /* now call the string ctor */
7660 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7662 MonoInst* callvirt_this_arg = NULL;
7664 if (cmethod->klass->valuetype) {
7665 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7666 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7667 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7672 * The code generated by mini_emit_virtual_call () expects
7673 * iargs [0] to be a boxed instance, but luckily the vcall
7674 * will be transformed into a normal call there.
7676 } else if (context_used) {
7677 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7680 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7682 CHECK_TYPELOAD (cmethod->klass);
7685 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7686 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7687 * As a workaround, we call class cctors before allocating objects.
7689 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7690 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7691 if (cfg->verbose_level > 2)
7692 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7693 class_inits = g_slist_prepend (class_inits, vtable);
7696 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7699 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7702 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7704 /* Now call the actual ctor */
7705 /* Avoid virtual calls to ctors if possible */
7706 if (cmethod->klass->marshalbyref)
7707 callvirt_this_arg = sp [0];
7710 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7711 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7712 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7717 CHECK_CFG_EXCEPTION;
7722 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7723 mono_method_check_inlining (cfg, cmethod) &&
7724 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7725 !g_list_find (dont_inline, cmethod)) {
7728 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7729 cfg->real_offset += 5;
7732 inline_costs += costs - 5;
7735 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7737 } else if (context_used &&
7738 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7739 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7740 MonoInst *cmethod_addr;
7742 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7743 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7745 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7748 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7749 callvirt_this_arg, NULL, vtable_arg);
7753 if (alloc == NULL) {
7755 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7756 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7770 token = read32 (ip + 1);
7771 klass = mini_get_class (method, token, generic_context);
7772 CHECK_TYPELOAD (klass);
7773 if (sp [0]->type != STACK_OBJ)
7776 if (cfg->generic_sharing_context)
7777 context_used = mono_class_check_context_used (klass);
7779 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7786 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7788 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7792 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7793 MonoMethod *mono_castclass;
7794 MonoInst *iargs [1];
7797 mono_castclass = mono_marshal_get_castclass (klass);
7800 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7801 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7802 g_assert (costs > 0);
7805 cfg->real_offset += 5;
7810 inline_costs += costs;
7813 ins = handle_castclass (cfg, klass, *sp, context_used);
7814 CHECK_CFG_EXCEPTION;
7824 token = read32 (ip + 1);
7825 klass = mini_get_class (method, token, generic_context);
7826 CHECK_TYPELOAD (klass);
7827 if (sp [0]->type != STACK_OBJ)
7830 if (cfg->generic_sharing_context)
7831 context_used = mono_class_check_context_used (klass);
7833 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7840 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7842 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7846 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7847 MonoMethod *mono_isinst;
7848 MonoInst *iargs [1];
7851 mono_isinst = mono_marshal_get_isinst (klass);
7854 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7855 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7856 g_assert (costs > 0);
7859 cfg->real_offset += 5;
7864 inline_costs += costs;
7867 ins = handle_isinst (cfg, klass, *sp, context_used);
7868 CHECK_CFG_EXCEPTION;
7875 case CEE_UNBOX_ANY: {
7879 token = read32 (ip + 1);
7880 klass = mini_get_class (method, token, generic_context);
7881 CHECK_TYPELOAD (klass);
7883 mono_save_token_info (cfg, image, token, klass);
7885 if (cfg->generic_sharing_context)
7886 context_used = mono_class_check_context_used (klass);
7888 if (generic_class_is_reference_type (cfg, klass)) {
7889 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7890 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7891 MonoMethod *mono_castclass;
7892 MonoInst *iargs [1];
7895 mono_castclass = mono_marshal_get_castclass (klass);
7898 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7899 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7901 g_assert (costs > 0);
7904 cfg->real_offset += 5;
7908 inline_costs += costs;
7910 ins = handle_castclass (cfg, klass, *sp, context_used);
7911 CHECK_CFG_EXCEPTION;
7919 if (mono_class_is_nullable (klass)) {
7920 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7927 ins = handle_unbox (cfg, klass, sp, context_used);
7933 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7946 token = read32 (ip + 1);
7947 klass = mini_get_class (method, token, generic_context);
7948 CHECK_TYPELOAD (klass);
7950 mono_save_token_info (cfg, image, token, klass);
7952 if (cfg->generic_sharing_context)
7953 context_used = mono_class_check_context_used (klass);
7955 if (generic_class_is_reference_type (cfg, klass)) {
7961 if (klass == mono_defaults.void_class)
7963 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7965 /* frequent check in generic code: box (struct), brtrue */
7967 // FIXME: LLVM can't handle the inconsistent bb linking
7968 if (!COMPILE_LLVM (cfg) && !mono_class_is_nullable (klass) &&
7969 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7970 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7972 MONO_INST_NEW (cfg, ins, OP_BR);
7973 if (*ip == CEE_BRTRUE_S) {
7976 target = ip + 1 + (signed char)(*ip);
7981 target = ip + 4 + (gint)(read32 (ip));
7984 GET_BBLOCK (cfg, tblock, target);
7985 link_bblock (cfg, bblock, tblock);
7986 ins->inst_target_bb = tblock;
7987 GET_BBLOCK (cfg, tblock, ip);
7989 * This leads to some inconsistency, since the two bblocks are
7990 * not really connected, but it is needed for handling stack
7991 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7992 * FIXME: This should only be needed if sp != stack_start, but that
7993 * doesn't work for some reason (test failure in mcs/tests on x86).
7995 link_bblock (cfg, bblock, tblock);
7996 if (sp != stack_start) {
7997 handle_stack_args (cfg, stack_start, sp - stack_start);
7999 CHECK_UNVERIFIABLE (cfg);
8001 MONO_ADD_INS (bblock, ins);
8002 start_new_bblock = 1;
8006 *sp++ = handle_box (cfg, val, klass, context_used);
8008 CHECK_CFG_EXCEPTION;
8017 token = read32 (ip + 1);
8018 klass = mini_get_class (method, token, generic_context);
8019 CHECK_TYPELOAD (klass);
8021 mono_save_token_info (cfg, image, token, klass);
8023 if (cfg->generic_sharing_context)
8024 context_used = mono_class_check_context_used (klass);
8026 if (mono_class_is_nullable (klass)) {
8029 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8030 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8034 ins = handle_unbox (cfg, klass, sp, context_used);
8044 MonoClassField *field;
8048 if (*ip == CEE_STFLD) {
8055 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8057 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8060 token = read32 (ip + 1);
8061 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8062 field = mono_method_get_wrapper_data (method, token);
8063 klass = field->parent;
8066 field = mono_field_from_token (image, token, &klass, generic_context);
8070 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8071 FIELD_ACCESS_FAILURE;
8072 mono_class_init (klass);
8074 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8075 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8076 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8077 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8080 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8081 if (*ip == CEE_STFLD) {
8082 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8084 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8085 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8086 MonoInst *iargs [5];
8089 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8090 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8091 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8095 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8096 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8097 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8098 g_assert (costs > 0);
8100 cfg->real_offset += 5;
8103 inline_costs += costs;
8105 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8110 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8112 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8113 if (sp [0]->opcode != OP_LDADDR)
8114 store->flags |= MONO_INST_FAULT;
8116 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8117 /* insert call to write barrier */
8118 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8119 MonoInst *iargs [2], *dummy_use;
8122 dreg = alloc_preg (cfg);
8123 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8125 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8127 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8130 store->flags |= ins_flag;
8137 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8138 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8139 MonoInst *iargs [4];
8142 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8143 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8144 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8145 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8146 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8147 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8149 g_assert (costs > 0);
8151 cfg->real_offset += 5;
8155 inline_costs += costs;
8157 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8161 if (sp [0]->type == STACK_VTYPE) {
8164 /* Have to compute the address of the variable */
8166 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8168 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8170 g_assert (var->klass == klass);
8172 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8176 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8178 if (*ip == CEE_LDFLDA) {
8179 dreg = alloc_preg (cfg);
8181 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8182 ins->klass = mono_class_from_mono_type (field->type);
8183 ins->type = STACK_MP;
8188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8189 load->flags |= ins_flag;
8190 if (sp [0]->opcode != OP_LDADDR)
8191 load->flags |= MONO_INST_FAULT;
8202 MonoClassField *field;
8203 gpointer addr = NULL;
8204 gboolean is_special_static;
8207 token = read32 (ip + 1);
8209 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8210 field = mono_method_get_wrapper_data (method, token);
8211 klass = field->parent;
8214 field = mono_field_from_token (image, token, &klass, generic_context);
8217 mono_class_init (klass);
8218 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8219 FIELD_ACCESS_FAILURE;
8221 /* if the class is Critical then transparent code cannot access it's fields */
8222 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8223 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8226 * We can only support shared generic static
8227 * field access on architectures where the
8228 * trampoline code has been extended to handle
8229 * the generic class init.
8231 #ifndef MONO_ARCH_VTABLE_REG
8232 GENERIC_SHARING_FAILURE (*ip);
8235 if (cfg->generic_sharing_context)
8236 context_used = mono_class_check_context_used (klass);
8238 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8240 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8241 * to be called here.
8243 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8244 mono_class_vtable (cfg->domain, klass);
8245 CHECK_TYPELOAD (klass);
8247 mono_domain_lock (cfg->domain);
8248 if (cfg->domain->special_static_fields)
8249 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8250 mono_domain_unlock (cfg->domain);
8252 is_special_static = mono_class_field_is_special_static (field);
8254 /* Generate IR to compute the field address */
8255 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8257 * Fast access to TLS data
8258 * Inline version of get_thread_static_data () in
8262 int idx, static_data_reg, array_reg, dreg;
8263 MonoInst *thread_ins;
8265 // offset &= 0x7fffffff;
8266 // idx = (offset >> 24) - 1;
8267 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8269 thread_ins = mono_get_thread_intrinsic (cfg);
8270 MONO_ADD_INS (cfg->cbb, thread_ins);
8271 static_data_reg = alloc_ireg (cfg);
8272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8274 if (cfg->compile_aot) {
8275 int offset_reg, offset2_reg, idx_reg;
8277 /* For TLS variables, this will return the TLS offset */
8278 EMIT_NEW_SFLDACONST (cfg, ins, field);
8279 offset_reg = ins->dreg;
8280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8281 idx_reg = alloc_ireg (cfg);
8282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8283 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8285 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8286 array_reg = alloc_ireg (cfg);
8287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8288 offset2_reg = alloc_ireg (cfg);
8289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8290 dreg = alloc_ireg (cfg);
8291 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8293 offset = (gsize)addr & 0x7fffffff;
8294 idx = (offset >> 24) - 1;
8296 array_reg = alloc_ireg (cfg);
8297 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8298 dreg = alloc_ireg (cfg);
8299 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8301 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8302 (cfg->compile_aot && is_special_static) ||
8303 (context_used && is_special_static)) {
8304 MonoInst *iargs [2];
8306 g_assert (field->parent);
8307 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8309 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8310 field, MONO_RGCTX_INFO_CLASS_FIELD);
8312 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8314 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8315 } else if (context_used) {
8316 MonoInst *static_data;
8319 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8320 method->klass->name_space, method->klass->name, method->name,
8321 depth, field->offset);
8324 if (mono_class_needs_cctor_run (klass, method))
8325 emit_generic_class_init (cfg, klass);
8328 * The pointer we're computing here is
8330 * super_info.static_data + field->offset
8332 static_data = emit_get_rgctx_klass (cfg, context_used,
8333 klass, MONO_RGCTX_INFO_STATIC_DATA);
8335 if (field->offset == 0) {
8338 int addr_reg = mono_alloc_preg (cfg);
8339 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8341 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8342 MonoInst *iargs [2];
8344 g_assert (field->parent);
8345 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8346 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8347 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8349 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8351 CHECK_TYPELOAD (klass);
8353 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8354 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8355 if (cfg->verbose_level > 2)
8356 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8357 class_inits = g_slist_prepend (class_inits, vtable);
8359 if (cfg->run_cctors) {
8361 /* This makes it so that inlining cannot trigger */
8362 /* .cctors: too many apps depend on them */
8363 /* running with a specific order... */
8364 if (! vtable->initialized)
8366 ex = mono_runtime_class_init_full (vtable, FALSE);
8368 set_exception_object (cfg, ex);
8369 goto exception_exit;
8373 addr = (char*)vtable->data + field->offset;
8375 if (cfg->compile_aot)
8376 EMIT_NEW_SFLDACONST (cfg, ins, field);
8378 EMIT_NEW_PCONST (cfg, ins, addr);
8380 MonoInst *iargs [1];
8381 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8382 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8386 /* Generate IR to do the actual load/store operation */
8388 if (*ip == CEE_LDSFLDA) {
8389 ins->klass = mono_class_from_mono_type (field->type);
8390 ins->type = STACK_PTR;
8392 } else if (*ip == CEE_STSFLD) {
8397 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8398 store->flags |= ins_flag;
8400 gboolean is_const = FALSE;
8401 MonoVTable *vtable = NULL;
8403 if (!context_used) {
8404 vtable = mono_class_vtable (cfg->domain, klass);
8405 CHECK_TYPELOAD (klass);
8407 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8408 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8409 gpointer addr = (char*)vtable->data + field->offset;
8410 int ro_type = field->type->type;
8411 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8412 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8414 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8417 case MONO_TYPE_BOOLEAN:
8419 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8423 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8426 case MONO_TYPE_CHAR:
8428 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8432 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8437 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8441 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8447 case MONO_TYPE_FNPTR:
8448 #ifndef HAVE_MOVING_COLLECTOR
8449 case MONO_TYPE_STRING:
8450 case MONO_TYPE_OBJECT:
8451 case MONO_TYPE_CLASS:
8452 case MONO_TYPE_SZARRAY:
8453 case MONO_TYPE_ARRAY:
8455 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8456 type_to_eval_stack_type ((cfg), field->type, *sp);
8461 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8466 case MONO_TYPE_VALUETYPE:
8476 CHECK_STACK_OVF (1);
8478 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8479 load->flags |= ins_flag;
8492 token = read32 (ip + 1);
8493 klass = mini_get_class (method, token, generic_context);
8494 CHECK_TYPELOAD (klass);
8495 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8496 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8497 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8498 generic_class_is_reference_type (cfg, klass)) {
8499 MonoInst *dummy_use;
8500 /* insert call to write barrier */
8501 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8502 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8503 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8515 const char *data_ptr;
8517 guint32 field_token;
8523 token = read32 (ip + 1);
8525 klass = mini_get_class (method, token, generic_context);
8526 CHECK_TYPELOAD (klass);
8528 if (cfg->generic_sharing_context)
8529 context_used = mono_class_check_context_used (klass);
8531 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8532 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8533 ins->sreg1 = sp [0]->dreg;
8534 ins->type = STACK_I4;
8535 ins->dreg = alloc_ireg (cfg);
8536 MONO_ADD_INS (cfg->cbb, ins);
8537 *sp = mono_decompose_opcode (cfg, ins);
8542 MonoClass *array_class = mono_array_class_get (klass, 1);
8543 /* FIXME: we cannot get a managed
8544 allocator because we can't get the
8545 open generic class's vtable. We
8546 have the same problem in
8547 handle_alloc(). This
8548 needs to be solved so that we can
8549 have managed allocs of shared
8552 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8553 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8555 MonoMethod *managed_alloc = NULL;
8557 /* FIXME: Decompose later to help abcrem */
8560 args [0] = emit_get_rgctx_klass (cfg, context_used,
8561 array_class, MONO_RGCTX_INFO_VTABLE);
8566 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8568 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8570 if (cfg->opt & MONO_OPT_SHARED) {
8571 /* Decompose now to avoid problems with references to the domainvar */
8572 MonoInst *iargs [3];
8574 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8575 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8578 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8580 /* Decompose later since it is needed by abcrem */
8581 MonoClass *array_type = mono_array_class_get (klass, 1);
8582 mono_class_vtable (cfg->domain, array_type);
8583 CHECK_TYPELOAD (array_type);
8585 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8586 ins->dreg = alloc_preg (cfg);
8587 ins->sreg1 = sp [0]->dreg;
8588 ins->inst_newa_class = klass;
8589 ins->type = STACK_OBJ;
8591 MONO_ADD_INS (cfg->cbb, ins);
8592 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8593 cfg->cbb->has_array_access = TRUE;
8595 /* Needed so mono_emit_load_get_addr () gets called */
8596 mono_get_got_var (cfg);
8606 * we inline/optimize the initialization sequence if possible.
8607 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8608 * for small sizes open code the memcpy
8609 * ensure the rva field is big enough
8611 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8612 MonoMethod *memcpy_method = get_memcpy_method ();
8613 MonoInst *iargs [3];
8614 int add_reg = alloc_preg (cfg);
8616 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8617 if (cfg->compile_aot) {
8618 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8620 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8622 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8623 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8632 if (sp [0]->type != STACK_OBJ)
8635 dreg = alloc_preg (cfg);
8636 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8637 ins->dreg = alloc_preg (cfg);
8638 ins->sreg1 = sp [0]->dreg;
8639 ins->type = STACK_I4;
8640 /* This flag will be inherited by the decomposition */
8641 ins->flags |= MONO_INST_FAULT;
8642 MONO_ADD_INS (cfg->cbb, ins);
8643 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8644 cfg->cbb->has_array_access = TRUE;
8652 if (sp [0]->type != STACK_OBJ)
8655 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8657 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8658 CHECK_TYPELOAD (klass);
8659 /* we need to make sure that this array is exactly the type it needs
8660 * to be for correctness. the wrappers are lax with their usage
8661 * so we need to ignore them here
8663 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8664 MonoClass *array_class = mono_array_class_get (klass, 1);
8665 mini_emit_check_array_type (cfg, sp [0], array_class);
8666 CHECK_TYPELOAD (array_class);
8670 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8685 case CEE_LDELEM_REF: {
8691 if (*ip == CEE_LDELEM) {
8693 token = read32 (ip + 1);
8694 klass = mini_get_class (method, token, generic_context);
8695 CHECK_TYPELOAD (klass);
8696 mono_class_init (klass);
8699 klass = array_access_to_klass (*ip);
8701 if (sp [0]->type != STACK_OBJ)
8704 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8706 if (sp [1]->opcode == OP_ICONST) {
8707 int array_reg = sp [0]->dreg;
8708 int index_reg = sp [1]->dreg;
8709 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8711 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8712 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8714 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8715 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8718 if (*ip == CEE_LDELEM)
8731 case CEE_STELEM_REF:
8738 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8740 if (*ip == CEE_STELEM) {
8742 token = read32 (ip + 1);
8743 klass = mini_get_class (method, token, generic_context);
8744 CHECK_TYPELOAD (klass);
8745 mono_class_init (klass);
8748 klass = array_access_to_klass (*ip);
8750 if (sp [0]->type != STACK_OBJ)
8753 /* storing a NULL doesn't need any of the complex checks in stelemref */
8754 if (generic_class_is_reference_type (cfg, klass) &&
8755 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8756 MonoMethod* helper = mono_marshal_get_stelemref ();
8757 MonoInst *iargs [3];
8759 if (sp [0]->type != STACK_OBJ)
8761 if (sp [2]->type != STACK_OBJ)
8768 mono_emit_method_call (cfg, helper, iargs, NULL);
8770 if (sp [1]->opcode == OP_ICONST) {
8771 int array_reg = sp [0]->dreg;
8772 int index_reg = sp [1]->dreg;
8773 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8775 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8776 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8778 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8779 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8783 if (*ip == CEE_STELEM)
8790 case CEE_CKFINITE: {
8794 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8795 ins->sreg1 = sp [0]->dreg;
8796 ins->dreg = alloc_freg (cfg);
8797 ins->type = STACK_R8;
8798 MONO_ADD_INS (bblock, ins);
8800 *sp++ = mono_decompose_opcode (cfg, ins);
8805 case CEE_REFANYVAL: {
8806 MonoInst *src_var, *src;
8808 int klass_reg = alloc_preg (cfg);
8809 int dreg = alloc_preg (cfg);
8812 MONO_INST_NEW (cfg, ins, *ip);
8815 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8816 CHECK_TYPELOAD (klass);
8817 mono_class_init (klass);
8819 if (cfg->generic_sharing_context)
8820 context_used = mono_class_check_context_used (klass);
8823 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8825 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8826 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8830 MonoInst *klass_ins;
8832 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8833 klass, MONO_RGCTX_INFO_KLASS);
8836 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8837 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8839 mini_emit_class_check (cfg, klass_reg, klass);
8841 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8842 ins->type = STACK_MP;
8847 case CEE_MKREFANY: {
8848 MonoInst *loc, *addr;
8851 MONO_INST_NEW (cfg, ins, *ip);
8854 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8855 CHECK_TYPELOAD (klass);
8856 mono_class_init (klass);
8858 if (cfg->generic_sharing_context)
8859 context_used = mono_class_check_context_used (klass);
8861 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8862 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8865 MonoInst *const_ins;
8866 int type_reg = alloc_preg (cfg);
8868 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8872 } else if (cfg->compile_aot) {
8873 int const_reg = alloc_preg (cfg);
8874 int type_reg = alloc_preg (cfg);
8876 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8881 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8882 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8886 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8887 ins->type = STACK_VTYPE;
8888 ins->klass = mono_defaults.typed_reference_class;
8895 MonoClass *handle_class;
8897 CHECK_STACK_OVF (1);
8900 n = read32 (ip + 1);
8902 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8903 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8904 handle = mono_method_get_wrapper_data (method, n);
8905 handle_class = mono_method_get_wrapper_data (method, n + 1);
8906 if (handle_class == mono_defaults.typehandle_class)
8907 handle = &((MonoClass*)handle)->byval_arg;
8910 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8914 mono_class_init (handle_class);
8915 if (cfg->generic_sharing_context) {
8916 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8917 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8918 /* This case handles ldtoken
8919 of an open type, like for
8922 } else if (handle_class == mono_defaults.typehandle_class) {
8923 /* If we get a MONO_TYPE_CLASS
8924 then we need to provide the
8926 instantiation of it. */
8927 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8930 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8931 } else if (handle_class == mono_defaults.fieldhandle_class)
8932 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8933 else if (handle_class == mono_defaults.methodhandle_class)
8934 context_used = mono_method_check_context_used (handle);
8936 g_assert_not_reached ();
8939 if ((cfg->opt & MONO_OPT_SHARED) &&
8940 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8941 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8942 MonoInst *addr, *vtvar, *iargs [3];
8943 int method_context_used;
8945 if (cfg->generic_sharing_context)
8946 method_context_used = mono_method_check_context_used (method);
8948 method_context_used = 0;
8950 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8952 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8953 EMIT_NEW_ICONST (cfg, iargs [1], n);
8954 if (method_context_used) {
8955 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8956 method, MONO_RGCTX_INFO_METHOD);
8957 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8959 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8960 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8962 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8966 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8968 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8969 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8970 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8971 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8972 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8973 MonoClass *tclass = mono_class_from_mono_type (handle);
8975 mono_class_init (tclass);
8977 ins = emit_get_rgctx_klass (cfg, context_used,
8978 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8979 } else if (cfg->compile_aot) {
8980 if (method->wrapper_type) {
8981 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8982 /* Special case for static synchronized wrappers */
8983 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8985 /* FIXME: n is not a normal token */
8986 cfg->disable_aot = TRUE;
8987 EMIT_NEW_PCONST (cfg, ins, NULL);
8990 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8993 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8995 ins->type = STACK_OBJ;
8996 ins->klass = cmethod->klass;
8999 MonoInst *addr, *vtvar;
9001 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9004 if (handle_class == mono_defaults.typehandle_class) {
9005 ins = emit_get_rgctx_klass (cfg, context_used,
9006 mono_class_from_mono_type (handle),
9007 MONO_RGCTX_INFO_TYPE);
9008 } else if (handle_class == mono_defaults.methodhandle_class) {
9009 ins = emit_get_rgctx_method (cfg, context_used,
9010 handle, MONO_RGCTX_INFO_METHOD);
9011 } else if (handle_class == mono_defaults.fieldhandle_class) {
9012 ins = emit_get_rgctx_field (cfg, context_used,
9013 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9015 g_assert_not_reached ();
9017 } else if (cfg->compile_aot) {
9018 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9020 EMIT_NEW_PCONST (cfg, ins, handle);
9022 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9023 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9024 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9034 MONO_INST_NEW (cfg, ins, OP_THROW);
9036 ins->sreg1 = sp [0]->dreg;
9038 bblock->out_of_line = TRUE;
9039 MONO_ADD_INS (bblock, ins);
9040 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9041 MONO_ADD_INS (bblock, ins);
9044 link_bblock (cfg, bblock, end_bblock);
9045 start_new_bblock = 1;
9047 case CEE_ENDFINALLY:
9048 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9049 MONO_ADD_INS (bblock, ins);
9051 start_new_bblock = 1;
9054 * Control will leave the method so empty the stack, otherwise
9055 * the next basic block will start with a nonempty stack.
9057 while (sp != stack_start) {
9065 if (*ip == CEE_LEAVE) {
9067 target = ip + 5 + (gint32)read32(ip + 1);
9070 target = ip + 2 + (signed char)(ip [1]);
9073 /* empty the stack */
9074 while (sp != stack_start) {
9079 * If this leave statement is in a catch block, check for a
9080 * pending exception, and rethrow it if necessary.
9081 * We avoid doing this in runtime invoke wrappers, since those are called
9082 * by native code which expects the wrapper to catch all exceptions.
9084 for (i = 0; i < header->num_clauses; ++i) {
9085 MonoExceptionClause *clause = &header->clauses [i];
9088 * Use <= in the final comparison to handle clauses with multiple
9089 * leave statements, like in bug #78024.
9090 * The ordering of the exception clauses guarantees that we find the
9093 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9095 MonoBasicBlock *dont_throw;
9100 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9103 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9105 NEW_BBLOCK (cfg, dont_throw);
9108 * Currently, we always rethrow the abort exception, despite the
9109 * fact that this is not correct. See thread6.cs for an example.
9110 * But propagating the abort exception is more important than
9111 * getting the semantics right.
9113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9115 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9117 MONO_START_BB (cfg, dont_throw);
9122 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9124 MonoExceptionClause *clause;
9126 for (tmp = handlers; tmp; tmp = tmp->next) {
9128 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9130 link_bblock (cfg, bblock, tblock);
9131 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9132 ins->inst_target_bb = tblock;
9133 ins->inst_eh_block = clause;
9134 MONO_ADD_INS (bblock, ins);
9135 bblock->has_call_handler = 1;
9136 if (COMPILE_LLVM (cfg)) {
9137 MonoBasicBlock *target_bb;
9140 * Link the finally bblock with the target, since it will
9141 * conceptually branch there.
9142 * FIXME: Have to link the bblock containing the endfinally.
9144 GET_BBLOCK (cfg, target_bb, target);
9145 link_bblock (cfg, tblock, target_bb);
9148 g_list_free (handlers);
9151 MONO_INST_NEW (cfg, ins, OP_BR);
9152 MONO_ADD_INS (bblock, ins);
9153 GET_BBLOCK (cfg, tblock, target);
9154 link_bblock (cfg, bblock, tblock);
9155 ins->inst_target_bb = tblock;
9156 start_new_bblock = 1;
9158 if (*ip == CEE_LEAVE)
9167 * Mono specific opcodes
9169 case MONO_CUSTOM_PREFIX: {
9171 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9175 case CEE_MONO_ICALL: {
9177 MonoJitICallInfo *info;
9179 token = read32 (ip + 2);
9180 func = mono_method_get_wrapper_data (method, token);
9181 info = mono_find_jit_icall_by_addr (func);
9184 CHECK_STACK (info->sig->param_count);
9185 sp -= info->sig->param_count;
9187 ins = mono_emit_jit_icall (cfg, info->func, sp);
9188 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9192 inline_costs += 10 * num_calls++;
9196 case CEE_MONO_LDPTR: {
9199 CHECK_STACK_OVF (1);
9201 token = read32 (ip + 2);
9203 ptr = mono_method_get_wrapper_data (method, token);
9204 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9205 MonoJitICallInfo *callinfo;
9206 const char *icall_name;
9208 icall_name = method->name + strlen ("__icall_wrapper_");
9209 g_assert (icall_name);
9210 callinfo = mono_find_jit_icall_by_name (icall_name);
9211 g_assert (callinfo);
9213 if (ptr == callinfo->func) {
9214 /* Will be transformed into an AOTCONST later */
9215 EMIT_NEW_PCONST (cfg, ins, ptr);
9221 /* FIXME: Generalize this */
9222 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9223 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9228 EMIT_NEW_PCONST (cfg, ins, ptr);
9231 inline_costs += 10 * num_calls++;
9232 /* Can't embed random pointers into AOT code */
9233 cfg->disable_aot = 1;
9236 case CEE_MONO_ICALL_ADDR: {
9237 MonoMethod *cmethod;
9240 CHECK_STACK_OVF (1);
9242 token = read32 (ip + 2);
9244 cmethod = mono_method_get_wrapper_data (method, token);
9246 if (cfg->compile_aot) {
9247 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9249 ptr = mono_lookup_internal_call (cmethod);
9251 EMIT_NEW_PCONST (cfg, ins, ptr);
9257 case CEE_MONO_VTADDR: {
9258 MonoInst *src_var, *src;
9264 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9265 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9270 case CEE_MONO_NEWOBJ: {
9271 MonoInst *iargs [2];
9273 CHECK_STACK_OVF (1);
9275 token = read32 (ip + 2);
9276 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9277 mono_class_init (klass);
9278 NEW_DOMAINCONST (cfg, iargs [0]);
9279 MONO_ADD_INS (cfg->cbb, iargs [0]);
9280 NEW_CLASSCONST (cfg, iargs [1], klass);
9281 MONO_ADD_INS (cfg->cbb, iargs [1]);
9282 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9284 inline_costs += 10 * num_calls++;
9287 case CEE_MONO_OBJADDR:
9290 MONO_INST_NEW (cfg, ins, OP_MOVE);
9291 ins->dreg = alloc_preg (cfg);
9292 ins->sreg1 = sp [0]->dreg;
9293 ins->type = STACK_MP;
9294 MONO_ADD_INS (cfg->cbb, ins);
9298 case CEE_MONO_LDNATIVEOBJ:
9300 * Similar to LDOBJ, but instead load the unmanaged
9301 * representation of the vtype to the stack.
9306 token = read32 (ip + 2);
9307 klass = mono_method_get_wrapper_data (method, token);
9308 g_assert (klass->valuetype);
9309 mono_class_init (klass);
9312 MonoInst *src, *dest, *temp;
9315 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9316 temp->backend.is_pinvoke = 1;
9317 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9318 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9320 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9321 dest->type = STACK_VTYPE;
9322 dest->klass = klass;
9328 case CEE_MONO_RETOBJ: {
9330 * Same as RET, but return the native representation of a vtype
9333 g_assert (cfg->ret);
9334 g_assert (mono_method_signature (method)->pinvoke);
9339 token = read32 (ip + 2);
9340 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9342 if (!cfg->vret_addr) {
9343 g_assert (cfg->ret_var_is_local);
9345 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9347 EMIT_NEW_RETLOADA (cfg, ins);
9349 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9351 if (sp != stack_start)
9354 MONO_INST_NEW (cfg, ins, OP_BR);
9355 ins->inst_target_bb = end_bblock;
9356 MONO_ADD_INS (bblock, ins);
9357 link_bblock (cfg, bblock, end_bblock);
9358 start_new_bblock = 1;
9362 case CEE_MONO_CISINST:
9363 case CEE_MONO_CCASTCLASS: {
9368 token = read32 (ip + 2);
9369 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9370 if (ip [1] == CEE_MONO_CISINST)
9371 ins = handle_cisinst (cfg, klass, sp [0]);
9373 ins = handle_ccastclass (cfg, klass, sp [0]);
9379 case CEE_MONO_SAVE_LMF:
9380 case CEE_MONO_RESTORE_LMF:
9381 #ifdef MONO_ARCH_HAVE_LMF_OPS
9382 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9383 MONO_ADD_INS (bblock, ins);
9384 cfg->need_lmf_area = TRUE;
9388 case CEE_MONO_CLASSCONST:
9389 CHECK_STACK_OVF (1);
9391 token = read32 (ip + 2);
9392 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9395 inline_costs += 10 * num_calls++;
9397 case CEE_MONO_NOT_TAKEN:
9398 bblock->out_of_line = TRUE;
9402 CHECK_STACK_OVF (1);
9404 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9405 ins->dreg = alloc_preg (cfg);
9406 ins->inst_offset = (gint32)read32 (ip + 2);
9407 ins->type = STACK_PTR;
9408 MONO_ADD_INS (bblock, ins);
9412 case CEE_MONO_DYN_CALL: {
9415 /* It would be easier to call a trampoline, but that would put an
9416 * extra frame on the stack, confusing exception handling. So
9417 * implement it inline using an opcode for now.
9420 if (!cfg->dyn_call_var) {
9421 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9422 /* prevent it from being register allocated */
9423 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9426 /* Has to use a call inst since it local regalloc expects it */
9427 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9428 ins = (MonoInst*)call;
9430 ins->sreg1 = sp [0]->dreg;
9431 ins->sreg2 = sp [1]->dreg;
9432 MONO_ADD_INS (bblock, ins);
9434 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9435 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9439 inline_costs += 10 * num_calls++;
9444 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9454 /* somewhat similar to LDTOKEN */
9455 MonoInst *addr, *vtvar;
9456 CHECK_STACK_OVF (1);
9457 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9459 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9460 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9462 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9463 ins->type = STACK_VTYPE;
9464 ins->klass = mono_defaults.argumenthandle_class;
9477 * The following transforms:
9478 * CEE_CEQ into OP_CEQ
9479 * CEE_CGT into OP_CGT
9480 * CEE_CGT_UN into OP_CGT_UN
9481 * CEE_CLT into OP_CLT
9482 * CEE_CLT_UN into OP_CLT_UN
9484 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9486 MONO_INST_NEW (cfg, ins, cmp->opcode);
9488 cmp->sreg1 = sp [0]->dreg;
9489 cmp->sreg2 = sp [1]->dreg;
9490 type_from_op (cmp, sp [0], sp [1]);
9492 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9493 cmp->opcode = OP_LCOMPARE;
9494 else if (sp [0]->type == STACK_R8)
9495 cmp->opcode = OP_FCOMPARE;
9497 cmp->opcode = OP_ICOMPARE;
9498 MONO_ADD_INS (bblock, cmp);
9499 ins->type = STACK_I4;
9500 ins->dreg = alloc_dreg (cfg, ins->type);
9501 type_from_op (ins, sp [0], sp [1]);
9503 if (cmp->opcode == OP_FCOMPARE) {
9505 * The backends expect the fceq opcodes to do the
9508 cmp->opcode = OP_NOP;
9509 ins->sreg1 = cmp->sreg1;
9510 ins->sreg2 = cmp->sreg2;
9512 MONO_ADD_INS (bblock, ins);
9519 MonoMethod *cil_method;
9520 gboolean needs_static_rgctx_invoke;
9522 CHECK_STACK_OVF (1);
9524 n = read32 (ip + 2);
9525 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9528 mono_class_init (cmethod->klass);
9530 mono_save_token_info (cfg, image, n, cmethod);
9532 if (cfg->generic_sharing_context)
9533 context_used = mono_method_check_context_used (cmethod);
9535 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9537 cil_method = cmethod;
9538 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9539 METHOD_ACCESS_FAILURE;
9541 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9542 if (check_linkdemand (cfg, method, cmethod))
9544 CHECK_CFG_EXCEPTION;
9545 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9546 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9550 * Optimize the common case of ldftn+delegate creation
9552 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9553 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9554 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9556 int invoke_context_used = 0;
9558 invoke = mono_get_delegate_invoke (ctor_method->klass);
9559 if (!invoke || !mono_method_signature (invoke))
9562 if (cfg->generic_sharing_context)
9563 invoke_context_used = mono_method_check_context_used (invoke);
9565 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9566 /* FIXME: SGEN support */
9567 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9568 MonoInst *target_ins;
9571 if (cfg->verbose_level > 3)
9572 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9573 target_ins = sp [-1];
9575 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9576 CHECK_CFG_EXCEPTION;
9585 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9586 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9590 inline_costs += 10 * num_calls++;
9593 case CEE_LDVIRTFTN: {
9598 n = read32 (ip + 2);
9599 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9602 mono_class_init (cmethod->klass);
9604 if (cfg->generic_sharing_context)
9605 context_used = mono_method_check_context_used (cmethod);
9607 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9608 if (check_linkdemand (cfg, method, cmethod))
9610 CHECK_CFG_EXCEPTION;
9611 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9612 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9618 args [1] = emit_get_rgctx_method (cfg, context_used,
9619 cmethod, MONO_RGCTX_INFO_METHOD);
9622 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9624 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9627 inline_costs += 10 * num_calls++;
9631 CHECK_STACK_OVF (1);
9633 n = read16 (ip + 2);
9635 EMIT_NEW_ARGLOAD (cfg, ins, n);
9640 CHECK_STACK_OVF (1);
9642 n = read16 (ip + 2);
9644 NEW_ARGLOADA (cfg, ins, n);
9645 MONO_ADD_INS (cfg->cbb, ins);
9653 n = read16 (ip + 2);
9655 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9657 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9661 CHECK_STACK_OVF (1);
9663 n = read16 (ip + 2);
9665 EMIT_NEW_LOCLOAD (cfg, ins, n);
9670 unsigned char *tmp_ip;
9671 CHECK_STACK_OVF (1);
9673 n = read16 (ip + 2);
9676 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9682 EMIT_NEW_LOCLOADA (cfg, ins, n);
9691 n = read16 (ip + 2);
9693 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9695 emit_stloc_ir (cfg, sp, header, n);
9702 if (sp != stack_start)
9704 if (cfg->method != method)
9706 * Inlining this into a loop in a parent could lead to
9707 * stack overflows which is different behavior than the
9708 * non-inlined case, thus disable inlining in this case.
9710 goto inline_failure;
9712 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9713 ins->dreg = alloc_preg (cfg);
9714 ins->sreg1 = sp [0]->dreg;
9715 ins->type = STACK_PTR;
9716 MONO_ADD_INS (cfg->cbb, ins);
9718 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9720 ins->flags |= MONO_INST_INIT;
9725 case CEE_ENDFILTER: {
9726 MonoExceptionClause *clause, *nearest;
9727 int cc, nearest_num;
9731 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9733 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9734 ins->sreg1 = (*sp)->dreg;
9735 MONO_ADD_INS (bblock, ins);
9736 start_new_bblock = 1;
9741 for (cc = 0; cc < header->num_clauses; ++cc) {
9742 clause = &header->clauses [cc];
9743 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9744 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9745 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9751 if ((ip - header->code) != nearest->handler_offset)
9756 case CEE_UNALIGNED_:
9757 ins_flag |= MONO_INST_UNALIGNED;
9758 /* FIXME: record alignment? we can assume 1 for now */
9763 ins_flag |= MONO_INST_VOLATILE;
9767 ins_flag |= MONO_INST_TAILCALL;
9768 cfg->flags |= MONO_CFG_HAS_TAIL;
9769 /* Can't inline tail calls at this time */
9770 inline_costs += 100000;
9777 token = read32 (ip + 2);
9778 klass = mini_get_class (method, token, generic_context);
9779 CHECK_TYPELOAD (klass);
9780 if (generic_class_is_reference_type (cfg, klass))
9781 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9783 mini_emit_initobj (cfg, *sp, NULL, klass);
9787 case CEE_CONSTRAINED_:
9789 token = read32 (ip + 2);
9790 if (method->wrapper_type != MONO_WRAPPER_NONE)
9791 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9793 constrained_call = mono_class_get_full (image, token, generic_context);
9794 CHECK_TYPELOAD (constrained_call);
9799 MonoInst *iargs [3];
9803 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9804 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9805 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9806 /* emit_memset only works when val == 0 */
9807 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9812 if (ip [1] == CEE_CPBLK) {
9813 MonoMethod *memcpy_method = get_memcpy_method ();
9814 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9816 MonoMethod *memset_method = get_memset_method ();
9817 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9827 ins_flag |= MONO_INST_NOTYPECHECK;
9829 ins_flag |= MONO_INST_NORANGECHECK;
9830 /* we ignore the no-nullcheck for now since we
9831 * really do it explicitly only when doing callvirt->call
9837 int handler_offset = -1;
9839 for (i = 0; i < header->num_clauses; ++i) {
9840 MonoExceptionClause *clause = &header->clauses [i];
9841 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9842 handler_offset = clause->handler_offset;
9847 bblock->flags |= BB_EXCEPTION_UNSAFE;
9849 g_assert (handler_offset != -1);
9851 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9852 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9853 ins->sreg1 = load->dreg;
9854 MONO_ADD_INS (bblock, ins);
9856 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9857 MONO_ADD_INS (bblock, ins);
9860 link_bblock (cfg, bblock, end_bblock);
9861 start_new_bblock = 1;
9869 CHECK_STACK_OVF (1);
9871 token = read32 (ip + 2);
9872 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9873 MonoType *type = mono_type_create_from_typespec (image, token);
9874 token = mono_type_size (type, &ialign);
9876 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9877 CHECK_TYPELOAD (klass);
9878 mono_class_init (klass);
9879 token = mono_class_value_size (klass, &align);
9881 EMIT_NEW_ICONST (cfg, ins, token);
9886 case CEE_REFANYTYPE: {
9887 MonoInst *src_var, *src;
9893 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9895 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9896 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9897 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9915 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9925 g_warning ("opcode 0x%02x not handled", *ip);
9929 if (start_new_bblock != 1)
9932 bblock->cil_length = ip - bblock->cil_code;
9933 bblock->next_bb = end_bblock;
9935 if (cfg->method == method && cfg->domainvar) {
9937 MonoInst *get_domain;
9939 cfg->cbb = init_localsbb;
9941 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9942 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9945 get_domain->dreg = alloc_preg (cfg);
9946 MONO_ADD_INS (cfg->cbb, get_domain);
9948 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9949 MONO_ADD_INS (cfg->cbb, store);
9952 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9953 if (cfg->compile_aot)
9954 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9955 mono_get_got_var (cfg);
9958 if (cfg->method == method && cfg->got_var)
9959 mono_emit_load_got_addr (cfg);
9964 cfg->cbb = init_localsbb;
9966 for (i = 0; i < header->num_locals; ++i) {
9967 MonoType *ptype = header->locals [i];
9968 int t = ptype->type;
9969 dreg = cfg->locals [i]->dreg;
9971 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9972 t = mono_class_enum_basetype (ptype->data.klass)->type;
9974 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9975 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9976 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9977 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9978 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9979 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9980 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9981 ins->type = STACK_R8;
9982 ins->inst_p0 = (void*)&r8_0;
9983 ins->dreg = alloc_dreg (cfg, STACK_R8);
9984 MONO_ADD_INS (init_localsbb, ins);
9985 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9986 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9987 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9988 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9990 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9995 if (cfg->init_ref_vars && cfg->method == method) {
9996 /* Emit initialization for ref vars */
9997 // FIXME: Avoid duplication initialization for IL locals.
9998 for (i = 0; i < cfg->num_varinfo; ++i) {
9999 MonoInst *ins = cfg->varinfo [i];
10001 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10002 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10006 /* Add a sequence point for method entry/exit events */
10008 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10009 MONO_ADD_INS (init_localsbb, ins);
10010 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10011 MONO_ADD_INS (cfg->bb_exit, ins);
10016 if (cfg->method == method) {
10017 MonoBasicBlock *bb;
10018 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10019 bb->region = mono_find_block_region (cfg, bb->real_offset);
10021 mono_create_spvar_for_region (cfg, bb->region);
10022 if (cfg->verbose_level > 2)
10023 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10027 g_slist_free (class_inits);
10028 dont_inline = g_list_remove (dont_inline, method);
10030 if (inline_costs < 0) {
10033 /* Method is too large */
10034 mname = mono_method_full_name (method, TRUE);
10035 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10036 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10038 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10039 mono_basic_block_free (original_bb);
10043 if ((cfg->verbose_level > 2) && (cfg->method == method))
10044 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10046 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10047 mono_basic_block_free (original_bb);
10048 return inline_costs;
10051 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10058 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10062 set_exception_type_from_invalid_il (cfg, method, ip);
10066 g_slist_free (class_inits);
10067 mono_basic_block_free (original_bb);
10068 dont_inline = g_list_remove (dont_inline, method);
10069 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10074 store_membase_reg_to_store_membase_imm (int opcode)
10077 case OP_STORE_MEMBASE_REG:
10078 return OP_STORE_MEMBASE_IMM;
10079 case OP_STOREI1_MEMBASE_REG:
10080 return OP_STOREI1_MEMBASE_IMM;
10081 case OP_STOREI2_MEMBASE_REG:
10082 return OP_STOREI2_MEMBASE_IMM;
10083 case OP_STOREI4_MEMBASE_REG:
10084 return OP_STOREI4_MEMBASE_IMM;
10085 case OP_STOREI8_MEMBASE_REG:
10086 return OP_STOREI8_MEMBASE_IMM;
10088 g_assert_not_reached ();
10094 #endif /* DISABLE_JIT */
10097 mono_op_to_op_imm (int opcode)
10101 return OP_IADD_IMM;
10103 return OP_ISUB_IMM;
10105 return OP_IDIV_IMM;
10107 return OP_IDIV_UN_IMM;
10109 return OP_IREM_IMM;
10111 return OP_IREM_UN_IMM;
10113 return OP_IMUL_IMM;
10115 return OP_IAND_IMM;
10119 return OP_IXOR_IMM;
10121 return OP_ISHL_IMM;
10123 return OP_ISHR_IMM;
10125 return OP_ISHR_UN_IMM;
10128 return OP_LADD_IMM;
10130 return OP_LSUB_IMM;
10132 return OP_LAND_IMM;
10136 return OP_LXOR_IMM;
10138 return OP_LSHL_IMM;
10140 return OP_LSHR_IMM;
10142 return OP_LSHR_UN_IMM;
10145 return OP_COMPARE_IMM;
10147 return OP_ICOMPARE_IMM;
10149 return OP_LCOMPARE_IMM;
10151 case OP_STORE_MEMBASE_REG:
10152 return OP_STORE_MEMBASE_IMM;
10153 case OP_STOREI1_MEMBASE_REG:
10154 return OP_STOREI1_MEMBASE_IMM;
10155 case OP_STOREI2_MEMBASE_REG:
10156 return OP_STOREI2_MEMBASE_IMM;
10157 case OP_STOREI4_MEMBASE_REG:
10158 return OP_STOREI4_MEMBASE_IMM;
10160 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10162 return OP_X86_PUSH_IMM;
10163 case OP_X86_COMPARE_MEMBASE_REG:
10164 return OP_X86_COMPARE_MEMBASE_IMM;
10166 #if defined(TARGET_AMD64)
10167 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10168 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10170 case OP_VOIDCALL_REG:
10171 return OP_VOIDCALL;
10179 return OP_LOCALLOC_IMM;
10186 ldind_to_load_membase (int opcode)
10190 return OP_LOADI1_MEMBASE;
10192 return OP_LOADU1_MEMBASE;
10194 return OP_LOADI2_MEMBASE;
10196 return OP_LOADU2_MEMBASE;
10198 return OP_LOADI4_MEMBASE;
10200 return OP_LOADU4_MEMBASE;
10202 return OP_LOAD_MEMBASE;
10203 case CEE_LDIND_REF:
10204 return OP_LOAD_MEMBASE;
10206 return OP_LOADI8_MEMBASE;
10208 return OP_LOADR4_MEMBASE;
10210 return OP_LOADR8_MEMBASE;
10212 g_assert_not_reached ();
10219 stind_to_store_membase (int opcode)
10223 return OP_STOREI1_MEMBASE_REG;
10225 return OP_STOREI2_MEMBASE_REG;
10227 return OP_STOREI4_MEMBASE_REG;
10229 case CEE_STIND_REF:
10230 return OP_STORE_MEMBASE_REG;
10232 return OP_STOREI8_MEMBASE_REG;
10234 return OP_STORER4_MEMBASE_REG;
10236 return OP_STORER8_MEMBASE_REG;
10238 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding OP_LOAD*_MEM form
 * (load from an absolute address), or -1 when the target architecture has
 * no such instruction.  Only x86/amd64 support these; the 64 bit load is
 * only available when registers are 64 bit wide.
 *
 * NOTE(review): case labels/braces reconstructed from the extracted
 * fragment — confirm against upstream.
 */
static inline int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 read-modify-write opcode which folds OPCODE plus a
 * following store to a stack slot (performed by STORE_OPCODE) into a single
 * instruction operating directly on memory, or -1 if no folding is possible.
 * OP_MOVE folds to OP_NOP since the store itself already performs the move.
 *
 * NOTE(review): the case labels were elided in the extracted fragment;
 * reconstructed from the visible return values and gap widths — confirm
 * against upstream.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only 32 bit / pointer-sized stores can be folded on x86. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which sets a byte-sized stack slot directly from the
 * condition flags (x86 SETcc with a memory operand), folding the compare
 * result OPCODE and the following byte store STORE_OPCODE, or -1 if no
 * folding is possible.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fall through */
	case OP_CNE: /* NOTE(review): this label was elided in the fragment; reconstructed — confirm */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an x86/amd64 opcode which folds a load from a stack slot
 * (performed by LOAD_OPCODE) into OPCODE as its first source operand, or -1
 * if no such folding is possible.
 *
 * NOTE(review): structural lines were elided in the extracted fragment;
 * reconstructed (including the disabled FIXME regions, whose interiors are
 * visible) — confirm against upstream.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an x86/amd64 opcode which folds a load from a stack slot
 * (performed by LOAD_OPCODE) into OPCODE as its second source operand, or -1
 * if no such folding is possible.  On amd64 the 32 bit and 64 bit load
 * widths select between the I (x86-named) and L (amd64-named) variants.
 *
 * NOTE(review): case labels/braces reconstructed from the extracted
 * fragment — confirm against upstream.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
		switch (opcode) {
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * variant is emulated with a helper call on this architecture, so the caller
 * keeps the register form instead of forcing a decomposition.
 *
 * NOTE(review): the case labels inside the #if regions were elided in the
 * extracted fragment; reconstructed — confirm against upstream.
 */
static inline int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
10529 #ifndef DISABLE_JIT
10532 * mono_handle_global_vregs:
10534 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10538 mono_handle_global_vregs (MonoCompile *cfg)
10540 gint32 *vreg_to_bb;
10541 MonoBasicBlock *bb;
10544 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10546 #ifdef MONO_ARCH_SIMD_INTRINSICS
10547 if (cfg->uses_simd_intrinsics)
10548 mono_simd_simplify_indirection (cfg);
10551 /* Find local vregs used in more than one bb */
10552 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10553 MonoInst *ins = bb->code;
10554 int block_num = bb->block_num;
10556 if (cfg->verbose_level > 2)
10557 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10560 for (; ins; ins = ins->next) {
10561 const char *spec = INS_INFO (ins->opcode);
10562 int regtype = 0, regindex;
10565 if (G_UNLIKELY (cfg->verbose_level > 2))
10566 mono_print_ins (ins);
10568 g_assert (ins->opcode >= MONO_CEE_LAST);
10570 for (regindex = 0; regindex < 4; regindex ++) {
10573 if (regindex == 0) {
10574 regtype = spec [MONO_INST_DEST];
10575 if (regtype == ' ')
10578 } else if (regindex == 1) {
10579 regtype = spec [MONO_INST_SRC1];
10580 if (regtype == ' ')
10583 } else if (regindex == 2) {
10584 regtype = spec [MONO_INST_SRC2];
10585 if (regtype == ' ')
10588 } else if (regindex == 3) {
10589 regtype = spec [MONO_INST_SRC3];
10590 if (regtype == ' ')
10595 #if SIZEOF_REGISTER == 4
10596 /* In the LLVM case, the long opcodes are not decomposed */
10597 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10599 * Since some instructions reference the original long vreg,
10600 * and some reference the two component vregs, it is quite hard
10601 * to determine when it needs to be global. So be conservative.
10603 if (!get_vreg_to_inst (cfg, vreg)) {
10604 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10606 if (cfg->verbose_level > 2)
10607 printf ("LONG VREG R%d made global.\n", vreg);
10611 * Make the component vregs volatile since the optimizations can
10612 * get confused otherwise.
10614 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10615 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10619 g_assert (vreg != -1);
10621 prev_bb = vreg_to_bb [vreg];
10622 if (prev_bb == 0) {
10623 /* 0 is a valid block num */
10624 vreg_to_bb [vreg] = block_num + 1;
10625 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10626 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10629 if (!get_vreg_to_inst (cfg, vreg)) {
10630 if (G_UNLIKELY (cfg->verbose_level > 2))
10631 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10635 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10638 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10641 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10644 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10647 g_assert_not_reached ();
10651 /* Flag as having been used in more than one bb */
10652 vreg_to_bb [vreg] = -1;
10658 /* If a variable is used in only one bblock, convert it into a local vreg */
10659 for (i = 0; i < cfg->num_varinfo; i++) {
10660 MonoInst *var = cfg->varinfo [i];
10661 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10663 switch (var->type) {
10669 #if SIZEOF_REGISTER == 8
10672 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10673 /* Enabling this screws up the fp stack on x86 */
10676 /* Arguments are implicitly global */
10677 /* Putting R4 vars into registers doesn't work currently */
10678 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10680 * Make that the variable's liveness interval doesn't contain a call, since
10681 * that would cause the lvreg to be spilled, making the whole optimization
10684 /* This is too slow for JIT compilation */
10686 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10688 int def_index, call_index, ins_index;
10689 gboolean spilled = FALSE;
10694 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10695 const char *spec = INS_INFO (ins->opcode);
10697 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10698 def_index = ins_index;
10700 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10701 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10702 if (call_index > def_index) {
10708 if (MONO_IS_CALL (ins))
10709 call_index = ins_index;
10719 if (G_UNLIKELY (cfg->verbose_level > 2))
10720 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10721 var->flags |= MONO_INST_IS_DEAD;
10722 cfg->vreg_to_inst [var->dreg] = NULL;
10729 * Compress the varinfo and vars tables so the liveness computation is faster and
10730 * takes up less space.
10733 for (i = 0; i < cfg->num_varinfo; ++i) {
10734 MonoInst *var = cfg->varinfo [i];
10735 if (pos < i && cfg->locals_start == i)
10736 cfg->locals_start = pos;
10737 if (!(var->flags & MONO_INST_IS_DEAD)) {
10739 cfg->varinfo [pos] = cfg->varinfo [i];
10740 cfg->varinfo [pos]->inst_c0 = pos;
10741 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10742 cfg->vars [pos].idx = pos;
10743 #if SIZEOF_REGISTER == 4
10744 if (cfg->varinfo [pos]->type == STACK_I8) {
10745 /* Modify the two component vars too */
10748 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10749 var1->inst_c0 = pos;
10750 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10751 var1->inst_c0 = pos;
10758 cfg->num_varinfo = pos;
10759 if (cfg->locals_start > cfg->num_varinfo)
10760 cfg->locals_start = cfg->num_varinfo;
10764 * mono_spill_global_vars:
10766 * Generate spill code for variables which are not allocated to registers,
10767 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10768 * code is generated which could be optimized by the local optimization passes.
10771 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10773 MonoBasicBlock *bb;
10775 int orig_next_vreg;
10776 guint32 *vreg_to_lvreg;
10778 guint32 i, lvregs_len;
10779 gboolean dest_has_lvreg = FALSE;
10780 guint32 stacktypes [128];
10781 MonoInst **live_range_start, **live_range_end;
10782 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10784 *need_local_opts = FALSE;
10786 memset (spec2, 0, sizeof (spec2));
10788 /* FIXME: Move this function to mini.c */
10789 stacktypes ['i'] = STACK_PTR;
10790 stacktypes ['l'] = STACK_I8;
10791 stacktypes ['f'] = STACK_R8;
10792 #ifdef MONO_ARCH_SIMD_INTRINSICS
10793 stacktypes ['x'] = STACK_VTYPE;
10796 #if SIZEOF_REGISTER == 4
10797 /* Create MonoInsts for longs */
10798 for (i = 0; i < cfg->num_varinfo; i++) {
10799 MonoInst *ins = cfg->varinfo [i];
10801 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10802 switch (ins->type) {
10807 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10810 g_assert (ins->opcode == OP_REGOFFSET);
10812 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10814 tree->opcode = OP_REGOFFSET;
10815 tree->inst_basereg = ins->inst_basereg;
10816 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10818 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10820 tree->opcode = OP_REGOFFSET;
10821 tree->inst_basereg = ins->inst_basereg;
10822 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10832 /* FIXME: widening and truncation */
10835 * As an optimization, when a variable allocated to the stack is first loaded into
10836 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10837 * the variable again.
10839 orig_next_vreg = cfg->next_vreg;
10840 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10841 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10845 * These arrays contain the first and last instructions accessing a given
10847 * Since we emit bblocks in the same order we process them here, and we
10848 * don't split live ranges, these will precisely describe the live range of
10849 * the variable, i.e. the instruction range where a valid value can be found
10850 * in the variables location.
10851 * The live range is computed using the liveness info computed by the liveness pass.
10852 * We can't use vmv->range, since that is an abstract live range, and we need
10853 * one which is instruction precise.
10854 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10856 /* FIXME: Only do this if debugging info is requested */
10857 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10858 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10859 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10860 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10862 /* Add spill loads/stores */
10863 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10866 if (cfg->verbose_level > 2)
10867 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10869 /* Clear vreg_to_lvreg array */
10870 for (i = 0; i < lvregs_len; i++)
10871 vreg_to_lvreg [lvregs [i]] = 0;
10875 MONO_BB_FOR_EACH_INS (bb, ins) {
10876 const char *spec = INS_INFO (ins->opcode);
10877 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10878 gboolean store, no_lvreg;
10879 int sregs [MONO_MAX_SRC_REGS];
10881 if (G_UNLIKELY (cfg->verbose_level > 2))
10882 mono_print_ins (ins);
10884 if (ins->opcode == OP_NOP)
10888 * We handle LDADDR here as well, since it can only be decomposed
10889 * when variable addresses are known.
10891 if (ins->opcode == OP_LDADDR) {
10892 MonoInst *var = ins->inst_p0;
10894 if (var->opcode == OP_VTARG_ADDR) {
10895 /* Happens on SPARC/S390 where vtypes are passed by reference */
10896 MonoInst *vtaddr = var->inst_left;
10897 if (vtaddr->opcode == OP_REGVAR) {
10898 ins->opcode = OP_MOVE;
10899 ins->sreg1 = vtaddr->dreg;
10901 else if (var->inst_left->opcode == OP_REGOFFSET) {
10902 ins->opcode = OP_LOAD_MEMBASE;
10903 ins->inst_basereg = vtaddr->inst_basereg;
10904 ins->inst_offset = vtaddr->inst_offset;
10908 g_assert (var->opcode == OP_REGOFFSET);
10910 ins->opcode = OP_ADD_IMM;
10911 ins->sreg1 = var->inst_basereg;
10912 ins->inst_imm = var->inst_offset;
10915 *need_local_opts = TRUE;
10916 spec = INS_INFO (ins->opcode);
10919 if (ins->opcode < MONO_CEE_LAST) {
10920 mono_print_ins (ins);
10921 g_assert_not_reached ();
10925 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10929 if (MONO_IS_STORE_MEMBASE (ins)) {
10930 tmp_reg = ins->dreg;
10931 ins->dreg = ins->sreg2;
10932 ins->sreg2 = tmp_reg;
10935 spec2 [MONO_INST_DEST] = ' ';
10936 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10937 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10938 spec2 [MONO_INST_SRC3] = ' ';
10940 } else if (MONO_IS_STORE_MEMINDEX (ins))
10941 g_assert_not_reached ();
10946 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10947 printf ("\t %.3s %d", spec, ins->dreg);
10948 num_sregs = mono_inst_get_src_registers (ins, sregs);
10949 for (srcindex = 0; srcindex < 3; ++srcindex)
10950 printf (" %d", sregs [srcindex]);
10957 regtype = spec [MONO_INST_DEST];
10958 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10961 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10962 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10963 MonoInst *store_ins;
10965 MonoInst *def_ins = ins;
10966 int dreg = ins->dreg; /* The original vreg */
10968 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10970 if (var->opcode == OP_REGVAR) {
10971 ins->dreg = var->dreg;
10972 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10974 * Instead of emitting a load+store, use a _membase opcode.
10976 g_assert (var->opcode == OP_REGOFFSET);
10977 if (ins->opcode == OP_MOVE) {
10981 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10982 ins->inst_basereg = var->inst_basereg;
10983 ins->inst_offset = var->inst_offset;
10986 spec = INS_INFO (ins->opcode);
10990 g_assert (var->opcode == OP_REGOFFSET);
10992 prev_dreg = ins->dreg;
10994 /* Invalidate any previous lvreg for this vreg */
10995 vreg_to_lvreg [ins->dreg] = 0;
10999 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11001 store_opcode = OP_STOREI8_MEMBASE_REG;
11004 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11006 if (regtype == 'l') {
11007 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11008 mono_bblock_insert_after_ins (bb, ins, store_ins);
11009 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11010 mono_bblock_insert_after_ins (bb, ins, store_ins);
11011 def_ins = store_ins;
11014 g_assert (store_opcode != OP_STOREV_MEMBASE);
11016 /* Try to fuse the store into the instruction itself */
11017 /* FIXME: Add more instructions */
11018 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11019 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11020 ins->inst_imm = ins->inst_c0;
11021 ins->inst_destbasereg = var->inst_basereg;
11022 ins->inst_offset = var->inst_offset;
11023 spec = INS_INFO (ins->opcode);
11024 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11025 ins->opcode = store_opcode;
11026 ins->inst_destbasereg = var->inst_basereg;
11027 ins->inst_offset = var->inst_offset;
11031 tmp_reg = ins->dreg;
11032 ins->dreg = ins->sreg2;
11033 ins->sreg2 = tmp_reg;
11036 spec2 [MONO_INST_DEST] = ' ';
11037 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11038 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11039 spec2 [MONO_INST_SRC3] = ' ';
11041 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11042 // FIXME: The backends expect the base reg to be in inst_basereg
11043 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11045 ins->inst_basereg = var->inst_basereg;
11046 ins->inst_offset = var->inst_offset;
11047 spec = INS_INFO (ins->opcode);
11049 /* printf ("INS: "); mono_print_ins (ins); */
11050 /* Create a store instruction */
11051 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11053 /* Insert it after the instruction */
11054 mono_bblock_insert_after_ins (bb, ins, store_ins);
11056 def_ins = store_ins;
11059 * We can't assign ins->dreg to var->dreg here, since the
11060 * sregs could use it. So set a flag, and do it after
11063 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11064 dest_has_lvreg = TRUE;
11069 if (def_ins && !live_range_start [dreg]) {
11070 live_range_start [dreg] = def_ins;
11071 live_range_start_bb [dreg] = bb;
11078 num_sregs = mono_inst_get_src_registers (ins, sregs);
11079 for (srcindex = 0; srcindex < 3; ++srcindex) {
11080 regtype = spec [MONO_INST_SRC1 + srcindex];
11081 sreg = sregs [srcindex];
11083 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11084 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11085 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11086 MonoInst *use_ins = ins;
11087 MonoInst *load_ins;
11088 guint32 load_opcode;
11090 if (var->opcode == OP_REGVAR) {
11091 sregs [srcindex] = var->dreg;
11092 //mono_inst_set_src_registers (ins, sregs);
11093 live_range_end [sreg] = use_ins;
11094 live_range_end_bb [sreg] = bb;
11098 g_assert (var->opcode == OP_REGOFFSET);
11100 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11102 g_assert (load_opcode != OP_LOADV_MEMBASE);
11104 if (vreg_to_lvreg [sreg]) {
11105 g_assert (vreg_to_lvreg [sreg] != -1);
11107 /* The variable is already loaded to an lvreg */
11108 if (G_UNLIKELY (cfg->verbose_level > 2))
11109 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11110 sregs [srcindex] = vreg_to_lvreg [sreg];
11111 //mono_inst_set_src_registers (ins, sregs);
11115 /* Try to fuse the load into the instruction */
11116 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11117 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11118 sregs [0] = var->inst_basereg;
11119 //mono_inst_set_src_registers (ins, sregs);
11120 ins->inst_offset = var->inst_offset;
11121 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11122 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11123 sregs [1] = var->inst_basereg;
11124 //mono_inst_set_src_registers (ins, sregs);
11125 ins->inst_offset = var->inst_offset;
11127 if (MONO_IS_REAL_MOVE (ins)) {
11128 ins->opcode = OP_NOP;
11131 //printf ("%d ", srcindex); mono_print_ins (ins);
11133 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11135 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11136 if (var->dreg == prev_dreg) {
11138 * sreg refers to the value loaded by the load
11139 * emitted below, but we need to use ins->dreg
11140 * since it refers to the store emitted earlier.
11144 g_assert (sreg != -1);
11145 vreg_to_lvreg [var->dreg] = sreg;
11146 g_assert (lvregs_len < 1024);
11147 lvregs [lvregs_len ++] = var->dreg;
11151 sregs [srcindex] = sreg;
11152 //mono_inst_set_src_registers (ins, sregs);
11154 if (regtype == 'l') {
11155 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11156 mono_bblock_insert_before_ins (bb, ins, load_ins);
11157 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11158 mono_bblock_insert_before_ins (bb, ins, load_ins);
11159 use_ins = load_ins;
11162 #if SIZEOF_REGISTER == 4
11163 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11165 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11166 mono_bblock_insert_before_ins (bb, ins, load_ins);
11167 use_ins = load_ins;
11171 if (var->dreg < orig_next_vreg) {
11172 live_range_end [var->dreg] = use_ins;
11173 live_range_end_bb [var->dreg] = bb;
11177 mono_inst_set_src_registers (ins, sregs);
11179 if (dest_has_lvreg) {
11180 g_assert (ins->dreg != -1);
11181 vreg_to_lvreg [prev_dreg] = ins->dreg;
11182 g_assert (lvregs_len < 1024);
11183 lvregs [lvregs_len ++] = prev_dreg;
11184 dest_has_lvreg = FALSE;
11188 tmp_reg = ins->dreg;
11189 ins->dreg = ins->sreg2;
11190 ins->sreg2 = tmp_reg;
11193 if (MONO_IS_CALL (ins)) {
11194 /* Clear vreg_to_lvreg array */
11195 for (i = 0; i < lvregs_len; i++)
11196 vreg_to_lvreg [lvregs [i]] = 0;
11198 } else if (ins->opcode == OP_NOP) {
11200 MONO_INST_NULLIFY_SREGS (ins);
11203 if (cfg->verbose_level > 2)
11204 mono_print_ins_index (1, ins);
11207 /* Extend the live range based on the liveness info */
11208 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11209 for (i = 0; i < cfg->num_varinfo; i ++) {
11210 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11212 if (vreg_is_volatile (cfg, vi->vreg))
11213 /* The liveness info is incomplete */
11216 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11217 /* Live from at least the first ins of this bb */
11218 live_range_start [vi->vreg] = bb->code;
11219 live_range_start_bb [vi->vreg] = bb;
11222 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11223 /* Live at least until the last ins of this bb */
11224 live_range_end [vi->vreg] = bb->last_ins;
11225 live_range_end_bb [vi->vreg] = bb;
11231 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11233 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11234 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11236 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11237 for (i = 0; i < cfg->num_varinfo; ++i) {
11238 int vreg = MONO_VARINFO (cfg, i)->vreg;
11241 if (live_range_start [vreg]) {
11242 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11244 ins->inst_c1 = vreg;
11245 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11247 if (live_range_end [vreg]) {
11248 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11250 ins->inst_c1 = vreg;
11251 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11252 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11254 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11260 g_free (live_range_start);
11261 g_free (live_range_end);
11262 g_free (live_range_start_bb);
11263 g_free (live_range_end_bb);
11268 * - use 'iadd' instead of 'int_add'
11269 * - handling ovf opcodes: decompose in method_to_ir.
11270 * - unify iregs/fregs
11271 * -> partly done, the missing parts are:
11272 * - a more complete unification would involve unifying the hregs as well, so
11273 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11274 * would no longer map to the machine hregs, so the code generators would need to
11275 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11276 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11277 * fp/non-fp branches speeds it up by about 15%.
11278 * - use sext/zext opcodes instead of shifts
11280 * - get rid of TEMPLOADs if possible and use vregs instead
11281 * - clean up usage of OP_P/OP_ opcodes
11282 * - cleanup usage of DUMMY_USE
11283 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11285 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11286 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11287 * - make sure handle_stack_args () is called before the branch is emitted
11288 * - when the new IR is done, get rid of all unused stuff
11289 * - COMPARE/BEQ as separate instructions or unify them ?
11290 * - keeping them separate allows specialized compare instructions like
11291 * compare_imm, compare_membase
11292 * - most back ends unify fp compare+branch, fp compare+ceq
11293 * - integrate mono_save_args into inline_method
 11294 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11295 * - handle long shift opts on 32 bit platforms somehow: they require
11296 * 3 sregs (2 for arg1 and 1 for arg2)
11297 * - make byref a 'normal' type.
11298 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11299 * variable if needed.
11300 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11301 * like inline_method.
11302 * - remove inlining restrictions
11303 * - fix LNEG and enable cfold of INEG
11304 * - generalize x86 optimizations like ldelema as a peephole optimization
11305 * - add store_mem_imm for amd64
11306 * - optimize the loading of the interruption flag in the managed->native wrappers
11307 * - avoid special handling of OP_NOP in passes
11308 * - move code inserting instructions into one function/macro.
11309 * - try a coalescing phase after liveness analysis
11310 * - add float -> vreg conversion + local optimizations on !x86
11311 * - figure out how to handle decomposed branches during optimizations, ie.
11312 * compare+branch, op_jump_table+op_br etc.
11313 * - promote RuntimeXHandles to vregs
11314 * - vtype cleanups:
11315 * - add a NEW_VARLOADA_VREG macro
11316 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11317 * accessing vtype fields.
11318 * - get rid of I8CONST on 64 bit platforms
11319 * - dealing with the increase in code size due to branches created during opcode
11321 * - use extended basic blocks
11322 * - all parts of the JIT
11323 * - handle_global_vregs () && local regalloc
11324 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11325 * - sources of increase in code size:
11328 * - isinst and castclass
11329 * - lvregs not allocated to global registers even if used multiple times
11330 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11332 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11333 * - add all micro optimizations from the old JIT
11334 * - put tree optimizations into the deadce pass
11335 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11336 * specific function.
11337 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11338 * fcompare + branchCC.
11339 * - create a helper function for allocating a stack slot, taking into account
11340 * MONO_CFG_HAS_SPILLUP.
11342 * - merge the ia64 switch changes.
11343 * - optimize mono_regstate2_alloc_int/float.
11344 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11345 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11346 * parts of the tree could be separated by other instructions, killing the tree
11347 * arguments, or stores killing loads etc. Also, should we fold loads into other
11348 * instructions if the result of the load is used multiple times ?
11349 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11350 * - LAST MERGE: 108395.
11351 * - when returning vtypes in registers, generate IR and append it to the end of the
11352 * last bb instead of doing it in the epilog.
11353 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11361 - When to decompose opcodes:
11362 - earlier: this makes some optimizations hard to implement, since the low level IR
 11363 no longer contains the necessary information. But it is easier to do.
11364 - later: harder to implement, enables more optimizations.
11365 - Branches inside bblocks:
11366 - created when decomposing complex opcodes.
11367 - branches to another bblock: harmless, but not tracked by the branch
11368 optimizations, so need to branch to a label at the start of the bblock.
11369 - branches to inside the same bblock: very problematic, trips up the local
 11370 reg allocator. Can be fixed by splitting the current bblock, but that is a
11371 complex operation, since some local vregs can become global vregs etc.
11372 - Local/global vregs:
11373 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11374 local register allocator.
11375 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11376 structure, created by mono_create_var (). Assigned to hregs or the stack by
11377 the global register allocator.
11378 - When to do optimizations like alu->alu_imm:
11379 - earlier -> saves work later on since the IR will be smaller/simpler
11380 - later -> can work on more instructions
11381 - Handling of valuetypes:
11382 - When a vtype is pushed on the stack, a new temporary is created, an
11383 instruction computing its address (LDADDR) is emitted and pushed on
11384 the stack. Need to optimize cases when the vtype is used immediately as in
11385 argument passing, stloc etc.
11386 - Instead of the to_end stuff in the old JIT, simply call the function handling
11387 the values on the stack before emitting the last instruction of the bb.
11390 #endif /* DISABLE_JIT */