2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Loads into INTF_REG the interface-offsets entry for KLASS given a vtable
 * pointer in VTABLE_REG.  Under AOT the (adjusted) interface id comes in via
 * a MONO_PATCH_INFO_ADJUSTED_IID patch and is added to the vtable pointer;
 * otherwise the entry is addressed at a constant negative offset from the
 * vtable (the offsets array precedes the vtable in memory, see above).
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: slot index is known at compile time, load at -(iid+1)*sizeof(void*) */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emits code which leaves a nonzero value in INTF_BIT_REG iff the bit for
 * KLASS's interface id is set in the interface bitmap found at
 * BASE_REG + OFFSET (the bitmap field of a MonoClass or MonoVTable).
 * With COMPRESSED_INTERFACE_BITMAP the test is done by calling the
 * mono_class_interface_match icall; otherwise the relevant bitmap byte is
 * loaded and masked inline (with the mask computed at runtime under AOT,
 * since the interface id is only known via a MONO_PATCH_INFO_IID patch).
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* the icall scans the compressed bitmap for the interface id */
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: iid unknown at compile time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) with emitted instructions */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask fold to compile-time constants */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoClass. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoVTable. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whether the interface id of @klass is smaller
1439 * than the value given by max_iid_reg.
/*
 * On failure either branches to FALSE_TARGET (when non-NULL, judging from the
 * two emit paths below) or raises InvalidCastException.  Under AOT the
 * interface id is materialized via a MONO_PATCH_INFO_IID patch.
 */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable.max_interface_id (16-bit unsigned) then delegate */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass.max_interface_id (16-bit unsigned) then delegate */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emits an "is instance" test for KLASS against the runtime class in
 * KLASS_REG, using the supertypes array: when the class hierarchy is deeper
 * than the default supertable size, the idepth is compared first (branching
 * to FALSE_TARGET when too shallow), then supertypes [klass->idepth - 1] is
 * compared against KLASS (taken from KLASS_INST when given, from an AOT
 * class patch, or as an immediate), branching to TRUE_TARGET on a match.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with the target class known statically. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emits a test that the object whose vtable is in VTABLE_REG implements the
 * interface KLASS: range-checks the interface id against max_interface_id,
 * then tests the interface bitmap bit.  Judging from the two emit paths
 * below, a non-NULL TRUE_TARGET gets a branch on success, while the other
 * path raises InvalidCastException on failure.
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* same shape as mini_emit_iface_cast, but reads MonoClass fields */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emits code checking that the class in KLASS_REG equals KLASS exactly,
 * raising InvalidCastException otherwise.  The comparison operand comes
 * from KLASS_INST when supplied, from a class patch under AOT, or is an
 * immediate pointer constant in the plain JIT case.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with the class known statically. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compares the class in KLASS_REG against KLASS and branches to TARGET
 * using BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN) instead of raising.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emits the body of a castclass check for KLASS against the runtime class
 * in KLASS_REG, raising InvalidCastException on mismatch.  The first arm
 * (rank check + element-class check) handles array classes, with special
 * cases for object/enum element types; the second arm range-checks idepth
 * and compares the supertypes entry via mini_emit_class_check_inst.
 * OBJ_REG may be -1 to skip the vector (non-bounds) check; see the
 * recursive call below for arrays of arrays.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
/* array case: ranks must match exactly */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
/* object[] target: any non-enum reference element type is accepted */
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array case: walk-free check via the supertypes table */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with the class known statically. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emits inline code storing VAL (only 0 is supported, see the assert) into
 * SIZE bytes at DESTREG + OFFSET.  Small aligned sizes use a single
 * store-immediate; otherwise the value is materialized in a register and
 * stored in word-sized chunks (wide stores only when unaligned access is
 * allowed), falling back to 2- and 1-byte stores for the remainder.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if neccesary */
/* unaligned destination: byte-by-byte stores */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* align to 8 with a 4-byte store, then use 8-byte stores */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emits inline load/store pairs copying SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET.  Copies in the widest chunks the alignment (and
 * NO_UNALIGNED_ACCESS) permits: 8-byte moves on 64-bit, then 4-, 2- and
 * 1-byte moves for the tail.  Regions are assumed not to overlap
 * (TODO confirm — callers appear to use this for struct copies).
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if neccesary */
/* unaligned copy: byte-by-byte */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Maps a return TYPE to the corresponding call opcode, selecting the
 * indirect (CALLI) or virtual variant per the CALLI/VIRT flags.  Byref
 * returns, pointers and references use plain CALL; I8 uses LCALL; R4/R8 use
 * FCALL; value types and typedbyref use VCALL.  Enums and generic instances
 * are unwrapped to their underlying/container type and retried.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
1817 if (type->data.klass->enumtype) {
/* enums dispatch on their underlying integral type */
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
/* value types must match the stack item's class exactly */
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* implicit 'this' must be a reference, managed pointer or native pointer */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
/* enums are re-checked as their underlying integral type */
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps a *CALLVIRT opcode to the corresponding direct-call opcode. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps a *CALLVIRT opcode to the corresponding membase (indirect through
 * memory) call opcode, used for vtable/IMT slot dispatch. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Sets up the hidden IMT argument for an interface call: the MonoMethod
 * being called (either copied from IMT_ARG, loaded via an AOT method-const
 * patch, or as a direct pointer constant).  Under LLVM the register is
 * recorded in call->imt_arg_reg; otherwise it is bound to MONO_ARCH_IMT_REG
 * when the architecture defines one, or handed to
 * mono_arch_emit_imt_argument as a fallback.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo from MP and fills in ip/type/target.
 * Memory is mempool-owned; the caller must not free it. */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
2150 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 * Builds a MonoCallInst for SIG: picks the opcode (tail call, or via
 * ret_type_to_call_opcode for calli/virtual/direct), wires up the return
 * value (vret_var for struct returns, a fresh vreg otherwise, with
 * OP_OUTARG_VTRETADDR used before vtype decomposition — see the long
 * comment below), converts R4 arguments via an icall under soft-float, and
 * lets the backend (LLVM or mono_arch) emit the argument moves.
 * Note: the call is NOT added to a bblock here; callers do that.
 */
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing-argument area needed by any call */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
2247 inline static MonoInst*
/* Emits an indirect call through the address in ADDR. */
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 * Attaches the runtime-generic-context argument to CALL: bound to the
 * dedicated MONO_ARCH_RGCTX_REG when the architecture has one, otherwise
 * recorded in call->rgctx_arg_reg (the LLVM/soft path).
 */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
2274 inline static MonoInst*
/* Indirect call which also passes a runtime-generic-context argument.
 * The rgctx value is copied to a fresh vreg before the call is built. */
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emits a call to METHOD with signature SIG.  THIS != NULL selects virtual
 * dispatch.  Handles, in order: string ctors (real return type is string),
 * remoting-capable receivers (transparent-proxy check wrapper, or an rgctx
 * lookup + calli when the method is generic-shared), delegate Invoke via
 * the invoke_impl field, devirtualization of non-virtual/final methods,
 * and finally vtable or IMT slot dispatch through a membase call.
 * IMT_ARG, when non-NULL, carries the interface-method argument.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
/* shared generic + remoting: resolve the invoke-with-check wrapper via rgctx */
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* true virtual dispatch: load vtable (with fault/null check folded in) */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
/* interface call through the IMT: slot sits before the vtable */
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
/* no IMT: go through the per-interface offset table */
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes VTABLE_ARG as
 * the runtime-generic-context argument (copied to a fresh vreg first). */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call METHOD using its own signature, no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native function FUNC with signature SIG. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emits a call to the registered JIT icall identified by its address FUNC,
 * going through the icall's wrapper and using its registered signature. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* register the patch so the ABS resolver can recognize this address */
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG: native code (and
 * LLVM-compiled code) may return sub-register-sized integers without
 * initializing the upper bits, so emit an explicit sign/zero extension
 * matching the return type's load opcode.
 *   NOTE(review): non-contiguous excerpt; break statements / default case /
 * final return are elided from view.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Pick the widening conversion from the membase load opcode of the return type. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the stack type of the original result. */
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3 args) helper from corlib, caching it
 * in a static on first use.  Aborts if the helper is missing (old corlib).
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap of the pointer-sized slots of KLASS (at byte OFFSET from
 * the start) which hold object references, so the GC write barrier helper
 * knows which stores need barriers.  Recurses into value-type fields that
 * themselves contain references.  Static fields are skipped.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header; strip it. */
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned; one bit per pointer slot. */
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of SIZE bytes of KLASS from
 * iargs[1] (src) to iargs[0] (dest).  Small copies are unrolled inline,
 * emitting a GC write barrier after each store of a reference slot
 * (per the bitmap from create_write_barrier_bitmap ()); larger copies fall
 * back to the mono_gc_wbarrier_value_copy_bitmap icall.
 *   NOTE(review): non-contiguous excerpt; several returns/braces are elided,
 * so the exact success/failure paths are not fully visible here.
 */
2594 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2596 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2597 unsigned need_wb = 0;
2602 /*types with references can't have alignment smaller than sizeof(void*) */
2603 if (align < SIZEOF_VOID_P)
2606 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2607 if (size > 32 * SIZEOF_VOID_P)
2610 create_write_barrier_bitmap (klass, &need_wb, 0);
2612 /* We don't unroll more than 5 stores to avoid code bloat. */
2613 if (size > 5 * SIZEOF_VOID_P) {
2614 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size before calling the icall. */
2615 size += (SIZEOF_VOID_P - 1);
2616 size &= ~(SIZEOF_VOID_P - 1);
2618 EMIT_NEW_ICONST (cfg, iargs [2], size);
2619 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2620 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2624 destreg = iargs [0]->dreg;
2625 srcreg = iargs [1]->dreg;
2628 dest_ptr_reg = alloc_preg (cfg);
2629 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one slot per iteration. */
2632 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2634 while (size >= SIZEOF_VOID_P) {
2635 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Low bit of need_wb tells whether the slot just stored holds a reference. */
2638 if (need_wb & 0x1) {
2639 MonoInst *dummy_use;
2641 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2642 mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);
/* Keep dest_ptr_reg alive across the barrier call for the register allocator. */
2644 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2645 dummy_use->sreg1 = dest_ptr_reg;
2646 MONO_ADD_INS (cfg->cbb, dummy_use);
2650 offset += SIZEOF_VOID_P;
2651 size -= SIZEOF_VOID_P;
2654 /*tmp += sizeof (void*)*/
2655 if (size >= SIZEOF_VOID_P) {
2656 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2657 MONO_ADD_INS (cfg->cbb, iargs [0]);
2661 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with progressively smaller loads/stores. */
2663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2664 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2677 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2687 * Emit code to copy a valuetype of type @klass whose address is stored in
2688 * @src->dreg to memory whose address is stored at @dest->dreg.
2691 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2693 MonoInst *iargs [4];
2696 MonoMethod *memcpy_method;
2700 * This check breaks with spilled vars... need to handle it during verification anyway.
2701 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between native (marshalled) and managed layout. */
2705 n = mono_class_native_size (klass, &align);
2707 n = mono_class_value_size (klass, &align);
2709 /* if native is true there should be no references in the struct */
2710 if (cfg->gen_write_barriers && klass->has_references && !native) {
2711 /* Avoid barriers when storing to the stack */
2712 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2713 (dest->opcode == OP_LDADDR))) {
2714 int context_used = 0;
2719 if (cfg->generic_sharing_context)
2720 context_used = mono_class_check_context_used (klass);
2722 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2723 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2725 } else if (context_used) {
/* Shared generic code: load the class from the rgctx at runtime. */
2726 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2728 if (cfg->compile_aot) {
2729 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2731 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy reads the class's GC descriptor; compute it eagerly. */
2732 mono_class_compute_gc_descriptor (klass);
2736 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or stack destination): a plain memcpy is enough. */
2741 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2742 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2743 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2747 EMIT_NEW_ICONST (cfg, iargs [2], n);
2749 memcpy_method = get_memcpy_method ();
2750 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3 args) helper from corlib, caching it
 * in a static on first use.  Aborts if the helper is missing (old corlib).
 */
2755 get_memset_method (void)
2757 static MonoMethod *memset_method = NULL;
2758 if (!memset_method) {
2759 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2761 g_error ("Old corlib found. Install a new one");
2763 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type KLASS at the address in
 * DEST->dreg (the CIL `initobj` semantics).  Small sizes are inlined via
 * mini_emit_memset (); larger ones call the managed memset helper.
 */
2767 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2769 MonoInst *iargs [3];
2772 MonoMethod *memset_method;
2774 /* FIXME: Optimize this for the case when dest is an LDADDR */
2776 mono_class_init (klass);
2777 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized slots: cheap enough to clear inline. */
2779 if (n <= sizeof (gpointer) * 5) {
2780 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2783 memset_method = get_memset_method ();
2785 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2786 EMIT_NEW_ICONST (cfg, iargs [2], n);
2787 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD, choosing the
 * source based on how the context is used:
 *   - method-inflated context: load the MRGCTX from the vtable var;
 *   - static/valuetype methods: load the vtable var (possibly indirecting
 *     through the MRGCTX's class_vtable);
 *   - otherwise: load the vtable from `this`.
 *   NOTE(review): non-contiguous excerpt; several returns and closing braces
 * are elided from view.
 */
2792 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2794 MonoInst *this = NULL;
/* Only meaningful when compiling shared generic code. */
2796 g_assert (cfg->generic_sharing_context);
2798 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2799 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2800 !method->klass->valuetype)
2801 EMIT_NEW_ARGLOAD (cfg, this, 0);
2803 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2804 MonoInst *mrgctx_loc, *mrgctx_var;
2807 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2809 mrgctx_loc = mono_get_vtable_var (cfg);
2810 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2813 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2814 MonoInst *vtable_loc, *vtable_var;
2818 vtable_loc = mono_get_vtable_var (cfg);
2819 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2821 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an MRGCTX; fetch the class vtable out of it. */
2822 MonoInst *mrgctx_var = vtable_var;
2825 vtable_reg = alloc_preg (cfg);
2826 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2827 vtable_var->type = STACK_PTR;
2833 int vtable_reg, res_reg;
2835 vtable_reg = alloc_preg (cfg);
2836 res_reg = alloc_preg (cfg);
/* Instance method: the vtable hangs off `this`. */
2837 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-entry patch descriptor: the method
 * it belongs to, whether the lookup goes through an MRGCTX, the wrapped
 * patch data (PATCH_TYPE/PATCH_DATA), and the kind of info to fetch.
 */
2842 static MonoJumpInfoRgctxEntry *
2843 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2845 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2846 res->method = method;
2847 res->in_mrgctx = in_mrgctx;
2848 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2849 res->data->type = patch_type;
2850 res->data->data.target = patch_data;
2851 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline, resolving ENTRY
 * against the runtime generic context in RGCTX.
 */
2856 static inline MonoInst*
2857 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2859 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX_TYPE info (e.g. the vtable) for KLASS from
 * the current method's runtime generic context.
 */
2863 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2864 MonoClass *klass, int rgctx_type)
2866 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2867 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2869 return emit_rgctx_fetch (cfg, rgctx, entry);
2873 * emit_get_rgctx_method:
2875 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2876 * normal constants, else emit a load from the rgctx.
2879 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2880 MonoMethod *cmethod, int rgctx_type)
2882 if (!context_used) {
/* Non-shared code: the method is known at compile time, emit a constant. */
2885 switch (rgctx_type) {
2886 case MONO_RGCTX_INFO_METHOD:
2887 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2889 case MONO_RGCTX_INFO_METHOD_RGCTX:
2890 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2893 g_assert_not_reached ();
/* Shared code: resolve through the runtime generic context. */
2896 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2897 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2899 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX_TYPE info for FIELD from the current
 * method's runtime generic context.
 */
2904 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2905 MonoClassField *field, int rgctx_type)
2907 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2908 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2910 return emit_rgctx_fetch (cfg, rgctx, entry);
2914 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant.  On targets
 * with a dedicated vtable register, route the argument through it.
 */
2917 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2919 MonoInst *vtable_arg;
2921 int context_used = 0;
2923 if (cfg->generic_sharing_context)
2924 context_used = mono_class_check_context_used (klass);
2927 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2928 klass, MONO_RGCTX_INFO_VTABLE);
2930 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2934 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs its own trampoline signature. */
2937 if (COMPILE_LLVM (cfg))
2938 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2940 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2941 #ifdef MONO_ARCH_VTABLE_REG
2942 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2943 cfg->uses_vtable_reg = TRUE;
2950 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an array of exactly ARRAY_CLASS
 * (used by stelem-style type checks), throwing ArrayTypeMismatchException
 * on failure.  The comparison strategy depends on the compilation mode:
 * class pointer (shared/AOT), rgctx-loaded vtable (gshared), or a vtable
 * constant / immediate.
 */
2953 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2955 int vtable_reg = alloc_preg (cfg);
2956 int context_used = 0;
2958 if (cfg->generic_sharing_context)
2959 context_used = mono_class_check_context_used (array_class);
/* Loads the vtable and faults (NullReferenceException) on a null OBJ. */
2961 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2963 if (cfg->opt & MONO_OPT_SHARED) {
2964 int class_reg = alloc_preg (cfg);
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2966 if (cfg->compile_aot) {
2967 int klass_reg = alloc_preg (cfg);
2968 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2969 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2973 } else if (context_used) {
2974 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the rgctx. */
2976 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2977 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2979 if (cfg->compile_aot) {
2983 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2985 vt_reg = alloc_preg (cfg);
2986 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2987 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2990 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2996 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (read from the
 * object in OBJ_REG) and the target KLASS into the per-thread JIT TLS
 * data, so a failing cast can produce a detailed error message.
 */
3000 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3002 if (mini_get_debug_options ()->better_cast_details) {
3003 int to_klass_reg = alloc_preg (cfg);
3004 int vtable_reg = alloc_preg (cfg);
3005 int klass_reg = alloc_preg (cfg);
3006 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this backend: the feature cannot work. */
3009 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3013 MONO_ADD_INS (cfg->cbb, tls_get);
3014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3017 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3018 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3019 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state written by save_cast_details () once the
 * cast has succeeded (zeroing class_cast_from is sufficient).
 */
3024 reset_cast_details (MonoCompile *cfg)
3026 /* Reset the variables holding the cast details */
3027 if (mini_get_debug_options ()->better_cast_details) {
3028 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3030 MONO_ADD_INS (cfg->cbb, tls_get);
3031 /* It is enough to reset the from field */
3032 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3037 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3038 * generic code is generated.
3041 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3043 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3046 MonoInst *rgctx, *addr;
3048 /* FIXME: What if the class is shared? We might not
3049 have to get the address of the method from the
/* Shared generic code: resolve Unbox's code address through the rgctx
 * and call it indirectly. */
3051 addr = emit_get_rgctx_method (cfg, context_used, method,
3052 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3054 rgctx = emit_get_rgctx (cfg, method, context_used);
3056 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: just call Nullable<T>.Unbox directly. */
3058 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the CIL `unbox` check for sp[0] against KLASS: verify the boxed
 * object's rank is 0 and its element class matches KLASS's, throwing
 * InvalidCastException otherwise; then compute the address of the payload
 * (object pointer + sizeof (MonoObject)).
 *   NOTE(review): non-contiguous excerpt; the final return is elided.
 */
3063 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3067 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3068 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3069 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3070 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3072 obj_reg = sp [0]->dreg;
/* Faulting load: also performs the implicit null check. */
3073 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3076 /* FIXME: generics */
3077 g_assert (klass->rank == 0);
/* Boxed valuetypes are never arrays: rank must be 0. */
3080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3081 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3087 MonoInst *element_class;
3089 /* This assertion is from the unboxcast insn */
3090 g_assert (klass->rank == 0);
/* Shared generic code: compare against the rgctx-resolved element class. */
3092 element_class = emit_get_rgctx_klass (cfg, context_used,
3093 klass->element_class, MONO_RGCTX_INFO_KLASS);
3095 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3096 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3098 save_cast_details (cfg, klass->element_class, obj_reg);
3099 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3100 reset_cast_details (cfg);
/* Result: pointer just past the object header, i.e. the unboxed value. */
3103 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3104 MONO_ADD_INS (cfg->cbb, add);
3105 add->type = STACK_MP;
3112 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an object of KLASS (FOR_BOX distinguishes box
 * allocations).  Picks the cheapest available allocator: a GC managed
 * allocator method, a size-specialized allocation function, a corlib
 * token-based AOT helper, or the generic mono_object_new* icalls; under
 * generic sharing the class/vtable comes from the rgctx.
 *   NOTE(review): non-contiguous excerpt; several braces/returns elided.
 */
3115 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3117 MonoInst *iargs [2];
3123 MonoInst *iargs [2];
3126 FIXME: we cannot get managed_alloc here because we can't get
3127 the class's vtable (because it's not a closed class)
3129 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3130 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared generic path: the icall wants a class with MONO_OPT_SHARED,
 * a vtable otherwise. */
3133 if (cfg->opt & MONO_OPT_SHARED)
3134 rgctx_info = MONO_RGCTX_INFO_KLASS;
3136 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3137 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3139 if (cfg->opt & MONO_OPT_SHARED) {
3140 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3142 alloc_ftn = mono_object_new;
3145 alloc_ftn = mono_object_new_specific;
3148 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3151 if (cfg->opt & MONO_OPT_SHARED) {
3152 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3153 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3155 alloc_ftn = mono_object_new;
3156 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3157 /* This happens often in argument checking code, eg. throw new FooException... */
3158 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3159 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3160 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3162 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3163 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException via the cfg. */
3167 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3168 cfg->exception_ptr = klass;
3172 #ifndef MONO_CROSS_COMPILE
3173 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3176 if (managed_alloc) {
3177 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3178 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3180 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer words as first arg. */
3182 guint32 lw = vtable->klass->instance_size;
3183 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3184 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3185 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3188 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3192 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3196 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the CIL `box` of VAL to KLASS: Nullable<T> delegates to the
 * managed Box() method (through the rgctx in shared code); otherwise an
 * object is allocated via handle_alloc () and the value is stored just
 * past the object header.
 */
3199 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3201 MonoInst *alloc, *ins;
3203 if (mono_class_is_nullable (klass)) {
3204 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3207 /* FIXME: What if the class is shared? We might not
3208 have to get the method address from the RGCTX. */
3209 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3210 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3211 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3213 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3215 return mono_emit_method_call (cfg, method, &val, NULL);
3219 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3223 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3228 // FIXME: This doesn't work yet (class libs tests fail?)
/* Decides whether an isinst/castclass against KLASS needs the slow icall
 * path (interfaces, arrays, nullables, MBR, sealed, variant generics,
 * type variables).  The leading TRUE currently forces the icall path for
 * every class — see the FIXME above. */
3229 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3232 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the CIL `castclass` of SRC to KLASS.  Under generic sharing
 * the complex cases go through the mono_object_castclass icall; the
 * inline fast path skips null, then checks interface/vtable/class as
 * appropriate, throwing InvalidCastException on mismatch.
 *   NOTE(review): non-contiguous excerpt; some braces/returns elided.
 */
3235 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3237 MonoBasicBlock *is_null_bb;
3238 int obj_reg = src->dreg;
3239 int vtable_reg = alloc_preg (cfg);
3240 MonoInst *klass_inst = NULL;
3245 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3246 klass, MONO_RGCTX_INFO_KLASS);
3248 if (is_complex_isinst (klass)) {
3249 /* Complex case, handle by an icall */
3255 args [1] = klass_inst;
3257 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3259 /* Simple case, handled by the code below */
/* Null always passes castclass: branch straight to the end. */
3263 NEW_BBLOCK (cfg, is_null_bb);
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3268 save_cast_details (cfg, klass, obj_reg);
3270 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3272 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3274 int klass_reg = alloc_preg (cfg);
3276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single exact-match comparison. */
3278 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3279 /* the remoting code is broken, access the class for now */
3280 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 cfg->exception_ptr = klass;
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3295 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3299 MONO_START_BB (cfg, is_null_bb);
3301 reset_cast_details (cfg);
3307 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the CIL `isinst` of SRC against KLASS: result is the object
 * itself on success, NULL on failure.  Complex classes go through the
 * mono_object_isinst icall; the inline path branches between false_bb
 * (result NULL), is_null_bb (result = input) and end_bb, with special
 * handling for interfaces, arrays/enums and sealed classes.
 *   NOTE(review): non-contiguous excerpt; some braces/returns elided.
 */
3310 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3313 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3314 int obj_reg = src->dreg;
3315 int vtable_reg = alloc_preg (cfg);
3316 int res_reg = alloc_preg (cfg);
3317 MonoInst *klass_inst = NULL;
3320 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3322 if (is_complex_isinst (klass)) {
3325 /* Complex case, handle by an icall */
3331 args [1] = klass_inst;
3333 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3335 /* Simple case, the code below can handle it */
3339 NEW_BBLOCK (cfg, is_null_bb);
3340 NEW_BBLOCK (cfg, false_bb);
3341 NEW_BBLOCK (cfg, end_bb);
3343 /* Do the assignment at the beginning, so the other assignment can be if converted */
3344 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3345 ins->type = STACK_OBJ;
/* Null input: isinst yields the (null) input unchanged. */
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3353 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3354 g_assert (!context_used);
3355 /* the is_null_bb target simply copies the input register to the output */
3356 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3358 int klass_reg = alloc_preg (cfg);
/* Array case: match rank first, then the element (cast) class. */
3361 int rank_reg = alloc_preg (cfg);
3362 int eclass_reg = alloc_preg (cfg);
3364 g_assert (!context_used);
3365 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3366 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enums/primitives boxed in arrays: check parentage. */
3370 if (klass->cast_class == mono_defaults.object_class) {
3371 int parent_reg = alloc_preg (cfg);
3372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3373 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3374 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3376 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3377 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3378 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3380 } else if (klass->cast_class == mono_defaults.enum_class) {
3381 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3383 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3384 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3386 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3387 /* Check that the object is a vector too */
3388 int bounds_reg = alloc_preg (cfg);
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3394 /* the is_null_bb target simply copies the input register to the output */
3395 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3397 } else if (mono_class_is_nullable (klass)) {
3398 g_assert (!context_used);
3399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3400 /* the is_null_bb target simply copies the input register to the output */
3401 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single exact comparison decides the result. */
3403 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3404 g_assert (!context_used);
3405 /* the remoting code is broken, access the class for now */
3406 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3407 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3409 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3410 cfg->exception_ptr = klass;
3413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3422 /* the is_null_bb target simply copies the input register to the output */
3423 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the check failed, the result is NULL. */
3428 MONO_START_BB (cfg, false_bb);
3430 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3433 MONO_START_BB (cfg, is_null_bb);
3435 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst used by transparent proxies; see the
 * comment below for the 0/1/2 result encoding.  Proxy objects whose type
 * cannot be decided locally yield 2 so the caller can do a remote check.
 *   NOTE(review): non-contiguous excerpt; some braces/returns elided.
 */
3441 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3443 /* This opcode takes as input an object reference and a class, and returns:
3444 0) if the object is an instance of the class,
3445 1) if the object is not instance of the class,
3446 2) if the object is a proxy whose type cannot be determined */
3449 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3450 int obj_reg = src->dreg;
3451 int dreg = alloc_ireg (cfg);
3453 int klass_reg = alloc_preg (cfg);
3455 NEW_BBLOCK (cfg, true_bb);
3456 NEW_BBLOCK (cfg, false_bb);
3457 NEW_BBLOCK (cfg, false2_bb);
3458 NEW_BBLOCK (cfg, end_bb);
3459 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null object: not an instance (result 1). */
3461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3464 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3465 NEW_BBLOCK (cfg, interface_fail_bb);
3467 tmp_reg = alloc_preg (cfg);
3468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3469 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: it may still be a proxy with custom type info. */
3470 MONO_START_BB (cfg, interface_fail_bb);
3471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3473 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3475 tmp_reg = alloc_preg (cfg);
3476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface class: decide between the proxy and plain paths. */
3480 tmp_reg = alloc_preg (cfg);
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3484 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3485 tmp_reg = alloc_preg (cfg);
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3489 tmp_reg = alloc_preg (cfg);
3490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info))
3491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3494 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3497 MONO_START_BB (cfg, no_proxy_bb);
3499 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three result values and converge at end_bb. */
3502 MONO_START_BB (cfg, false_bb);
3504 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3507 MONO_START_BB (cfg, false2_bb);
3509 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3512 MONO_START_BB (cfg, true_bb);
3514 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3516 MONO_START_BB (cfg, end_bb);
3519 MONO_INST_NEW (cfg, ins, OP_ICONST);
3521 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the remoting-aware castclass check for KLASS on the
 * object reference SRC. The emitted code produces an integer result in a new
 * ireg (see the comment below for the value encoding); the actual cast failure
 * is raised as an InvalidCastException from the emitted code, not from here.
 * NOTE(review): the sampled view is missing some interior lines (e.g. the
 * final return) — do not treat this excerpt as the complete function body.
 */
3527 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3529 /* This opcode takes as input an object reference and a class, and returns:
3530 0) if the object is an instance of the class,
3531 1) if the object is a proxy whose type cannot be determined
3532 an InvalidCastException exception is thrown otherwise*/
3535 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3536 int obj_reg = src->dreg;
3537 int dreg = alloc_ireg (cfg);
3538 int tmp_reg = alloc_preg (cfg);
3539 int klass_reg = alloc_preg (cfg);
3541 NEW_BBLOCK (cfg, end_bb);
3542 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully: branch straight to the ok result. */
3544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class info so a failing cast can report a useful message. */
3547 save_cast_details (cfg, klass, obj_reg);
3549 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface target: try the interface bitmap first; on failure the object must
 * be a transparent proxy with custom type info, otherwise throw. */
3550 NEW_BBLOCK (cfg, interface_fail_bb);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3553 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3554 MONO_START_BB (cfg, interface_fail_bb);
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy either -> this check throws InvalidCastException. */
3557 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3559 tmp_reg = alloc_preg (cfg);
3560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3562 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 = "type cannot be determined here". */
3564 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3568 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface target: if the object is a transparent proxy, test against the
 * remote class's proxy_class instead of the object's own class. */
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3572 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3574 tmp_reg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* A proxy without custom type info is checked like a plain object. */
3578 tmp_reg = alloc_preg (cfg);
3579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3583 NEW_BBLOCK (cfg, fail_1_bb);
3585 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3587 MONO_START_BB (cfg, fail_1_bb);
3589 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3592 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing check here throws InvalidCastException. */
3594 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3597 MONO_START_BB (cfg, ok_result_bb);
3599 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3601 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an OP_ICONST-typed instruction on the IL stack. */
3604 MONO_INST_NEW (cfg, ins, OP_ICONST);
3606 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR for a delegate constructor: allocate the delegate object,
 * store its target/method fields, and install the delegate invoke trampoline,
 * mirroring what mono_delegate_ctor () does at runtime.
 */
3612 * Returns NULL and set the cfg exception on error.
3614 static G_GNUC_UNUSED MonoInst*
3615 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3617 gpointer *trampoline;
3618 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate instance itself. */
3622 obj = handle_alloc (cfg, klass, FALSE, 0);
3626 /* Inline the contents of mono_delegate_ctor */
3628 /* Set target field */
3629 /* Optimize away setting of NULL target */
3630 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3633 /* Set method field */
3634 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3638 * To avoid looking up the compiled code belonging to the target method
3639 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3640 * store it, and we fill it after the method has been compiled.
/* Only possible when not AOT compiling and for non-dynamic methods, since the
 * slot address is baked into the generated code as a constant. */
3642 if (!cfg->compile_aot && !method->dynamic) {
3643 MonoInst *code_slot_ins;
3646 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method_code_hash is shared per-domain state: take the domain lock
 * around lazy creation and lookup/insert of the code slot. */
3648 domain = mono_domain_get ();
3649 mono_domain_lock (domain);
3650 if (!domain_jit_info (domain)->method_code_hash)
3651 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3652 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3654 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3655 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3657 mono_domain_unlock (domain);
3659 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3664 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patchable constant; otherwise it is
 * created now and emitted as an immediate pointer. */
3665 if (cfg->compile_aot) {
3666 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3668 trampoline = mono_create_delegate_trampoline (klass);
3669 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3673 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall wrapper for a NEWOBJ on a
 * multi-dimensional array constructor of the given RANK, with the dimension
 * arguments taken from SP.
 */
3679 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3681 MonoJitICallInfo *info;
3683 /* Need to register the icall so it gets an icall wrapper */
3684 info = mono_get_array_new_va_icall (rank);
3686 cfg->flags |= MONO_CFG_HAS_VARARGS;
3688 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls with this convention, so fall back. */
3689 cfg->disable_llvm = TRUE;
3691 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3692 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the compile unit uses a GOT variable which has not yet been
 * materialized, prepend an OP_LOAD_GOTADDR to the entry basic block so the
 * GOT register is initialized before any code that needs it runs, and add a
 * dummy use in the exit block to keep the variable live.
 */
3696 mono_emit_load_got_addr (MonoCompile *cfg)
3698 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already emitted. */
3700 if (!cfg->got_var || cfg->got_var_allocated)
3703 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3704 getaddr->dreg = cfg->got_var->dreg;
3706 /* Add it to the start of the first bblock */
/* Manually link it in front of any existing code; MONO_ADD_INS would append. */
3707 if (cfg->bb_entry->code) {
3708 getaddr->next = cfg->bb_entry->code;
3709 cfg->bb_entry->code = getaddr;
3712 MONO_ADD_INS (cfg->bb_entry, getaddr);
3714 cfg->got_var_allocated = TRUE;
3717 * Add a dummy use to keep the got_var alive, since real uses might
3718 * only be generated by the back ends.
3719 * Add it to end_bblock, so the variable's lifetime covers the whole
3721 * It would be better to make the usage of the got var explicit in all
3722 * cases when the backend needs it (i.e. calls, throw etc.), so this
3723 * wouldn't be needed.
3725 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3726 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via the MONO_INLINELIMIT
 * environment variable, defaulting to INLINE_LENGTH_LIMIT. */
3729 static int inline_limit;
3730 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Checks method attributes, IL size against inline_limit,
 * class cctor constraints, declarative security, and (under soft float)
 * R4 parameters/returns. Returns a boolean verdict (some return statements
 * fall outside this sampled excerpt).
 */
3733 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3735 MonoMethodHeaderSummary header;
3737 #ifdef MONO_ARCH_SOFT_FLOAT
3738 MonoMethodSignature *sig = mono_method_signature (method);
/* Generic sharing and deep inline recursion disable inlining outright. */
3742 if (cfg->generic_sharing_context)
3745 if (cfg->inline_depth > 10)
3748 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With cheap LMF ops, small icalls/pinvokes can be inlined directly. */
3749 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3750 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3751 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Summary fetch fails for methods whose header cannot be parsed cheaply. */
3756 if (!mono_method_get_header_summary (method, &header))
3759 /*runtime, icall and pinvoke are checked by summary call*/
3760 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3761 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3762 (method->klass->marshalbyref) ||
3766 /* also consider num_locals? */
3767 /* Do the size check early to avoid creating vtables */
3768 if (!inline_limit_inited) {
3769 if (getenv ("MONO_INLINELIMIT"))
3770 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3772 inline_limit = INLINE_LENGTH_LIMIT;
3773 inline_limit_inited = TRUE;
3775 if (header.code_size >= inline_limit)
3779 * if we can initialize the class of the method right away, we do,
3780 * otherwise we don't allow inlining if the class needs initialization,
3781 * since it would mean inserting a call to mono_runtime_class_init()
3782 * inside the inlined code
3784 if (!(cfg->opt & MONO_OPT_SHARED)) {
3785 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3786 if (cfg->run_cctors && method->klass->has_cctor) {
3787 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3788 if (!method->klass->runtime_info)
3789 /* No vtable created yet */
3791 vtable = mono_class_vtable (cfg->domain, method->klass);
3794 /* This makes so that inline cannot trigger */
3795 /* .cctors: too many apps depend on them */
3796 /* running with a specific order... */
3797 if (! vtable->initialized)
3799 mono_runtime_class_init (vtable);
3801 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3802 if (!method->klass->runtime_info)
3803 /* No vtable created yet */
3805 vtable = mono_class_vtable (cfg->domain, method->klass);
3808 if (!vtable->initialized)
3813 * If we're compiling for shared code
3814 * the cctor will need to be run at aot method load time, for example,
3815 * or at the end of the compilation of the inlining method.
3817 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3822 * CAS - do not inline methods with declarative security
3823 * Note: this has to be before any possible return TRUE;
3825 if (mono_method_has_declsec (method))
3828 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float cannot inline methods taking or returning R4 values. */
3830 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3832 for (i = 0; i < sig->param_count; ++i)
3833 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires a class
 * initialization call to be emitted for VTABLE's class. Already-initialized
 * classes (outside AOT), BeforeFieldInit classes, classes with no pending
 * cctor, and instance methods of the class itself need no explicit call.
 */
3841 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3843 if (vtable->initialized && !cfg->compile_aot)
3846 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3849 if (!mono_class_needs_cctor_run (vtable->klass, method))
3852 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3853 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have type KLASS. When BCHECK is set, an array
 * bounds check against max_length is emitted first. Returns the address
 * instruction (STACK_PTR).
 */
3860 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3864 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3866 mono_class_init (klass);
3867 size = mono_class_array_element_size (klass);
3869 mult_reg = alloc_preg (cfg);
3870 array_reg = arr->dreg;
3871 index_reg = index->dreg;
3873 #if SIZEOF_REGISTER == 8
3874 /* The array reg is 64 bits but the index reg is only 32 */
3875 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself, so the index can be used as-is. */
3877 index2_reg = index_reg;
3879 index2_reg = alloc_preg (cfg);
3880 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32 bit target: narrow a 64 bit index down to the native width. */
3883 if (index->type == STACK_I8) {
3884 index2_reg = alloc_preg (cfg);
3885 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3887 index2_reg = index_reg;
3892 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3894 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* For power-of-two element sizes, fold the scale+offset into a single LEA. */
3895 if (size == 1 || size == 2 || size == 4 || size == 8) {
3896 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3898 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3899 ins->type = STACK_PTR;
/* Generic path: addr = array + index * size + offsetof (vector). */
3905 add_reg = alloc_preg (cfg);
3907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3908 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3909 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3910 ins->type = STACK_PTR;
3911 MONO_ADD_INS (cfg->cbb, ins);
/* Only built when the architecture has native multiply support, since the
 * address computation below relies on pointer-sized multiplies. */
3916 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element type KLASS, including per-dimension
 * lower-bound adjustment and range checks against the MonoArrayBounds
 * records. Returns the address instruction (STACK_MP).
 */
3918 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3920 int bounds_reg = alloc_preg (cfg);
3921 int add_reg = alloc_preg (cfg);
3922 int mult_reg = alloc_preg (cfg);
3923 int mult2_reg = alloc_preg (cfg);
3924 int low1_reg = alloc_preg (cfg);
3925 int low2_reg = alloc_preg (cfg);
3926 int high1_reg = alloc_preg (cfg);
3927 int high2_reg = alloc_preg (cfg);
3928 int realidx1_reg = alloc_preg (cfg);
3929 int realidx2_reg = alloc_preg (cfg);
3930 int sum_reg = alloc_preg (cfg);
3935 mono_class_init (klass);
3936 size = mono_class_array_element_size (klass);
3938 index1 = index_ins1->dreg;
3939 index2 = index_ins2->dreg;
3941 /* range checking */
3942 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3943 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; unsigned compare against length
 * catches both negative and too-large indices in one check. */
3945 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3946 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3947 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3949 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3950 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3951 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check, reading the second MonoArrayBounds record. */
3953 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3954 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3955 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3956 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3957 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3958 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3959 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + offsetof (vector). */
3961 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3962 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3963 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3964 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3965 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3967 ins->type = STACK_MP;
3969 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array accessor method
 * CMETHOD (Get/Set/Address): rank 1 and — when intrinsics are enabled and
 * native multiply exists — rank 2 are inlined; higher ranks fall back to a
 * call to the marshal-generated ElementAddr wrapper.
 */
3976 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3980 MonoMethod *addr_method;
/* For a setter the trailing value argument is not an index. */
3983 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3986 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3988 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3989 /* emit_ldelema_2 depends on OP_LMUL */
3990 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3991 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated address helper for this rank/size. */
3995 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3996 addr_method = mono_marshal_get_array_address (rank, element_size);
3997 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see mono_set_break_policy). */
4002 static MonoBreakPolicy
4003 always_insert_breakpoint (MonoMethod *method)
4005 return MONO_BREAK_POLICY_ALWAYS;
/* Currently-installed break policy callback; defaults to always inserting. */
4008 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4011 * mono_set_break_policy:
4012 * policy_callback: the new callback function
4014 * Allow embedders to decide whether to actually obey breakpoint instructions
4015 * (both break IL instructions and Debugger.Break () method calls), for example
4016 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4017 * untrusted or semi-trusted code.
4019 * @policy_callback will be called every time a break point instruction needs to
4020 * be inserted with the method argument being the method that calls Debugger.Break()
4021 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4022 * if it wants the breakpoint to not be effective in the given method.
4023 * #MONO_BREAK_POLICY_ALWAYS is the default.
4026 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-insert policy. */
4028 if (policy_callback)
4029 break_policy_func = policy_callback;
4031 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — "brekpoint" typo is part of the public
 * spelling used by callers in this file; renaming would need a file-wide edit)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 * emitted as a real OP_BREAK, warning on out-of-range callback results.
 */
4035 should_insert_brekpoint (MonoMethod *method) {
4036 switch (break_policy_func (method)) {
4037 case MONO_BREAK_POLICY_ALWAYS:
4039 case MONO_BREAK_POLICY_NEVER:
4041 case MONO_BREAK_POLICY_ON_DBG:
/* Only honor the breakpoint when running under the Mono debugger. */
4042 return mono_debug_using_mono_debugger ();
4044 g_warning ("Incorrect value returned from break policy callback");
4049 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the System.Array Get/SetGenericValueImpl icalls as a direct
 * element-address computation plus a typed load/store. args[0] is the array,
 * args[1] the index, args[2] the value address; IS_SET selects store vs load.
 */
4051 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4053 MonoInst *addr, *store, *load;
4054 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4056 /* the bounds check is already done by the callers */
4057 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the element; Get: copy the element into *args[2]. */
4059 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4060 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4063 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Give the SIMD intrinsics pass a chance to replace a constructor call
 * CMETHOD with inline IR; returns the replacement instruction or NULL.
 */
4069 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4071 MonoInst *ins = NULL;
4072 #ifdef MONO_ARCH_SIMD_INTRINSICS
4073 if (cfg->opt & MONO_OPT_SIMD) {
4074 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: if CMETHOD is one of a known set of corlib methods
 * (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 * Debugger, Environment, Math, SIMD types), emit inline IR replacing the call
 * and return the resulting value instruction; otherwise defer to the
 * architecture-specific hook. Returns NULL when no intrinsic applies.
 */
4084 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4086 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
4088 static MonoClass *runtime_helpers_class = NULL;
4089 if (! runtime_helpers_class)
4090 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4091 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4093 if (cmethod->klass == mono_defaults.string_class) {
4094 if (strcmp (cmethod->name, "get_Chars") == 0) {
4095 int dreg = alloc_ireg (cfg);
4096 int index_reg = alloc_preg (cfg);
4097 int mult_reg = alloc_preg (cfg);
4098 int add_reg = alloc_preg (cfg);
4100 #if SIZEOF_REGISTER == 8
4101 /* The array reg is 64 bits but the index reg is only 32 */
4102 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4104 index_reg = args [1]->dreg;
4106 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4108 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: LEA folds the *2 scale and chars offset into one instruction. */
4109 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4110 add_reg = ins->dreg;
4111 /* Avoid a warning */
4113 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4116 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4117 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4118 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4119 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4121 type_from_op (ins, NULL, NULL);
4123 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4124 int dreg = alloc_ireg (cfg);
4125 /* Decompose later to allow more optimizations */
4126 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4127 ins->type = STACK_I4;
/* OP_STRLEN dereferences the string, so it can fault on null. */
4128 ins->flags |= MONO_INST_FAULT;
4129 cfg->cbb->has_array_access = TRUE;
4130 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4133 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4134 int mult_reg = alloc_preg (cfg);
4135 int add_reg = alloc_preg (cfg);
4137 /* The corlib functions check for oob already. */
4138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4139 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4141 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4144 } else if (cmethod->klass == mono_defaults.object_class) {
4146 if (strcmp (cmethod->name, "GetType") == 0) {
4147 int dreg = alloc_preg (cfg);
4148 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type is the cached System.Type for the object's class. */
4149 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4150 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4151 type_from_op (ins, NULL, NULL);
4154 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid for a non-moving GC. */
4155 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4156 int dreg = alloc_ireg (cfg);
4157 int t1 = alloc_ireg (cfg);
4159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4160 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4161 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a nop. */
4165 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4166 MONO_INST_NEW (cfg, ins, OP_NOP);
4167 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4171 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl; first letter
 * ('S' vs 'G') selects the store vs load variant. */
4172 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4173 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4175 #ifndef MONO_BIG_ARRAYS
4177 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4180 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4181 int dreg = alloc_ireg (cfg);
4182 int bounds_reg = alloc_ireg (cfg);
4183 MonoBasicBlock *end_bb, *szarray_bb;
4184 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4186 NEW_BBLOCK (cfg, end_bb);
4187 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer marks an szarray (rank 1, lower bound 0). */
4189 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4190 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4191 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4192 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4193 /* Non-szarray case */
4195 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4196 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4198 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4199 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4201 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) == max_length, GetLowerBound(0) == 0. */
4204 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4205 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4207 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4208 MONO_START_BB (cfg, end_bb);
4210 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4211 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
4217 if (cmethod->name [0] != 'g')
4220 if (strcmp (cmethod->name, "get_Rank") == 0) {
4221 int dreg = alloc_ireg (cfg);
4222 int vtable_reg = alloc_preg (cfg);
4223 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4224 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4225 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4226 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4227 type_from_op (ins, NULL, NULL);
4230 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4231 int dreg = alloc_ireg (cfg);
4233 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4234 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4235 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4240 } else if (cmethod->klass == runtime_helpers_class) {
4242 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4243 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4247 } else if (cmethod->klass == mono_defaults.thread_class) {
4248 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4249 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4250 MONO_ADD_INS (cfg->cbb, ins);
4252 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4253 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4254 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: fast Enter/Exit trampolines or IL fastpath --- */
4257 } else if (cmethod->klass == mono_defaults.monitor_class) {
4258 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4259 /* The trampolines don't work under SGEN */
4260 gboolean is_moving_gc = mono_gc_is_moving ();
4262 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4265 if (COMPILE_LLVM (cfg)) {
4267 * Pass the argument normally, the LLVM backend will handle the
4268 * calling convention problems.
4270 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the object is passed in a fixed register, not on the stack. */
4272 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4273 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4274 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4275 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4278 return (MonoInst*)call;
4279 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4282 if (COMPILE_LLVM (cfg)) {
4283 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4285 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4286 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4287 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4288 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4291 return (MonoInst*)call;
4293 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4294 MonoMethod *fast_method = NULL;
4296 /* Avoid infinite recursion */
4297 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4298 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4299 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4302 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4303 strcmp (cmethod->name, "Exit") == 0)
4304 fast_method = mono_monitor_get_fast_path (cmethod);
4308 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked: atomic read/add/exchange/CAS --- */
4310 } else if (cmethod->klass->image == mono_defaults.corlib &&
4311 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4312 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4315 #if SIZEOF_REGISTER == 8
4316 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4317 /* 64 bit reads are already atomic */
4318 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4319 ins->dreg = mono_alloc_preg (cfg);
4320 ins->inst_basereg = args [0]->dreg;
4321 ins->inst_offset = 0;
4322 MONO_ADD_INS (cfg->cbb, ins);
4326 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic-add of a constant +1/-1 operand. */
4327 if (strcmp (cmethod->name, "Increment") == 0) {
4328 MonoInst *ins_iconst;
4331 if (fsig->params [0]->type == MONO_TYPE_I4)
4332 opcode = OP_ATOMIC_ADD_NEW_I4;
4333 #if SIZEOF_REGISTER == 8
4334 else if (fsig->params [0]->type == MONO_TYPE_I8)
4335 opcode = OP_ATOMIC_ADD_NEW_I8;
4338 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4339 ins_iconst->inst_c0 = 1;
4340 ins_iconst->dreg = mono_alloc_ireg (cfg);
4341 MONO_ADD_INS (cfg->cbb, ins_iconst);
4343 MONO_INST_NEW (cfg, ins, opcode);
4344 ins->dreg = mono_alloc_ireg (cfg);
4345 ins->inst_basereg = args [0]->dreg;
4346 ins->inst_offset = 0;
4347 ins->sreg2 = ins_iconst->dreg;
4348 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4349 MONO_ADD_INS (cfg->cbb, ins);
4351 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4352 MonoInst *ins_iconst;
4355 if (fsig->params [0]->type == MONO_TYPE_I4)
4356 opcode = OP_ATOMIC_ADD_NEW_I4;
4357 #if SIZEOF_REGISTER == 8
4358 else if (fsig->params [0]->type == MONO_TYPE_I8)
4359 opcode = OP_ATOMIC_ADD_NEW_I8;
4362 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4363 ins_iconst->inst_c0 = -1;
4364 ins_iconst->dreg = mono_alloc_ireg (cfg);
4365 MONO_ADD_INS (cfg->cbb, ins_iconst);
4367 MONO_INST_NEW (cfg, ins, opcode);
4368 ins->dreg = mono_alloc_ireg (cfg);
4369 ins->inst_basereg = args [0]->dreg;
4370 ins->inst_offset = 0;
4371 ins->sreg2 = ins_iconst->dreg;
4372 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4373 MONO_ADD_INS (cfg->cbb, ins);
4375 } else if (strcmp (cmethod->name, "Add") == 0) {
4378 if (fsig->params [0]->type == MONO_TYPE_I4)
4379 opcode = OP_ATOMIC_ADD_NEW_I4;
4380 #if SIZEOF_REGISTER == 8
4381 else if (fsig->params [0]->type == MONO_TYPE_I8)
4382 opcode = OP_ATOMIC_ADD_NEW_I8;
4386 MONO_INST_NEW (cfg, ins, opcode);
4387 ins->dreg = mono_alloc_ireg (cfg);
4388 ins->inst_basereg = args [0]->dreg;
4389 ins->inst_offset = 0;
4390 ins->sreg2 = args [1]->dreg;
4391 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4392 MONO_ADD_INS (cfg->cbb, ins);
4395 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4397 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4398 if (strcmp (cmethod->name, "Exchange") == 0) {
4400 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
/* Pick the exchange width matching the parameter/pointer size. */
4402 if (fsig->params [0]->type == MONO_TYPE_I4)
4403 opcode = OP_ATOMIC_EXCHANGE_I4;
4404 #if SIZEOF_REGISTER == 8
4405 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4406 (fsig->params [0]->type == MONO_TYPE_I))
4407 opcode = OP_ATOMIC_EXCHANGE_I8;
4409 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4410 opcode = OP_ATOMIC_EXCHANGE_I4;
4415 MONO_INST_NEW (cfg, ins, opcode);
4416 ins->dreg = mono_alloc_ireg (cfg);
4417 ins->inst_basereg = args [0]->dreg;
4418 ins->inst_offset = 0;
4419 ins->sreg2 = args [1]->dreg;
4420 MONO_ADD_INS (cfg->cbb, ins);
4422 switch (fsig->params [0]->type) {
4424 ins->type = STACK_I4;
4428 ins->type = STACK_I8;
4430 case MONO_TYPE_OBJECT:
4431 ins->type = STACK_OBJ;
4434 g_assert_not_reached ();
/* Reference stores bypass the normal write path, so emit a write barrier
 * plus a dummy use keeping the new value alive across the barrier call. */
4437 if (cfg->gen_write_barriers && is_ref) {
4438 MonoInst *dummy_use;
4439 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4440 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4441 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4444 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4446 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4447 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4449 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4450 if (fsig->params [1]->type == MONO_TYPE_I4)
4452 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4453 size = sizeof (gpointer);
4454 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4457 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4458 ins->dreg = alloc_ireg (cfg);
4459 ins->sreg1 = args [0]->dreg;
4460 ins->sreg2 = args [1]->dreg;
4461 ins->sreg3 = args [2]->dreg;
4462 ins->type = STACK_I4;
4463 MONO_ADD_INS (cfg->cbb, ins);
4464 } else if (size == 8) {
4465 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4466 ins->dreg = alloc_ireg (cfg);
4467 ins->sreg1 = args [0]->dreg;
4468 ins->sreg2 = args [1]->dreg;
4469 ins->sreg3 = args [2]->dreg;
4470 ins->type = STACK_I8;
4471 MONO_ADD_INS (cfg->cbb, ins);
4473 /* g_assert_not_reached (); */
4475 if (cfg->gen_write_barriers && is_ref) {
4476 MonoInst *dummy_use;
4477 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4478 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4479 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4482 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- System.Diagnostics.Debugger / System.Environment --- */
4486 } else if (cmethod->klass->image == mono_defaults.corlib) {
4487 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4488 && strcmp (cmethod->klass->name, "Debugger") == 0) {
/* The break policy decides whether Debugger.Break () becomes a real trap. */
4489 if (should_insert_brekpoint (cfg->method))
4490 MONO_INST_NEW (cfg, ins, OP_BREAK);
4492 MONO_INST_NEW (cfg, ins, OP_NOP);
4493 MONO_ADD_INS (cfg->cbb, ins);
4496 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4497 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Resolved at compile time to a constant 1/0 depending on the target OS. */
4499 EMIT_NEW_ICONST (cfg, ins, 1);
4501 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math: only SIMD handling here, see comment below --- */
4505 } else if (cmethod->klass == mono_defaults.math_class) {
4507 * There is general branches code for Min/Max, but it does not work for
4509 * http://everything2.com/?node_id=1051618
4513 #ifdef MONO_ARCH_SIMD_INTRINSICS
4514 if (cfg->opt & MONO_OPT_SIMD) {
4515 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No generic intrinsic matched: give the backend a final chance. */
4521 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4525 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime methods to specialized JIT code.
 * Currently only String.InternalAllocateStr is handled: it is rerouted to
 * the GC's managed allocator, unless an allocation profiler is active (the
 * profiler must be able to observe the allocation).
 *   NOTE(review): this excerpt has dropped lines (e.g. the opening brace,
 * the fallback when managed_alloc is NULL, and the final return) — confirm
 * against the full source before relying on the control flow shown here.
 */
4528 inline static MonoInst*
4529 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4530 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4532 if (method->klass == mono_defaults.string_class) {
4533 /* managed string allocation support */
/* Only redirect when no allocation profiler is listening. */
4534 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4535 MonoInst *iargs [2];
4536 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4537 MonoMethod *managed_alloc = NULL;
4539 g_assert (vtable); /* Should not fail since it is System.String */
4540 #ifndef MONO_CROSS_COMPILE
4541 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4545 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4546 iargs [1] = args [0];
4547 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, copy the caller-supplied argument instructions in SP into
 * newly created local variables stored in cfg->args [i], so the inlined body
 * can address them as ordinary arguments.  For instance methods the type of
 * the 'this' argument is derived from the stack slot itself.
 *   NOTE(review): the return-type line and some body lines (declaration of
 * 'i', the comment opener before the FIXME) are missing from this excerpt.
 */
4554 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4556 MonoInst *store, *temp;
4559 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4560 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4563 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4564 * would be different than the MonoInst's used to represent arguments, and
4565 * the ldelema implementation can't deal with that.
4566 * Solution: When ldelema is used on an inline argument, create a var for
4567 * it, emit ldelema on that var, and emit the saving code below in
4568 * inline_method () if needed.
4570 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4571 cfg->args [i] = temp;
4572 /* This uses cfg->args [i] which is set by the preceding line */
4573 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4574 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when nonzero, inlining can be restricted to methods whose
 * full name matches a prefix read from the environment variables
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * (see the check_inline_*_method_name_limit () helpers below). */
4579 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4580 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4582 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: returns TRUE when CALLED_METHOD's full name starts with
 * the prefix cached from the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable (looked up once; an empty prefix disables the check).
 *   NOTE(review): the return type, some braces and the fallthrough
 * 'return TRUE;' for the empty-prefix case are missing from this excerpt.
 */
4584 check_inline_called_method_name_limit (MonoMethod *called_method)
4587 static char *limit = NULL;
/* Lazily read and cache the limit from the environment on first use. */
4589 if (limit == NULL) {
4590 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4592 if (limit_string != NULL)
4593 limit = limit_string;
4595 limit = (char *) "";
4598 if (limit [0] != '\0') {
4599 char *called_method_name = mono_method_full_name (called_method, TRUE);
4601 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4602 g_free (called_method_name);
4604 //return (strncmp_result <= 0);
4605 return (strncmp_result == 0);
4612 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: returns TRUE when CALLER_METHOD's full name starts with
 * the prefix cached from the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ().
 *   NOTE(review): the return type and some lines (braces, the empty-prefix
 * fallthrough return) are missing from this excerpt.
 */
4614 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4617 static char *limit = NULL;
/* Lazily read and cache the limit from the environment on first use. */
4619 if (limit == NULL) {
4620 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4621 if (limit_string != NULL) {
4622 limit = limit_string;
4624 limit = (char *) "";
4628 if (limit [0] != '\0') {
4629 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4631 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4632 g_free (caller_method_name);
4634 //return (strncmp_result <= 0);
4635 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of the caller's IR.  The
 * pieces of cfg state that the recursive mono_method_to_ir () call clobbers
 * (locals, args, cbb, cil offsets, generic context, ...) are saved first and
 * restored afterwards.  The callee is compiled into fresh start/end bblocks
 * (sbblock/ebblock); if the computed cost is acceptable — or inline_allways
 * is set — the new blocks are linked and merged into the caller, otherwise
 * they are discarded and the previous cfg->cbb is restored.
 *   NOTE(review): this excerpt is missing many lines (the return type,
 * declarations of 'costs'/'i', several braces, early returns and the final
 * return statements), so the comments below only describe visible code.
 */
4643 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4644 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4646 MonoInst *ins, *rvar = NULL;
4647 MonoMethodHeader *cheader;
4648 MonoBasicBlock *ebblock, *sbblock;
4650 MonoMethod *prev_inlined_method;
4651 MonoInst **prev_locals, **prev_args;
4652 MonoType **prev_arg_types;
4653 guint prev_real_offset;
4654 GHashTable *prev_cbb_hash;
4655 MonoBasicBlock **prev_cil_offset_to_bb;
4656 MonoBasicBlock *prev_cbb;
4657 unsigned char* prev_cil_start;
4658 guint32 prev_cil_offset_to_bb_len;
4659 MonoMethod *prev_current_method;
4660 MonoGenericContext *prev_generic_context;
4661 gboolean ret_var_set, prev_ret_var_set;
4663 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see helpers above). */
4665 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4666 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4669 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4670 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4674 if (cfg->verbose_level > 2)
4675 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4677 if (!cmethod->inline_info) {
4678 mono_jit_stats.inlineable_methods++;
4679 cmethod->inline_info = 1;
4682 /* allocate local variables */
4683 cheader = mono_method_get_header (cmethod);
4685 if (cheader == NULL || mono_loader_get_last_error ()) {
4687 mono_metadata_free_mh (cheader);
4688 mono_loader_clear_error ();
4692 /* allocate space to store the return value */
4693 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4694 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the callee; the caller's set is restored below. */
4698 prev_locals = cfg->locals;
4699 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4700 for (i = 0; i < cheader->num_locals; ++i)
4701 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4703 /* allocate start and end blocks */
4704 /* This is needed so if the inline is aborted, we can clean up */
4705 NEW_BBLOCK (cfg, sbblock);
4706 sbblock->real_offset = real_offset;
4708 NEW_BBLOCK (cfg, ebblock);
4709 ebblock->block_num = cfg->num_bblocks++;
4710 ebblock->real_offset = real_offset;
/* Save the remaining cfg state that mono_method_to_ir () overwrites. */
4712 prev_args = cfg->args;
4713 prev_arg_types = cfg->arg_types;
4714 prev_inlined_method = cfg->inlined_method;
4715 cfg->inlined_method = cmethod;
4716 cfg->ret_var_set = FALSE;
4717 cfg->inline_depth ++;
4718 prev_real_offset = cfg->real_offset;
4719 prev_cbb_hash = cfg->cbb_hash;
4720 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4721 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4722 prev_cil_start = cfg->cil_start;
4723 prev_cbb = cfg->cbb;
4724 prev_current_method = cfg->current_method;
4725 prev_generic_context = cfg->generic_context;
4726 prev_ret_var_set = cfg->ret_var_set;
/* Recursively compile the callee between sbblock and ebblock. */
4728 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4730 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state. */
4732 cfg->inlined_method = prev_inlined_method;
4733 cfg->real_offset = prev_real_offset;
4734 cfg->cbb_hash = prev_cbb_hash;
4735 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4736 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4737 cfg->cil_start = prev_cil_start;
4738 cfg->locals = prev_locals;
4739 cfg->args = prev_args;
4740 cfg->arg_types = prev_arg_types;
4741 cfg->current_method = prev_current_method;
4742 cfg->generic_context = prev_generic_context;
4743 cfg->ret_var_set = prev_ret_var_set;
4744 cfg->inline_depth --;
/* Accept the inline when cheap enough (cost threshold 60) or forced. */
4746 if ((costs >= 0 && costs < 60) || inline_allways) {
4747 if (cfg->verbose_level > 2)
4748 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4750 mono_jit_stats.inlined_methods++;
4752 /* always add some code to avoid block split failures */
4753 MONO_INST_NEW (cfg, ins, OP_NOP);
4754 MONO_ADD_INS (prev_cbb, ins);
4756 prev_cbb->next_bb = sbblock;
4757 link_bblock (cfg, prev_cbb, sbblock);
4760 * Get rid of the begin and end bblocks if possible to aid local
4763 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4765 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4766 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4768 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4769 MonoBasicBlock *prev = ebblock->in_bb [0];
4770 mono_merge_basic_blocks (cfg, prev, ebblock);
4772 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4773 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4774 cfg->cbb = prev_cbb;
4782 * If the inlined method contains only a throw, then the ret var is not
4783 * set, so set it to a dummy value.
4786 static double r8_0 = 0.0;
4788 switch (rvar->type) {
4790 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4793 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4798 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4801 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4802 ins->type = STACK_R8;
4803 ins->inst_p0 = (void*)&r8_0;
4804 ins->dreg = rvar->dreg;
4805 MONO_ADD_INS (cfg->cbb, ins);
4808 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4811 g_assert_not_reached ();
/* Push the callee's return value for the caller's evaluation stack. */
4815 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4818 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: drop any exception state and the newly created bblocks. */
4821 if (cfg->verbose_level > 2)
4822 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4823 cfg->exception_type = MONO_EXCEPTION_NONE;
4824 mono_loader_clear_error ();
4826 /* This gets rid of the newly added bblocks */
4827 cfg->cbb = prev_cbb;
4829 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4834 * Some of these comments may well be out-of-date.
4835 * Design decisions: we do a single pass over the IL code (and we do bblock
4836 * splitting/merging in the few cases when it's required: a back jump to an IL
4837 * address that was not already seen as bblock starting point).
4838 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4839 * Complex operations are decomposed in simpler ones right away. We need to let the
4840 * arch-specific code peek and poke inside this process somehow (except when the
4841 * optimizations can take advantage of the full semantic info of coarse opcodes).
4842 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4843 * MonoInst->opcode initially is the IL opcode or some simplification of that
4844 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4845 * opcode with value bigger than OP_LAST.
4846 * At this point the IR can be handed over to an interpreter, a dumb code generator
4847 * or to the optimizing code generator that will translate it to SSA form.
4849 * Profiling directed optimizations.
4850 * We may compile by default with few or no optimizations and instrument the code
4851 * or the user may indicate what methods to optimize the most either in a config file
4852 * or through repeated runs where the compiler applies offline the optimizations to
4853 * each method and then decides if it was worth it.
/* Verification and bounds-check helpers used throughout mono_method_to_ir ().
 * Each expands to UNVERIFIED on failure, except CHECK_TYPELOAD which records
 * the offending class and jumps to load_error. */
4856 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4857 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4858 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4859 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4860 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4861 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4862 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4863 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4865 /* offset from br.s -> br like opcodes */
4866 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Returns TRUE when IP does not begin a basic block other than BB, i.e.
 * no bblock is registered at that CIL offset or the registered one is BB
 * itself.  Used to decide whether it is safe to peek at following opcodes.
 */
4869 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4871 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4873 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the CIL stream [start, end): decode each opcode's operand
 * kind and create basic blocks (via GET_BBLOCK) at every branch target and
 * at the instruction following each branch/switch.  Bblocks whose last
 * reachable opcode is CEE_THROW are marked out_of_line.
 *   NOTE(review): this excerpt drops many lines (the return type, the main
 * loop header, 'ip' advancement per operand size, the default fallthrough
 * cases and several closing braces), so only the visible skeleton is
 * documented here.
 */
4877 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4879 unsigned char *ip = start;
4880 unsigned char *target;
4883 MonoBasicBlock *bblock;
4884 const MonoOpcode *opcode;
4887 cli_addr = ip - start;
4888 i = mono_opcode_value ((const guint8 **)&ip, end);
4891 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding of the decoded opcode. */
4892 switch (opcode->argument) {
4893 case MonoInlineNone:
4896 case MonoInlineString:
4897 case MonoInlineType:
4898 case MonoInlineField:
4899 case MonoInlineMethod:
4902 case MonoShortInlineR:
4909 case MonoShortInlineVar:
4910 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next opcode. */
4913 case MonoShortInlineBrTarget:
4914 target = start + cli_addr + 2 + (signed char)ip [1];
4915 GET_BBLOCK (cfg, bblock, target);
4918 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to the next opcode. */
4920 case MonoInlineBrTarget:
4921 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4922 GET_BBLOCK (cfg, bblock, target);
4925 GET_BBLOCK (cfg, bblock, ip);
/* Switch: N 4-byte targets, all relative to the end of the switch. */
4927 case MonoInlineSwitch: {
4928 guint32 n = read32 (ip + 1);
4931 cli_addr += 5 + 4 * n;
4932 target = start + cli_addr;
4933 GET_BBLOCK (cfg, bblock, target);
4935 for (j = 0; j < n; ++j) {
4936 target = start + cli_addr + (gint32)read32 (ip);
4937 GET_BBLOCK (cfg, bblock, target);
4947 g_assert_not_reached ();
4950 if (i == CEE_THROW) {
4951 unsigned char *bb_start = ip - 1;
4953 /* Find the start of the bblock containing the throw */
4955 while ((bb_start >= start) && !bblock) {
4956 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4960 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN relative to method M.  For wrapper methods the
 * target is stored directly in the wrapper data; otherwise the token is
 * looked up in M's image with the given class/generic context.  "allow
 * open" = the result may still contain unresolved type variables.
 *   NOTE(review): the opening brace, the 'method' declaration and the final
 * return are missing from this excerpt.
 */
4969 static inline MonoMethod *
4970 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4974 if (m->wrapper_type != MONO_WRAPPER_NONE)
4975 return mono_method_get_wrapper_data (m, token);
4977 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally checks for open
 * constructed types on the resulting method's class when generic sharing is
 * not in use.  NOTE(review): the body lines following the check (and the
 * final return) are missing from this excerpt.
 */
4982 static inline MonoMethod *
4983 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4985 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4987 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN relative to METHOD: wrapper methods carry the class
 * in their wrapper data, otherwise the token is resolved against the
 * method's image with the given generic context; the class is then
 * initialized.  NOTE(review): the opening brace, 'klass' declaration and
 * final return are missing from this excerpt.
 */
4993 static inline MonoClass*
4994 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4998 if (method->wrapper_type != MONO_WRAPPER_NONE)
4999 klass = mono_method_get_wrapper_data (method, token);
5001 klass = mono_class_get_full (method->klass->image, token, context);
5003 mono_class_init (klass);
5008 * Returns TRUE if the JIT should abort inlining because "callee"
5009 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   CAS LinkDemand enforcement at JIT time.  When inlining (cfg->method !=
 * caller) and the callee has declarative security, the demand cannot be
 * evaluated here (visible early-out branch).  Otherwise the linkdemand is
 * evaluated: on an ECMA (fully-trusted) failure, code throwing a
 * SecurityException is emitted before the call; on other failures the
 * result is recorded on cfg as MONO_EXCEPTION_SECURITY_LINKDEMAND.
 *   NOTE(review): several lines (braces, 'result'/'args' declarations and
 * the returns) are missing from this excerpt.
 */
5012 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5016 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5020 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5021 if (result == MONO_JIT_SECURITY_OK)
5024 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5025 /* Generate code to throw a SecurityException before the actual call/link */
5026 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5029 NEW_ICONST (cfg, args [0], 4);
5030 NEW_METHODCONST (cfg, args [1], caller);
5031 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5032 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5033 /* don't hide previous results */
5034 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5035 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Returns the managed SecurityManager.ThrowException (MonoException)
 * method, looked up once and cached in a function-local static.
 *   NOTE(review): the return type, braces and final 'return method;' are
 * missing from this excerpt.
 */
5043 throw_exception (void)
5045 static MonoMethod *method = NULL;
5048 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5049 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 * point, i.e. the generated code will throw the pre-built exception object
 * EX at runtime.
 */
5056 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5058 MonoMethod *thrower = throw_exception ();
5061 EMIT_NEW_PCONST (cfg, args [0], ex);
5062 mono_emit_method_call (cfg, thrower, args, NULL);
5066 * Return the original method if a wrapper is specified. We can only access
5067 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Unwrap METHOD for attribute lookups: non-wrappers are returned as-is
 * (visible early-out check), native-to-managed wrappers are treated like
 * Critical code, and all other wrappers are resolved back to the method
 * they wrap.  NOTE(review): the return type and the 'return method;' lines
 * for the first two cases are missing from this excerpt.
 */
5070 get_original_method (MonoMethod *method)
5072 if (method->wrapper_type == MONO_WRAPPER_NONE)
5075 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5076 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5079 /* in other cases we need to find the original method */
5080 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for field access: asks the security layer
 * whether CALLER (unwrapped via get_original_method ()) may touch FIELD
 * and, if an exception object comes back, emits code to throw it.
 */
5084 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5085 MonoBasicBlock *bblock, unsigned char *ip)
5087 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5088 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5090 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for method calls: asks the security layer
 * whether CALLER (unwrapped via get_original_method ()) may call CALLEE
 * and, if an exception object comes back, emits code to throw it.
 */
5094 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5095 MonoBasicBlock *bblock, unsigned char *ip)
5097 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5098 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5100 emit_throw_exception (cfg, ex);
5104 * Check that the IL instructions at ip are the array initialization
5105 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray" idiom that follows a newarr, and resolve the static
 * field's RVA data so the JIT can initialize the array directly.  For AOT,
 * the RVA itself is returned and the lookup is deferred to load time.
 * Element types needing byte swapping on big-endian targets are rejected
 * (the #if below), as is data larger than the field.
 *   NOTE(review): many lines are missing from this excerpt (the return
 * type, several case labels, size assignments, NULL-check returns and the
 * final return), so this documents only the visible structure.
 */
5108 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5111 * newarr[System.Int32]
5113 * ldtoken field valuetype ...
5114 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (a Field token). */
5116 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5117 guint32 token = read32 (ip + 7);
5118 guint32 field_token = read32 (ip + 2);
5119 guint32 field_index = field_token & 0xffffff;
5121 const char *data_ptr;
5123 MonoMethod *cmethod;
5124 MonoClass *dummy_class;
5125 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5131 *out_field_token = field_token;
5133 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be RuntimeHelpers.InitializeArray from corlib. */
5136 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5138 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5139 case MONO_TYPE_BOOLEAN:
5143 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5144 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5145 case MONO_TYPE_CHAR:
5155 return NULL; /* stupid ARM FP swapped format */
/* Reject if the computed data size exceeds the backing field's size. */
5165 if (size > mono_type_size (field->type, &dummy_align))
5168 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5169 if (!method->klass->image->dynamic) {
5170 field_index = read32 (ip + 2) & 0xffffff;
5171 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5172 data_ptr = mono_image_rva_map (method->klass->image, rva);
5173 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5174 /* for aot code we do the lookup on load */
5175 if (aot && data_ptr)
5176 return GUINT_TO_POINTER (rva);
5178 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: read the data straight from the field. */
5180 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the invalid IL at IP,
 * building a message from the method's full name and a disassembly of the
 * offending instruction (or "method body is empty." for empty bodies).
 * The header obtained here is queued on cfg->headers_to_free.
 */
5188 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5190 char *method_fname = mono_method_full_name (method, TRUE);
5192 MonoMethodHeader *header = mono_method_get_header (method);
5194 if (header->code_size == 0)
5195 method_code = g_strdup ("method body is empty.");
5197 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5198 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5199 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5200 g_free (method_fname);
5201 g_free (method_code);
5202 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG.  cfg->exception_ptr
 * is registered as a GC root first so the object stays alive until the
 * compilation failure is reported.
 */
5206 set_exception_object (MonoCompile *cfg, MonoException *exception)
5208 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5209 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5210 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Returns TRUE when KLASS is a reference type, resolving it through the
 * generic-sharing context first (so a shared type variable is judged by its
 * basic/constrained type rather than the open variable itself).
 */
5214 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5218 if (cfg->generic_sharing_context)
5219 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5221 type = &klass->byval_arg;
5222 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store into local N from the stack slot *SP.  When the store
 * would be a plain register move and the value on top of the stack is the
 * last emitted instruction and a simple constant load, the move is elided
 * by retargeting that instruction's destination register to the local.
 */
5226 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5229 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5230 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5231 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5232 /* Optimize reg-reg moves away */
5234 * Can't optimize other opcodes, since sp[0] might point to
5235 * the last ins of a decomposed opcode.
5237 sp [0]->dreg = (cfg)->locals [n]->dreg;
5239 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5244 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for ldloca followed immediately by initobj in the same bblock:
 * instead of taking the local's address, emit the initialization directly
 * (NULL store for reference types, VZERO for value types), which keeps the
 * local addressable-free.  Returns the new ip past the consumed opcodes
 * when the pattern matched.
 *   NOTE(review): lines handling the short ldloca.s form, the 'skip' cases
 * and the returns are missing from this excerpt.
 */
5247 static inline unsigned char *
5248 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5257 local = read16 (ip + 2);
5261 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5262 gboolean skip = FALSE;
5264 /* From the INITOBJ case */
5265 token = read32 (ip + 2);
5266 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5267 CHECK_TYPELOAD (klass);
5268 if (generic_class_is_reference_type (cfg, klass)) {
5269 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5270 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5271 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5272 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5273 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walks up the parent chain of CLASS testing each ancestor against
 * mono_defaults.exception_class, i.e. answers whether CLASS derives from
 * System.Exception.  NOTE(review): the loop header, returns and braces are
 * missing from this excerpt.
 */
5286 is_exception_class (MonoClass *class)
5289 if (class == mono_defaults.exception_class)
5291 class = class->parent;
5297 * mono_method_to_ir:
5299 * Translate the .net IL into linear IR.
5302 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5303 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5304 guint inline_offset, gboolean is_virtual_call)
5307 MonoInst *ins, **sp, **stack_start;
5308 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5309 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5310 MonoMethod *cmethod, *method_definition;
5311 MonoInst **arg_array;
5312 MonoMethodHeader *header;
5314 guint32 token, ins_flag;
5316 MonoClass *constrained_call = NULL;
5317 unsigned char *ip, *end, *target, *err_pos;
5318 static double r8_0 = 0.0;
5319 MonoMethodSignature *sig;
5320 MonoGenericContext *generic_context = NULL;
5321 MonoGenericContainer *generic_container = NULL;
5322 MonoType **param_types;
5323 int i, n, start_new_bblock, dreg;
5324 int num_calls = 0, inline_costs = 0;
5325 int breakpoint_id = 0;
5327 MonoBoolean security, pinvoke;
5328 MonoSecurityManager* secman = NULL;
5329 MonoDeclSecurityActions actions;
5330 GSList *class_inits = NULL;
5331 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5333 gboolean init_locals, seq_points, skip_dead_blocks;
5335 /* serialization and xdomain stuff may need access to private fields and methods */
5336 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5337 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5338 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5339 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5340 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5341 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5343 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5345 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5346 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5347 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5348 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5350 image = method->klass->image;
5351 header = mono_method_get_header (method);
5353 MonoLoaderError *error;
5355 if ((error = mono_loader_get_last_error ())) {
5356 cfg->exception_type = error->exception_type;
5358 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5359 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5361 goto exception_exit;
5363 generic_container = mono_method_get_generic_container (method);
5364 sig = mono_method_signature (method);
5365 num_args = sig->hasthis + sig->param_count;
5366 ip = (unsigned char*)header->code;
5367 cfg->cil_start = ip;
5368 end = ip + header->code_size;
5369 mono_jit_stats.cil_code_size += header->code_size;
5370 init_locals = header->init_locals;
5372 seq_points = cfg->gen_seq_points && cfg->method == method;
5375 * Methods without init_locals set could cause asserts in various passes
5380 method_definition = method;
5381 while (method_definition->is_inflated) {
5382 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5383 method_definition = imethod->declaring;
5386 /* SkipVerification is not allowed if core-clr is enabled */
5387 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5389 dont_verify_stloc = TRUE;
5392 if (!dont_verify && mini_method_verify (cfg, method_definition))
5393 goto exception_exit;
5395 if (mono_debug_using_mono_debugger ())
5396 cfg->keep_cil_nops = TRUE;
5398 if (sig->is_inflated)
5399 generic_context = mono_method_get_context (method);
5400 else if (generic_container)
5401 generic_context = &generic_container->context;
5402 cfg->generic_context = generic_context;
5404 if (!cfg->generic_sharing_context)
5405 g_assert (!sig->has_type_parameters);
5407 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5408 g_assert (method->is_inflated);
5409 g_assert (mono_method_get_context (method)->method_inst);
5411 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5412 g_assert (sig->generic_param_count);
5414 if (cfg->method == method) {
5415 cfg->real_offset = 0;
5417 cfg->real_offset = inline_offset;
5420 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5421 cfg->cil_offset_to_bb_len = header->code_size;
5423 cfg->current_method = method;
5425 if (cfg->verbose_level > 2)
5426 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5428 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5430 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5431 for (n = 0; n < sig->param_count; ++n)
5432 param_types [n + sig->hasthis] = sig->params [n];
5433 cfg->arg_types = param_types;
5435 dont_inline = g_list_prepend (dont_inline, method);
5436 if (cfg->method == method) {
5438 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5439 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5442 NEW_BBLOCK (cfg, start_bblock);
5443 cfg->bb_entry = start_bblock;
5444 start_bblock->cil_code = NULL;
5445 start_bblock->cil_length = 0;
5448 NEW_BBLOCK (cfg, end_bblock);
5449 cfg->bb_exit = end_bblock;
5450 end_bblock->cil_code = NULL;
5451 end_bblock->cil_length = 0;
5452 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5453 g_assert (cfg->num_bblocks == 2);
5455 arg_array = cfg->args;
5457 if (header->num_clauses) {
5458 cfg->spvars = g_hash_table_new (NULL, NULL);
5459 cfg->exvars = g_hash_table_new (NULL, NULL);
5461 /* handle exception clauses */
5462 for (i = 0; i < header->num_clauses; ++i) {
5463 MonoBasicBlock *try_bb;
5464 MonoExceptionClause *clause = &header->clauses [i];
5465 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5466 try_bb->real_offset = clause->try_offset;
5467 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5468 tblock->real_offset = clause->handler_offset;
5469 tblock->flags |= BB_EXCEPTION_HANDLER;
5471 link_bblock (cfg, try_bb, tblock);
5473 if (*(ip + clause->handler_offset) == CEE_POP)
5474 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5476 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5477 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5478 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5479 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5480 MONO_ADD_INS (tblock, ins);
5482 /* todo: is a fault block unsafe to optimize? */
5483 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5484 tblock->flags |= BB_EXCEPTION_UNSAFE;
5488 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5490 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5492 /* catch and filter blocks get the exception object on the stack */
5493 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5494 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5495 MonoInst *dummy_use;
5497 /* mostly like handle_stack_args (), but just sets the input args */
5498 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5499 tblock->in_scount = 1;
5500 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5501 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5504 * Add a dummy use for the exvar so its liveness info will be
5508 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5510 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5511 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5512 tblock->flags |= BB_EXCEPTION_HANDLER;
5513 tblock->real_offset = clause->data.filter_offset;
5514 tblock->in_scount = 1;
5515 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5516 /* The filter block shares the exvar with the handler block */
5517 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5518 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5519 MONO_ADD_INS (tblock, ins);
5523 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5524 clause->data.catch_class &&
5525 cfg->generic_sharing_context &&
5526 mono_class_check_context_used (clause->data.catch_class)) {
5528 * In shared generic code with catch
5529 * clauses containing type variables
5530 * the exception handling code has to
5531 * be able to get to the rgctx.
5532 * Therefore we have to make sure that
5533 * the vtable/mrgctx argument (for
5534 * static or generic methods) or the
5535 * "this" argument (for non-static
5536 * methods) are live.
5538 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5539 mini_method_get_context (method)->method_inst ||
5540 method->klass->valuetype) {
5541 mono_get_vtable_var (cfg);
5543 MonoInst *dummy_use;
5545 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5550 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5551 cfg->cbb = start_bblock;
5552 cfg->args = arg_array;
5553 mono_save_args (cfg, sig, inline_args);
5556 /* FIRST CODE BLOCK */
5557 NEW_BBLOCK (cfg, bblock);
5558 bblock->cil_code = ip;
5562 ADD_BBLOCK (cfg, bblock);
5564 if (cfg->method == method) {
5565 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5566 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5567 MONO_INST_NEW (cfg, ins, OP_BREAK);
5568 MONO_ADD_INS (bblock, ins);
5572 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5573 secman = mono_security_manager_get_methods ();
5575 security = (secman && mono_method_has_declsec (method));
5576 /* at this point having security doesn't mean we have any code to generate */
5577 if (security && (cfg->method == method)) {
5578 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5579 * And we do not want to enter the next section (with allocation) if we
5580 * have nothing to generate */
5581 security = mono_declsec_get_demands (method, &actions);
5584 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5585 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5587 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5588 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5589 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5591 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5592 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5596 mono_custom_attrs_free (custom);
5599 custom = mono_custom_attrs_from_class (wrapped->klass);
5600 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5604 mono_custom_attrs_free (custom);
5607 /* not a P/Invoke after all */
5612 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5613 /* we use a separate basic block for the initialization code */
5614 NEW_BBLOCK (cfg, init_localsbb);
5615 cfg->bb_init = init_localsbb;
5616 init_localsbb->real_offset = cfg->real_offset;
5617 start_bblock->next_bb = init_localsbb;
5618 init_localsbb->next_bb = bblock;
5619 link_bblock (cfg, start_bblock, init_localsbb);
5620 link_bblock (cfg, init_localsbb, bblock);
5622 cfg->cbb = init_localsbb;
5624 start_bblock->next_bb = bblock;
5625 link_bblock (cfg, start_bblock, bblock);
5628 /* at this point we know, if security is TRUE, that some code needs to be generated */
5629 if (security && (cfg->method == method)) {
5632 mono_jit_stats.cas_demand_generation++;
5634 if (actions.demand.blob) {
5635 /* Add code for SecurityAction.Demand */
5636 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5637 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5638 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5639 mono_emit_method_call (cfg, secman->demand, args, NULL);
5641 if (actions.noncasdemand.blob) {
5642 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5643 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5644 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5645 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5646 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5647 mono_emit_method_call (cfg, secman->demand, args, NULL);
5649 if (actions.demandchoice.blob) {
5650 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5651 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5652 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5653 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5654 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5658 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5660 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5663 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5664 /* check if this is native code, e.g. an icall or a p/invoke */
5665 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5666 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5668 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5669 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5671 /* if this is a native call then it can only be JITted from platform code */
5672 if ((icall || pinvk) && method->klass && method->klass->image) {
5673 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5674 MonoException *ex = icall ? mono_get_exception_security () :
5675 mono_get_exception_method_access ();
5676 emit_throw_exception (cfg, ex);
5683 if (header->code_size == 0)
5686 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5691 if (cfg->method == method)
5692 mono_debug_init_method (cfg, bblock, breakpoint_id);
5694 for (n = 0; n < header->num_locals; ++n) {
5695 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5700 /* We force the vtable variable here for all shared methods
5701 for the possibility that they might show up in a stack
5702 trace where their exact instantiation is needed. */
5703 if (cfg->generic_sharing_context && method == cfg->method) {
5704 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5705 mini_method_get_context (method)->method_inst ||
5706 method->klass->valuetype) {
5707 mono_get_vtable_var (cfg);
5709 /* FIXME: Is there a better way to do this?
5710 We need the variable live for the duration
5711 of the whole method. */
5712 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5716 /* add a check for this != NULL to inlined methods */
5717 if (is_virtual_call) {
5720 NEW_ARGLOAD (cfg, arg_ins, 0);
5721 MONO_ADD_INS (cfg->cbb, arg_ins);
5722 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5725 skip_dead_blocks = !dont_verify;
5726 if (skip_dead_blocks) {
5727 original_bb = bb = mono_basic_block_split (method, &error);
5728 if (!mono_error_ok (&error)) {
5729 mono_error_cleanup (&error);
5735 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5736 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5739 start_new_bblock = 0;
5742 if (cfg->method == method)
5743 cfg->real_offset = ip - header->code;
5745 cfg->real_offset = inline_offset;
5750 if (start_new_bblock) {
5751 bblock->cil_length = ip - bblock->cil_code;
5752 if (start_new_bblock == 2) {
5753 g_assert (ip == tblock->cil_code);
5755 GET_BBLOCK (cfg, tblock, ip);
5757 bblock->next_bb = tblock;
5760 start_new_bblock = 0;
5761 for (i = 0; i < bblock->in_scount; ++i) {
5762 if (cfg->verbose_level > 3)
5763 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5764 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5768 g_slist_free (class_inits);
5771 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5772 link_bblock (cfg, bblock, tblock);
5773 if (sp != stack_start) {
5774 handle_stack_args (cfg, stack_start, sp - stack_start);
5776 CHECK_UNVERIFIABLE (cfg);
5778 bblock->next_bb = tblock;
5781 for (i = 0; i < bblock->in_scount; ++i) {
5782 if (cfg->verbose_level > 3)
5783 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5784 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5787 g_slist_free (class_inits);
5792 if (skip_dead_blocks) {
5793 int ip_offset = ip - header->code;
5795 if (ip_offset == bb->end)
5799 int op_size = mono_opcode_size (ip, end);
5800 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5802 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5804 if (ip_offset + op_size == bb->end) {
5805 MONO_INST_NEW (cfg, ins, OP_NOP);
5806 MONO_ADD_INS (bblock, ins);
5807 start_new_bblock = 1;
5815 * Sequence points are points where the debugger can place a breakpoint.
5816 * Currently, we generate these automatically at points where the IL
5819 if (seq_points && sp == stack_start) {
5820 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5821 MONO_ADD_INS (cfg->cbb, ins);
5824 bblock->real_offset = cfg->real_offset;
5826 if ((cfg->method == method) && cfg->coverage_info) {
5827 guint32 cil_offset = ip - header->code;
5828 cfg->coverage_info->data [cil_offset].cil_code = ip;
5830 /* TODO: Use an increment here */
5831 #if defined(TARGET_X86)
5832 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5833 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5835 MONO_ADD_INS (cfg->cbb, ins);
5837 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5838 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5842 if (cfg->verbose_level > 3)
5843 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5847 if (cfg->keep_cil_nops)
5848 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5850 MONO_INST_NEW (cfg, ins, OP_NOP);
5852 MONO_ADD_INS (bblock, ins);
5855 if (should_insert_brekpoint (cfg->method))
5856 MONO_INST_NEW (cfg, ins, OP_BREAK);
5858 MONO_INST_NEW (cfg, ins, OP_NOP);
5860 MONO_ADD_INS (bblock, ins);
5866 CHECK_STACK_OVF (1);
5867 n = (*ip)-CEE_LDARG_0;
5869 EMIT_NEW_ARGLOAD (cfg, ins, n);
5877 CHECK_STACK_OVF (1);
5878 n = (*ip)-CEE_LDLOC_0;
5880 EMIT_NEW_LOCLOAD (cfg, ins, n);
5889 n = (*ip)-CEE_STLOC_0;
5892 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5894 emit_stloc_ir (cfg, sp, header, n);
5901 CHECK_STACK_OVF (1);
5904 EMIT_NEW_ARGLOAD (cfg, ins, n);
5910 CHECK_STACK_OVF (1);
5913 NEW_ARGLOADA (cfg, ins, n);
5914 MONO_ADD_INS (cfg->cbb, ins);
5924 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5926 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5931 CHECK_STACK_OVF (1);
5934 EMIT_NEW_LOCLOAD (cfg, ins, n);
5938 case CEE_LDLOCA_S: {
5939 unsigned char *tmp_ip;
5941 CHECK_STACK_OVF (1);
5942 CHECK_LOCAL (ip [1]);
5944 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5950 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5959 CHECK_LOCAL (ip [1]);
5960 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5962 emit_stloc_ir (cfg, sp, header, ip [1]);
5967 CHECK_STACK_OVF (1);
5968 EMIT_NEW_PCONST (cfg, ins, NULL);
5969 ins->type = STACK_OBJ;
5974 CHECK_STACK_OVF (1);
5975 EMIT_NEW_ICONST (cfg, ins, -1);
5988 CHECK_STACK_OVF (1);
5989 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5995 CHECK_STACK_OVF (1);
5997 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6003 CHECK_STACK_OVF (1);
6004 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6010 CHECK_STACK_OVF (1);
6011 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6012 ins->type = STACK_I8;
6013 ins->dreg = alloc_dreg (cfg, STACK_I8);
6015 ins->inst_l = (gint64)read64 (ip);
6016 MONO_ADD_INS (bblock, ins);
6022 gboolean use_aotconst = FALSE;
6024 #ifdef TARGET_POWERPC
6025 /* FIXME: Clean this up */
6026 if (cfg->compile_aot)
6027 use_aotconst = TRUE;
6030 /* FIXME: we should really allocate this only late in the compilation process */
6031 f = mono_domain_alloc (cfg->domain, sizeof (float));
6033 CHECK_STACK_OVF (1);
6039 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6041 dreg = alloc_freg (cfg);
6042 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6043 ins->type = STACK_R8;
6045 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6046 ins->type = STACK_R8;
6047 ins->dreg = alloc_dreg (cfg, STACK_R8);
6049 MONO_ADD_INS (bblock, ins);
6059 gboolean use_aotconst = FALSE;
6061 #ifdef TARGET_POWERPC
6062 /* FIXME: Clean this up */
6063 if (cfg->compile_aot)
6064 use_aotconst = TRUE;
6067 /* FIXME: we should really allocate this only late in the compilation process */
6068 d = mono_domain_alloc (cfg->domain, sizeof (double));
6070 CHECK_STACK_OVF (1);
6076 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6078 dreg = alloc_freg (cfg);
6079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6080 ins->type = STACK_R8;
6082 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6083 ins->type = STACK_R8;
6084 ins->dreg = alloc_dreg (cfg, STACK_R8);
6086 MONO_ADD_INS (bblock, ins);
6095 MonoInst *temp, *store;
6097 CHECK_STACK_OVF (1);
6101 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6102 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6104 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6107 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6120 if (sp [0]->type == STACK_R8)
6121 /* we need to pop the value from the x86 FP stack */
6122 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6131 if (stack_start != sp)
6133 token = read32 (ip + 1);
6134 /* FIXME: check the signature matches */
6135 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6140 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6141 GENERIC_SHARING_FAILURE (CEE_JMP);
6143 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6144 CHECK_CFG_EXCEPTION;
6146 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6148 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6151 /* Handle tail calls similarly to calls */
6152 n = fsig->param_count + fsig->hasthis;
6154 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6155 call->method = cmethod;
6156 call->tail_call = TRUE;
6157 call->signature = mono_method_signature (cmethod);
6158 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6159 call->inst.inst_p0 = cmethod;
6160 for (i = 0; i < n; ++i)
6161 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6163 mono_arch_emit_call (cfg, call);
6164 MONO_ADD_INS (bblock, (MonoInst*)call);
6167 for (i = 0; i < num_args; ++i)
6168 /* Prevent arguments from being optimized away */
6169 arg_array [i]->flags |= MONO_INST_VOLATILE;
6171 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6172 ins = (MonoInst*)call;
6173 ins->inst_p0 = cmethod;
6174 MONO_ADD_INS (bblock, ins);
6178 start_new_bblock = 1;
6183 case CEE_CALLVIRT: {
6184 MonoInst *addr = NULL;
6185 MonoMethodSignature *fsig = NULL;
6187 int virtual = *ip == CEE_CALLVIRT;
6188 int calli = *ip == CEE_CALLI;
6189 gboolean pass_imt_from_rgctx = FALSE;
6190 MonoInst *imt_arg = NULL;
6191 gboolean pass_vtable = FALSE;
6192 gboolean pass_mrgctx = FALSE;
6193 MonoInst *vtable_arg = NULL;
6194 gboolean check_this = FALSE;
6195 gboolean supported_tail_call = FALSE;
6198 token = read32 (ip + 1);
6205 if (method->wrapper_type != MONO_WRAPPER_NONE)
6206 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6208 fsig = mono_metadata_parse_signature (image, token);
6210 n = fsig->param_count + fsig->hasthis;
6212 if (method->dynamic && fsig->pinvoke) {
6216 * This is a call through a function pointer using a pinvoke
6217 * signature. Have to create a wrapper and call that instead.
6218 * FIXME: This is very slow, need to create a wrapper at JIT time
6219 * instead based on the signature.
6221 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6222 EMIT_NEW_PCONST (cfg, args [1], fsig);
6224 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6227 MonoMethod *cil_method;
6229 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6230 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6231 cil_method = cmethod;
6232 } else if (constrained_call) {
6233 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6235 * This is needed since get_method_constrained can't find
6236 * the method in klass representing a type var.
6237 * The type var is guaranteed to be a reference type in this
6240 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6241 cil_method = cmethod;
6242 g_assert (!cmethod->klass->valuetype);
6244 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6247 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6248 cil_method = cmethod;
6253 if (!dont_verify && !cfg->skip_visibility) {
6254 MonoMethod *target_method = cil_method;
6255 if (method->is_inflated) {
6256 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6258 if (!mono_method_can_access_method (method_definition, target_method) &&
6259 !mono_method_can_access_method (method, cil_method))
6260 METHOD_ACCESS_FAILURE;
6263 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6264 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6266 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6267 /* MS.NET seems to silently convert this to a callvirt */
6272 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6273 * converts to a callvirt.
6275 * tests/bug-515884.il is an example of this behavior
6277 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6278 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6279 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6283 if (!cmethod->klass->inited)
6284 if (!mono_class_init (cmethod->klass))
6287 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6288 mini_class_is_system_array (cmethod->klass)) {
6289 array_rank = cmethod->klass->rank;
6290 fsig = mono_method_signature (cmethod);
6292 fsig = mono_method_signature (cmethod);
6297 if (fsig->pinvoke) {
6298 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6299 check_for_pending_exc, FALSE);
6300 fsig = mono_method_signature (wrapper);
6301 } else if (constrained_call) {
6302 fsig = mono_method_signature (cmethod);
6304 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6308 mono_save_token_info (cfg, image, token, cil_method);
6310 n = fsig->param_count + fsig->hasthis;
6312 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6313 if (check_linkdemand (cfg, method, cmethod))
6315 CHECK_CFG_EXCEPTION;
6318 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6319 g_assert_not_reached ();
6322 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6325 if (!cfg->generic_sharing_context && cmethod)
6326 g_assert (!mono_method_check_context_used (cmethod));
6330 //g_assert (!virtual || fsig->hasthis);
6334 if (constrained_call) {
6336 * We have the `constrained.' prefix opcode.
6338 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6340 * The type parameter is instantiated as a valuetype,
6341 * but that type doesn't override the method we're
6342 * calling, so we need to box `this'.
6344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6345 ins->klass = constrained_call;
6346 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6347 CHECK_CFG_EXCEPTION;
6348 } else if (!constrained_call->valuetype) {
6349 int dreg = alloc_preg (cfg);
6352 * The type parameter is instantiated as a reference
6353 * type. We have a managed pointer on the stack, so
6354 * we need to dereference it here.
6356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6357 ins->type = STACK_OBJ;
6359 } else if (cmethod->klass->valuetype)
6361 constrained_call = NULL;
6364 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6368 * If the callee is a shared method, then its static cctor
6369 * might not get called after the call was patched.
6371 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6372 emit_generic_class_init (cfg, cmethod->klass);
6373 CHECK_TYPELOAD (cmethod->klass);
6376 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6377 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6378 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6379 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6380 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6383 * Pass vtable iff target method might
6384 * be shared, which means that sharing
6385 * is enabled for its class and its
6386 * context is sharable (and it's not a
6389 if (sharing_enabled && context_sharable &&
6390 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6394 if (cmethod && mini_method_get_context (cmethod) &&
6395 mini_method_get_context (cmethod)->method_inst) {
6396 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6397 MonoGenericContext *context = mini_method_get_context (cmethod);
6398 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6400 g_assert (!pass_vtable);
6402 if (sharing_enabled && context_sharable)
6406 if (cfg->generic_sharing_context && cmethod) {
6407 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6409 context_used = mono_method_check_context_used (cmethod);
6411 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6412 /* Generic method interface
6413 calls are resolved via a
6414 helper function and don't
6416 if (!cmethod_context || !cmethod_context->method_inst)
6417 pass_imt_from_rgctx = TRUE;
6421 * If a shared method calls another
6422 * shared method then the caller must
6423 * have a generic sharing context
6424 * because the magic trampoline
6425 * requires it. FIXME: We shouldn't
6426 * have to force the vtable/mrgctx
6427 * variable here. Instead there
6428 * should be a flag in the cfg to
6429 * request a generic sharing context.
6432 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6433 mono_get_vtable_var (cfg);
6438 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6440 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6442 CHECK_TYPELOAD (cmethod->klass);
6443 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6448 g_assert (!vtable_arg);
6450 if (!cfg->compile_aot) {
6452 * emit_get_rgctx_method () calls mono_class_vtable () so check
6453 * for type load errors before.
6455 mono_class_setup_vtable (cmethod->klass);
6456 CHECK_TYPELOAD (cmethod->klass);
6459 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6461 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6462 MONO_METHOD_IS_FINAL (cmethod)) {
6469 if (pass_imt_from_rgctx) {
6470 g_assert (!pass_vtable);
6473 imt_arg = emit_get_rgctx_method (cfg, context_used,
6474 cmethod, MONO_RGCTX_INFO_METHOD);
6478 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6480 /* Calling virtual generic methods */
6481 if (cmethod && virtual &&
6482 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6483 !(MONO_METHOD_IS_FINAL (cmethod) &&
6484 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6485 mono_method_signature (cmethod)->generic_param_count) {
6486 MonoInst *this_temp, *this_arg_temp, *store;
6487 MonoInst *iargs [4];
6489 g_assert (mono_method_signature (cmethod)->is_inflated);
6491 /* Prevent inlining of methods that contain indirect calls */
6494 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6495 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6496 g_assert (!imt_arg);
6498 g_assert (cmethod->is_inflated);
6499 imt_arg = emit_get_rgctx_method (cfg, context_used,
6500 cmethod, MONO_RGCTX_INFO_METHOD);
6501 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6505 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6506 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6507 MONO_ADD_INS (bblock, store);
6509 /* FIXME: This should be a managed pointer */
6510 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6512 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6513 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6514 cmethod, MONO_RGCTX_INFO_METHOD);
6515 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6516 addr = mono_emit_jit_icall (cfg,
6517 mono_helper_compile_generic_method, iargs);
6519 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6521 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6524 if (!MONO_TYPE_IS_VOID (fsig->ret))
6525 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6527 CHECK_CFG_EXCEPTION;
6534 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6535 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6537 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6541 /* FIXME: runtime generic context pointer for jumps? */
6542 /* FIXME: handle this for generic sharing eventually */
6543 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6546 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6549 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6550 /* Handle tail calls similarly to calls */
6551 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6553 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6554 call->tail_call = TRUE;
6555 call->method = cmethod;
6556 call->signature = mono_method_signature (cmethod);
6559 * We implement tail calls by storing the actual arguments into the
6560 * argument variables, then emitting a CEE_JMP.
6562 for (i = 0; i < n; ++i) {
6563 /* Prevent argument from being register allocated */
6564 arg_array [i]->flags |= MONO_INST_VOLATILE;
6565 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6569 ins = (MonoInst*)call;
6570 ins->inst_p0 = cmethod;
6571 ins->inst_p1 = arg_array [0];
6572 MONO_ADD_INS (bblock, ins);
6573 link_bblock (cfg, bblock, end_bblock);
6574 start_new_bblock = 1;
6576 CHECK_CFG_EXCEPTION;
6578 /* skip CEE_RET as well */
6584 /* Conversion to a JIT intrinsic */
6585 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6587 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6588 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6593 CHECK_CFG_EXCEPTION;
6601 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6602 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6603 mono_method_check_inlining (cfg, cmethod) &&
6604 !g_list_find (dont_inline, cmethod)) {
6606 gboolean allways = FALSE;
6608 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6609 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6610 /* Prevent inlining of methods that call wrappers */
6612 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6616 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6618 cfg->real_offset += 5;
6621 if (!MONO_TYPE_IS_VOID (fsig->ret))
6622 /* *sp is already set by inline_method */
6625 inline_costs += costs;
6631 inline_costs += 10 * num_calls++;
6633 /* Tail recursion elimination */
6634 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6635 gboolean has_vtargs = FALSE;
6638 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6641 /* keep it simple */
6642 for (i = fsig->param_count - 1; i >= 0; i--) {
6643 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6648 for (i = 0; i < n; ++i)
6649 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6650 MONO_INST_NEW (cfg, ins, OP_BR);
6651 MONO_ADD_INS (bblock, ins);
6652 tblock = start_bblock->out_bb [0];
6653 link_bblock (cfg, bblock, tblock);
6654 ins->inst_target_bb = tblock;
6655 start_new_bblock = 1;
6657 /* skip the CEE_RET, too */
6658 if (ip_in_bb (cfg, bblock, ip + 5))
6668 /* Generic sharing */
6669 /* FIXME: only do this for generic methods if
6670 they are not shared! */
6671 if (context_used && !imt_arg && !array_rank &&
6672 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6673 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6674 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6675 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6678 g_assert (cfg->generic_sharing_context && cmethod);
6682 * We are compiling a call to a
6683 * generic method from shared code,
6684 * which means that we have to look up
6685 * the method in the rgctx and do an
6688 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6691 /* Indirect calls */
6693 g_assert (!imt_arg);
6695 if (*ip == CEE_CALL)
6696 g_assert (context_used);
6697 else if (*ip == CEE_CALLI)
6698 g_assert (!vtable_arg);
6700 /* FIXME: what the hell is this??? */
6701 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6702 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6704 /* Prevent inlining of methods with indirect calls */
6709 int rgctx_reg = mono_alloc_preg (cfg);
6711 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6712 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6713 call = (MonoCallInst*)ins;
6714 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6716 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6718 * Instead of emitting an indirect call, emit a direct call
6719 * with the contents of the aotconst as the patch info.
6721 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6723 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6724 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6727 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6730 if (!MONO_TYPE_IS_VOID (fsig->ret))
6731 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6733 CHECK_CFG_EXCEPTION;
6744 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6745 if (sp [fsig->param_count]->type == STACK_OBJ) {
6746 MonoInst *iargs [2];
6749 iargs [1] = sp [fsig->param_count];
6751 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6754 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6755 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6756 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6757 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6762 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6763 if (!cmethod->klass->element_class->valuetype && !readonly)
6764 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6765 CHECK_TYPELOAD (cmethod->klass);
6768 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6771 g_assert_not_reached ();
6774 CHECK_CFG_EXCEPTION;
6781 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6783 if (!MONO_TYPE_IS_VOID (fsig->ret))
6784 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6786 CHECK_CFG_EXCEPTION;
6796 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6798 } else if (imt_arg) {
6799 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6801 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6804 if (!MONO_TYPE_IS_VOID (fsig->ret))
6805 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6807 CHECK_CFG_EXCEPTION;
6814 if (cfg->method != method) {
6815 /* return from inlined method */
6817 * If in_count == 0, that means the ret is unreachable due to
6818 * being preceded by a throw. In that case, inline_method () will
6819 * handle setting the return value
6820 * (test case: test_0_inline_throw ()).
6822 if (return_var && cfg->cbb->in_count) {
6826 //g_assert (returnvar != -1);
6827 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6828 cfg->ret_var_set = TRUE;
6832 MonoType *ret_type = mono_method_signature (method)->ret;
6836 * Place a seq point here too even though the IL stack is not
6837 * empty, so a step over on
6840 * will work correctly.
6842 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6843 MONO_ADD_INS (cfg->cbb, ins);
6846 g_assert (!return_var);
6849 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6852 if (!cfg->vret_addr) {
6855 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6857 EMIT_NEW_RETLOADA (cfg, ret_addr);
6859 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6860 ins->klass = mono_class_from_mono_type (ret_type);
6863 #ifdef MONO_ARCH_SOFT_FLOAT
6864 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6865 MonoInst *iargs [1];
6869 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6870 mono_arch_emit_setret (cfg, method, conv);
6872 mono_arch_emit_setret (cfg, method, *sp);
6875 mono_arch_emit_setret (cfg, method, *sp);
6880 if (sp != stack_start)
6882 MONO_INST_NEW (cfg, ins, OP_BR);
6884 ins->inst_target_bb = end_bblock;
6885 MONO_ADD_INS (bblock, ins);
6886 link_bblock (cfg, bblock, end_bblock);
6887 start_new_bblock = 1;
6891 MONO_INST_NEW (cfg, ins, OP_BR);
6893 target = ip + 1 + (signed char)(*ip);
6895 GET_BBLOCK (cfg, tblock, target);
6896 link_bblock (cfg, bblock, tblock);
6897 ins->inst_target_bb = tblock;
6898 if (sp != stack_start) {
6899 handle_stack_args (cfg, stack_start, sp - stack_start);
6901 CHECK_UNVERIFIABLE (cfg);
6903 MONO_ADD_INS (bblock, ins);
6904 start_new_bblock = 1;
6905 inline_costs += BRANCH_COST;
6919 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6921 target = ip + 1 + *(signed char*)ip;
6927 inline_costs += BRANCH_COST;
6931 MONO_INST_NEW (cfg, ins, OP_BR);
6934 target = ip + 4 + (gint32)read32(ip);
6936 GET_BBLOCK (cfg, tblock, target);
6937 link_bblock (cfg, bblock, tblock);
6938 ins->inst_target_bb = tblock;
6939 if (sp != stack_start) {
6940 handle_stack_args (cfg, stack_start, sp - stack_start);
6942 CHECK_UNVERIFIABLE (cfg);
6945 MONO_ADD_INS (bblock, ins);
6947 start_new_bblock = 1;
6948 inline_costs += BRANCH_COST;
6955 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6956 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6957 guint32 opsize = is_short ? 1 : 4;
6959 CHECK_OPSIZE (opsize);
6961 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6964 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6969 GET_BBLOCK (cfg, tblock, target);
6970 link_bblock (cfg, bblock, tblock);
6971 GET_BBLOCK (cfg, tblock, ip);
6972 link_bblock (cfg, bblock, tblock);
6974 if (sp != stack_start) {
6975 handle_stack_args (cfg, stack_start, sp - stack_start);
6976 CHECK_UNVERIFIABLE (cfg);
6979 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6980 cmp->sreg1 = sp [0]->dreg;
6981 type_from_op (cmp, sp [0], NULL);
6984 #if SIZEOF_REGISTER == 4
6985 if (cmp->opcode == OP_LCOMPARE_IMM) {
6986 /* Convert it to OP_LCOMPARE */
6987 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6988 ins->type = STACK_I8;
6989 ins->dreg = alloc_dreg (cfg, STACK_I8);
6991 MONO_ADD_INS (bblock, ins);
6992 cmp->opcode = OP_LCOMPARE;
6993 cmp->sreg2 = ins->dreg;
6996 MONO_ADD_INS (bblock, cmp);
6998 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6999 type_from_op (ins, sp [0], NULL);
7000 MONO_ADD_INS (bblock, ins);
7001 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7002 GET_BBLOCK (cfg, tblock, target);
7003 ins->inst_true_bb = tblock;
7004 GET_BBLOCK (cfg, tblock, ip);
7005 ins->inst_false_bb = tblock;
7006 start_new_bblock = 2;
7009 inline_costs += BRANCH_COST;
7024 MONO_INST_NEW (cfg, ins, *ip);
7026 target = ip + 4 + (gint32)read32(ip);
7032 inline_costs += BRANCH_COST;
7036 MonoBasicBlock **targets;
7037 MonoBasicBlock *default_bblock;
7038 MonoJumpInfoBBTable *table;
7039 int offset_reg = alloc_preg (cfg);
7040 int target_reg = alloc_preg (cfg);
7041 int table_reg = alloc_preg (cfg);
7042 int sum_reg = alloc_preg (cfg);
7043 gboolean use_op_switch;
7047 n = read32 (ip + 1);
7050 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7054 CHECK_OPSIZE (n * sizeof (guint32));
7055 target = ip + n * sizeof (guint32);
7057 GET_BBLOCK (cfg, default_bblock, target);
7058 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7060 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7061 for (i = 0; i < n; ++i) {
7062 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7063 targets [i] = tblock;
7064 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7068 if (sp != stack_start) {
7070 * Link the current bb with the targets as well, so handle_stack_args
7071 * will set their in_stack correctly.
7073 link_bblock (cfg, bblock, default_bblock);
7074 for (i = 0; i < n; ++i)
7075 link_bblock (cfg, bblock, targets [i]);
7077 handle_stack_args (cfg, stack_start, sp - stack_start);
7079 CHECK_UNVERIFIABLE (cfg);
7082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7083 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7086 for (i = 0; i < n; ++i)
7087 link_bblock (cfg, bblock, targets [i]);
7089 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7090 table->table = targets;
7091 table->table_size = n;
7093 use_op_switch = FALSE;
7095 /* ARM implements SWITCH statements differently */
7096 /* FIXME: Make it use the generic implementation */
7097 if (!cfg->compile_aot)
7098 use_op_switch = TRUE;
7101 if (COMPILE_LLVM (cfg))
7102 use_op_switch = TRUE;
7104 cfg->cbb->has_jump_table = 1;
7106 if (use_op_switch) {
7107 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7108 ins->sreg1 = src1->dreg;
7109 ins->inst_p0 = table;
7110 ins->inst_many_bb = targets;
7111 ins->klass = GUINT_TO_POINTER (n);
7112 MONO_ADD_INS (cfg->cbb, ins);
7114 if (sizeof (gpointer) == 8)
7115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7119 #if SIZEOF_REGISTER == 8
7120 /* The upper word might not be zero, and we add it to a 64 bit address later */
7121 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7124 if (cfg->compile_aot) {
7125 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7127 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7128 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7129 ins->inst_p0 = table;
7130 ins->dreg = table_reg;
7131 MONO_ADD_INS (cfg->cbb, ins);
7134 /* FIXME: Use load_memindex */
7135 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7137 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7139 start_new_bblock = 1;
7140 inline_costs += (BRANCH_COST * 2);
7160 dreg = alloc_freg (cfg);
7163 dreg = alloc_lreg (cfg);
7166 dreg = alloc_preg (cfg);
7169 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7170 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7171 ins->flags |= ins_flag;
7173 MONO_ADD_INS (bblock, ins);
7188 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7189 ins->flags |= ins_flag;
7191 MONO_ADD_INS (bblock, ins);
7193 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7194 MonoInst *dummy_use;
7195 /* insert call to write barrier */
7196 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7197 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7198 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7208 MONO_INST_NEW (cfg, ins, (*ip));
7210 ins->sreg1 = sp [0]->dreg;
7211 ins->sreg2 = sp [1]->dreg;
7212 type_from_op (ins, sp [0], sp [1]);
7214 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7216 /* Use the immediate opcodes if possible */
7217 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7218 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7219 if (imm_opcode != -1) {
7220 ins->opcode = imm_opcode;
7221 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7224 sp [1]->opcode = OP_NOP;
7228 MONO_ADD_INS ((cfg)->cbb, (ins));
7230 *sp++ = mono_decompose_opcode (cfg, ins);
7247 MONO_INST_NEW (cfg, ins, (*ip));
7249 ins->sreg1 = sp [0]->dreg;
7250 ins->sreg2 = sp [1]->dreg;
7251 type_from_op (ins, sp [0], sp [1]);
7253 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7254 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7256 /* FIXME: Pass opcode to is_inst_imm */
7258 /* Use the immediate opcodes if possible */
7259 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7262 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7263 if (imm_opcode != -1) {
7264 ins->opcode = imm_opcode;
7265 if (sp [1]->opcode == OP_I8CONST) {
7266 #if SIZEOF_REGISTER == 8
7267 ins->inst_imm = sp [1]->inst_l;
7269 ins->inst_ls_word = sp [1]->inst_ls_word;
7270 ins->inst_ms_word = sp [1]->inst_ms_word;
7274 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7277 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7278 if (sp [1]->next == NULL)
7279 sp [1]->opcode = OP_NOP;
7282 MONO_ADD_INS ((cfg)->cbb, (ins));
7284 *sp++ = mono_decompose_opcode (cfg, ins);
7297 case CEE_CONV_OVF_I8:
7298 case CEE_CONV_OVF_U8:
7302 /* Special case this earlier so we have long constants in the IR */
7303 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7304 int data = sp [-1]->inst_c0;
7305 sp [-1]->opcode = OP_I8CONST;
7306 sp [-1]->type = STACK_I8;
7307 #if SIZEOF_REGISTER == 8
7308 if ((*ip) == CEE_CONV_U8)
7309 sp [-1]->inst_c0 = (guint32)data;
7311 sp [-1]->inst_c0 = data;
7313 sp [-1]->inst_ls_word = data;
7314 if ((*ip) == CEE_CONV_U8)
7315 sp [-1]->inst_ms_word = 0;
7317 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7319 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7326 case CEE_CONV_OVF_I4:
7327 case CEE_CONV_OVF_I1:
7328 case CEE_CONV_OVF_I2:
7329 case CEE_CONV_OVF_I:
7330 case CEE_CONV_OVF_U:
7333 if (sp [-1]->type == STACK_R8) {
7334 ADD_UNOP (CEE_CONV_OVF_I8);
7341 case CEE_CONV_OVF_U1:
7342 case CEE_CONV_OVF_U2:
7343 case CEE_CONV_OVF_U4:
7346 if (sp [-1]->type == STACK_R8) {
7347 ADD_UNOP (CEE_CONV_OVF_U8);
7354 case CEE_CONV_OVF_I1_UN:
7355 case CEE_CONV_OVF_I2_UN:
7356 case CEE_CONV_OVF_I4_UN:
7357 case CEE_CONV_OVF_I8_UN:
7358 case CEE_CONV_OVF_U1_UN:
7359 case CEE_CONV_OVF_U2_UN:
7360 case CEE_CONV_OVF_U4_UN:
7361 case CEE_CONV_OVF_U8_UN:
7362 case CEE_CONV_OVF_I_UN:
7363 case CEE_CONV_OVF_U_UN:
7370 CHECK_CFG_EXCEPTION;
7374 case CEE_ADD_OVF_UN:
7376 case CEE_MUL_OVF_UN:
7378 case CEE_SUB_OVF_UN:
7386 token = read32 (ip + 1);
7387 klass = mini_get_class (method, token, generic_context);
7388 CHECK_TYPELOAD (klass);
7390 if (generic_class_is_reference_type (cfg, klass)) {
7391 MonoInst *store, *load;
7392 int dreg = alloc_preg (cfg);
7394 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7395 load->flags |= ins_flag;
7396 MONO_ADD_INS (cfg->cbb, load);
7398 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7399 store->flags |= ins_flag;
7400 MONO_ADD_INS (cfg->cbb, store);
7402 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7403 MonoInst *dummy_use;
7404 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7405 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7406 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7409 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7421 token = read32 (ip + 1);
7422 klass = mini_get_class (method, token, generic_context);
7423 CHECK_TYPELOAD (klass);
7425 /* Optimize the common ldobj+stloc combination */
7435 loc_index = ip [5] - CEE_STLOC_0;
7442 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7443 CHECK_LOCAL (loc_index);
7445 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7446 ins->dreg = cfg->locals [loc_index]->dreg;
7452 /* Optimize the ldobj+stobj combination */
7453 /* The reference case ends up being a load+store anyway */
7454 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7459 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7466 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7475 CHECK_STACK_OVF (1);
7477 n = read32 (ip + 1);
7479 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7480 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7481 ins->type = STACK_OBJ;
7484 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7485 MonoInst *iargs [1];
7487 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7488 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7490 if (cfg->opt & MONO_OPT_SHARED) {
7491 MonoInst *iargs [3];
7493 if (cfg->compile_aot) {
7494 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7496 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7497 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7498 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7499 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7500 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7502 if (bblock->out_of_line) {
7503 MonoInst *iargs [2];
7505 if (image == mono_defaults.corlib) {
7507 * Avoid relocations in AOT and save some space by using a
7508 * version of helper_ldstr specialized to mscorlib.
7510 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7511 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7513 /* Avoid creating the string object */
7514 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7515 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7516 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7520 if (cfg->compile_aot) {
7521 NEW_LDSTRCONST (cfg, ins, image, n);
7523 MONO_ADD_INS (bblock, ins);
7526 NEW_PCONST (cfg, ins, NULL);
7527 ins->type = STACK_OBJ;
7528 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7530 MONO_ADD_INS (bblock, ins);
7539 MonoInst *iargs [2];
7540 MonoMethodSignature *fsig;
7543 MonoInst *vtable_arg = NULL;
7546 token = read32 (ip + 1);
7547 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7550 fsig = mono_method_get_signature (cmethod, image, token);
7554 mono_save_token_info (cfg, image, token, cmethod);
7556 if (!mono_class_init (cmethod->klass))
7559 if (cfg->generic_sharing_context)
7560 context_used = mono_method_check_context_used (cmethod);
7562 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7563 if (check_linkdemand (cfg, method, cmethod))
7565 CHECK_CFG_EXCEPTION;
7566 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7567 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7570 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7571 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7572 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7573 mono_class_vtable (cfg->domain, cmethod->klass);
7574 CHECK_TYPELOAD (cmethod->klass);
7576 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7577 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7580 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7581 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7583 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7585 CHECK_TYPELOAD (cmethod->klass);
7586 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7591 n = fsig->param_count;
7595 * Generate smaller code for the common newobj <exception> instruction in
7596 * argument checking code.
7598 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7599 is_exception_class (cmethod->klass) && n <= 2 &&
7600 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7601 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7602 MonoInst *iargs [3];
7604 g_assert (!vtable_arg);
7608 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7611 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7615 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7620 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7623 g_assert_not_reached ();
7631 /* move the args to allow room for 'this' in the first position */
7637 /* check_call_signature () requires sp[0] to be set */
7638 this_ins.type = STACK_OBJ;
7640 if (check_call_signature (cfg, fsig, sp))
7645 if (mini_class_is_system_array (cmethod->klass)) {
7646 g_assert (!vtable_arg);
7648 *sp = emit_get_rgctx_method (cfg, context_used,
7649 cmethod, MONO_RGCTX_INFO_METHOD);
7651 /* Avoid varargs in the common case */
7652 if (fsig->param_count == 1)
7653 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7654 else if (fsig->param_count == 2)
7655 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7656 else if (fsig->param_count == 3)
7657 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7659 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7660 } else if (cmethod->string_ctor) {
7661 g_assert (!context_used);
7662 g_assert (!vtable_arg);
7663 /* we simply pass a null pointer */
7664 EMIT_NEW_PCONST (cfg, *sp, NULL);
7665 /* now call the string ctor */
7666 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7668 MonoInst* callvirt_this_arg = NULL;
7670 if (cmethod->klass->valuetype) {
7671 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7672 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7673 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7678 * The code generated by mini_emit_virtual_call () expects
7679 * iargs [0] to be a boxed instance, but luckily the vcall
7680 * will be transformed into a normal call there.
7682 } else if (context_used) {
7683 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7686 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7688 CHECK_TYPELOAD (cmethod->klass);
7691 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7692 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7693 * As a workaround, we call class cctors before allocating objects.
7695 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7696 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7697 if (cfg->verbose_level > 2)
7698 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7699 class_inits = g_slist_prepend (class_inits, vtable);
7702 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7705 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7708 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7710 /* Now call the actual ctor */
7711 /* Avoid virtual calls to ctors if possible */
7712 if (cmethod->klass->marshalbyref)
7713 callvirt_this_arg = sp [0];
7716 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7717 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7718 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7723 CHECK_CFG_EXCEPTION;
7728 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7729 mono_method_check_inlining (cfg, cmethod) &&
7730 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7731 !g_list_find (dont_inline, cmethod)) {
7734 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7735 cfg->real_offset += 5;
7738 inline_costs += costs - 5;
7741 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7743 } else if (context_used &&
7744 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7745 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7746 MonoInst *cmethod_addr;
7748 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7749 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7751 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7754 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7755 callvirt_this_arg, NULL, vtable_arg);
7759 if (alloc == NULL) {
7761 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7762 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7776 token = read32 (ip + 1);
7777 klass = mini_get_class (method, token, generic_context);
7778 CHECK_TYPELOAD (klass);
7779 if (sp [0]->type != STACK_OBJ)
7782 if (cfg->generic_sharing_context)
7783 context_used = mono_class_check_context_used (klass);
7785 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7792 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7794 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7798 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7799 MonoMethod *mono_castclass;
7800 MonoInst *iargs [1];
7803 mono_castclass = mono_marshal_get_castclass (klass);
7806 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7807 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7808 g_assert (costs > 0);
7811 cfg->real_offset += 5;
7816 inline_costs += costs;
7819 ins = handle_castclass (cfg, klass, *sp, context_used);
7820 CHECK_CFG_EXCEPTION;
7830 token = read32 (ip + 1);
7831 klass = mini_get_class (method, token, generic_context);
7832 CHECK_TYPELOAD (klass);
7833 if (sp [0]->type != STACK_OBJ)
7836 if (cfg->generic_sharing_context)
7837 context_used = mono_class_check_context_used (klass);
7839 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7846 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7848 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7852 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7853 MonoMethod *mono_isinst;
7854 MonoInst *iargs [1];
7857 mono_isinst = mono_marshal_get_isinst (klass);
7860 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7861 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7862 g_assert (costs > 0);
7865 cfg->real_offset += 5;
7870 inline_costs += costs;
7873 ins = handle_isinst (cfg, klass, *sp, context_used);
7874 CHECK_CFG_EXCEPTION;
7881 case CEE_UNBOX_ANY: {
7885 token = read32 (ip + 1);
7886 klass = mini_get_class (method, token, generic_context);
7887 CHECK_TYPELOAD (klass);
7889 mono_save_token_info (cfg, image, token, klass);
7891 if (cfg->generic_sharing_context)
7892 context_used = mono_class_check_context_used (klass);
7894 if (generic_class_is_reference_type (cfg, klass)) {
7895 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7896 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7897 MonoMethod *mono_castclass;
7898 MonoInst *iargs [1];
7901 mono_castclass = mono_marshal_get_castclass (klass);
7904 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7905 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7907 g_assert (costs > 0);
7910 cfg->real_offset += 5;
7914 inline_costs += costs;
7916 ins = handle_castclass (cfg, klass, *sp, context_used);
7917 CHECK_CFG_EXCEPTION;
7925 if (mono_class_is_nullable (klass)) {
7926 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7933 ins = handle_unbox (cfg, klass, sp, context_used);
7939 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7952 token = read32 (ip + 1);
7953 klass = mini_get_class (method, token, generic_context);
7954 CHECK_TYPELOAD (klass);
7956 mono_save_token_info (cfg, image, token, klass);
7958 if (cfg->generic_sharing_context)
7959 context_used = mono_class_check_context_used (klass);
7961 if (generic_class_is_reference_type (cfg, klass)) {
7967 if (klass == mono_defaults.void_class)
7969 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7971 /* frequent check in generic code: box (struct), brtrue */
7973 // FIXME: LLVM can't handle the inconsistent bb linking
7974 if (!mono_class_is_nullable (klass) &&
7975 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
7976 (ip [5] == CEE_BRTRUE ||
7977 ip [5] == CEE_BRTRUE_S ||
7978 ip [5] == CEE_BRFALSE ||
7979 ip [5] == CEE_BRFALSE_S)) {
7980 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
7982 MonoBasicBlock *true_bb, *false_bb;
7986 if (cfg->verbose_level > 3) {
7987 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7988 printf ("<box+brtrue opt>\n");
7996 target = ip + 1 + (signed char)(*ip);
8003 target = ip + 4 + (gint)(read32 (ip));
8007 g_assert_not_reached ();
8011 * We need to link both bblocks, since it is needed for handling stack
8012 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8013 * Branching to only one of them would lead to inconsistencies, so
8014 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8016 GET_BBLOCK (cfg, true_bb, target);
8017 GET_BBLOCK (cfg, false_bb, ip);
8019 mono_link_bblock (cfg, cfg->cbb, true_bb);
8020 mono_link_bblock (cfg, cfg->cbb, false_bb);
8022 if (sp != stack_start) {
8023 handle_stack_args (cfg, stack_start, sp - stack_start);
8025 CHECK_UNVERIFIABLE (cfg);
8028 if (COMPILE_LLVM (cfg)) {
8029 dreg = alloc_ireg (cfg);
8030 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8033 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8035 /* The JIT can't eliminate the iconst+compare */
8036 MONO_INST_NEW (cfg, ins, OP_BR);
8037 ins->inst_target_bb = is_true ? true_bb : false_bb;
8038 MONO_ADD_INS (cfg->cbb, ins);
8041 start_new_bblock = 1;
8045 *sp++ = handle_box (cfg, val, klass, context_used);
8047 CHECK_CFG_EXCEPTION;
8056 token = read32 (ip + 1);
8057 klass = mini_get_class (method, token, generic_context);
8058 CHECK_TYPELOAD (klass);
8060 mono_save_token_info (cfg, image, token, klass);
8062 if (cfg->generic_sharing_context)
8063 context_used = mono_class_check_context_used (klass);
8065 if (mono_class_is_nullable (klass)) {
8068 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8069 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8073 ins = handle_unbox (cfg, klass, sp, context_used);
8083 MonoClassField *field;
8087 if (*ip == CEE_STFLD) {
8094 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8096 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8099 token = read32 (ip + 1);
8100 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8101 field = mono_method_get_wrapper_data (method, token);
8102 klass = field->parent;
8105 field = mono_field_from_token (image, token, &klass, generic_context);
8109 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8110 FIELD_ACCESS_FAILURE;
8111 mono_class_init (klass);
8113 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8114 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8115 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8116 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8119 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8120 if (*ip == CEE_STFLD) {
8121 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8123 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8124 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8125 MonoInst *iargs [5];
8128 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8129 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8130 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8134 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8135 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8136 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8137 g_assert (costs > 0);
8139 cfg->real_offset += 5;
8142 inline_costs += costs;
8144 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8149 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8151 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8152 if (sp [0]->opcode != OP_LDADDR)
8153 store->flags |= MONO_INST_FAULT;
8155 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8156 /* insert call to write barrier */
8157 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8158 MonoInst *iargs [2], *dummy_use;
8161 dreg = alloc_preg (cfg);
8162 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8164 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8166 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8169 store->flags |= ins_flag;
8176 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8177 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8178 MonoInst *iargs [4];
8181 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8182 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8183 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8184 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8185 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8186 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8188 g_assert (costs > 0);
8190 cfg->real_offset += 5;
8194 inline_costs += costs;
8196 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8200 if (sp [0]->type == STACK_VTYPE) {
8203 /* Have to compute the address of the variable */
8205 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8207 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8209 g_assert (var->klass == klass);
8211 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8215 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8217 if (*ip == CEE_LDFLDA) {
8218 dreg = alloc_preg (cfg);
8220 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8221 ins->klass = mono_class_from_mono_type (field->type);
8222 ins->type = STACK_MP;
8227 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8228 load->flags |= ins_flag;
8229 if (sp [0]->opcode != OP_LDADDR)
8230 load->flags |= MONO_INST_FAULT;
8241 MonoClassField *field;
8242 gpointer addr = NULL;
8243 gboolean is_special_static;
8246 token = read32 (ip + 1);
8248 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8249 field = mono_method_get_wrapper_data (method, token);
8250 klass = field->parent;
8253 field = mono_field_from_token (image, token, &klass, generic_context);
8256 mono_class_init (klass);
8257 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8258 FIELD_ACCESS_FAILURE;
8260 /* if the class is Critical then transparent code cannot access its fields */
8261 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8262 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8265 * We can only support shared generic static
8266 * field access on architectures where the
8267 * trampoline code has been extended to handle
8268 * the generic class init.
8270 #ifndef MONO_ARCH_VTABLE_REG
8271 GENERIC_SHARING_FAILURE (*ip);
8274 if (cfg->generic_sharing_context)
8275 context_used = mono_class_check_context_used (klass);
8277 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8279 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8280 * to be called here.
8282 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8283 mono_class_vtable (cfg->domain, klass);
8284 CHECK_TYPELOAD (klass);
8286 mono_domain_lock (cfg->domain);
8287 if (cfg->domain->special_static_fields)
8288 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8289 mono_domain_unlock (cfg->domain);
8291 is_special_static = mono_class_field_is_special_static (field);
8293 /* Generate IR to compute the field address */
8294 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8296 * Fast access to TLS data
8297 * Inline version of get_thread_static_data () in
8301 int idx, static_data_reg, array_reg, dreg;
8302 MonoInst *thread_ins;
8304 // offset &= 0x7fffffff;
8305 // idx = (offset >> 24) - 1;
8306 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8308 thread_ins = mono_get_thread_intrinsic (cfg);
8309 MONO_ADD_INS (cfg->cbb, thread_ins);
8310 static_data_reg = alloc_ireg (cfg);
8311 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8313 if (cfg->compile_aot) {
8314 int offset_reg, offset2_reg, idx_reg;
8316 /* For TLS variables, this will return the TLS offset */
8317 EMIT_NEW_SFLDACONST (cfg, ins, field);
8318 offset_reg = ins->dreg;
8319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8320 idx_reg = alloc_ireg (cfg);
8321 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8324 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8325 array_reg = alloc_ireg (cfg);
8326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8327 offset2_reg = alloc_ireg (cfg);
8328 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8329 dreg = alloc_ireg (cfg);
8330 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8332 offset = (gsize)addr & 0x7fffffff;
8333 idx = (offset >> 24) - 1;
8335 array_reg = alloc_ireg (cfg);
8336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8337 dreg = alloc_ireg (cfg);
8338 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8340 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8341 (cfg->compile_aot && is_special_static) ||
8342 (context_used && is_special_static)) {
8343 MonoInst *iargs [2];
8345 g_assert (field->parent);
8346 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8348 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8349 field, MONO_RGCTX_INFO_CLASS_FIELD);
8351 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8353 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8354 } else if (context_used) {
8355 MonoInst *static_data;
8358 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8359 method->klass->name_space, method->klass->name, method->name,
8360 depth, field->offset);
8363 if (mono_class_needs_cctor_run (klass, method))
8364 emit_generic_class_init (cfg, klass);
8367 * The pointer we're computing here is
8369 * super_info.static_data + field->offset
8371 static_data = emit_get_rgctx_klass (cfg, context_used,
8372 klass, MONO_RGCTX_INFO_STATIC_DATA);
8374 if (field->offset == 0) {
8377 int addr_reg = mono_alloc_preg (cfg);
8378 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8380 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8381 MonoInst *iargs [2];
8383 g_assert (field->parent);
8384 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8385 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8386 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8388 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8390 CHECK_TYPELOAD (klass);
8392 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8393 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8394 if (cfg->verbose_level > 2)
8395 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8396 class_inits = g_slist_prepend (class_inits, vtable);
8398 if (cfg->run_cctors) {
8400 					/* This ensures that inlining cannot trigger */
8401 /* .cctors: too many apps depend on them */
8402 /* running with a specific order... */
8403 if (! vtable->initialized)
8405 ex = mono_runtime_class_init_full (vtable, FALSE);
8407 set_exception_object (cfg, ex);
8408 goto exception_exit;
8412 addr = (char*)vtable->data + field->offset;
8414 if (cfg->compile_aot)
8415 EMIT_NEW_SFLDACONST (cfg, ins, field);
8417 EMIT_NEW_PCONST (cfg, ins, addr);
8419 MonoInst *iargs [1];
8420 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8421 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8425 /* Generate IR to do the actual load/store operation */
8427 if (*ip == CEE_LDSFLDA) {
8428 ins->klass = mono_class_from_mono_type (field->type);
8429 ins->type = STACK_PTR;
8431 } else if (*ip == CEE_STSFLD) {
8436 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8437 store->flags |= ins_flag;
8439 gboolean is_const = FALSE;
8440 MonoVTable *vtable = NULL;
8442 if (!context_used) {
8443 vtable = mono_class_vtable (cfg->domain, klass);
8444 CHECK_TYPELOAD (klass);
8446 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8447 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8448 gpointer addr = (char*)vtable->data + field->offset;
8449 int ro_type = field->type->type;
8450 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8451 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8453 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8456 case MONO_TYPE_BOOLEAN:
8458 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8462 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8465 case MONO_TYPE_CHAR:
8467 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8471 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8476 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8480 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8486 case MONO_TYPE_FNPTR:
8487 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8488 type_to_eval_stack_type ((cfg), field->type, *sp);
8491 case MONO_TYPE_STRING:
8492 case MONO_TYPE_OBJECT:
8493 case MONO_TYPE_CLASS:
8494 case MONO_TYPE_SZARRAY:
8495 case MONO_TYPE_ARRAY:
8496 if (!mono_gc_is_moving ()) {
8497 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8498 type_to_eval_stack_type ((cfg), field->type, *sp);
8506 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8511 case MONO_TYPE_VALUETYPE:
8521 CHECK_STACK_OVF (1);
8523 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8524 load->flags |= ins_flag;
8537 token = read32 (ip + 1);
8538 klass = mini_get_class (method, token, generic_context);
8539 CHECK_TYPELOAD (klass);
8540 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8541 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8542 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8543 generic_class_is_reference_type (cfg, klass)) {
8544 MonoInst *dummy_use;
8545 /* insert call to write barrier */
8546 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8547 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8548 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8560 const char *data_ptr;
8562 guint32 field_token;
8568 token = read32 (ip + 1);
8570 klass = mini_get_class (method, token, generic_context);
8571 CHECK_TYPELOAD (klass);
8573 if (cfg->generic_sharing_context)
8574 context_used = mono_class_check_context_used (klass);
8576 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8577 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8578 ins->sreg1 = sp [0]->dreg;
8579 ins->type = STACK_I4;
8580 ins->dreg = alloc_ireg (cfg);
8581 MONO_ADD_INS (cfg->cbb, ins);
8582 *sp = mono_decompose_opcode (cfg, ins);
8587 MonoClass *array_class = mono_array_class_get (klass, 1);
8588 /* FIXME: we cannot get a managed
8589 allocator because we can't get the
8590 open generic class's vtable. We
8591 have the same problem in
8592 handle_alloc(). This
8593 needs to be solved so that we can
8594 have managed allocs of shared
8597 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8598 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8600 MonoMethod *managed_alloc = NULL;
8602 /* FIXME: Decompose later to help abcrem */
8605 args [0] = emit_get_rgctx_klass (cfg, context_used,
8606 array_class, MONO_RGCTX_INFO_VTABLE);
8611 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8613 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8615 if (cfg->opt & MONO_OPT_SHARED) {
8616 /* Decompose now to avoid problems with references to the domainvar */
8617 MonoInst *iargs [3];
8619 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8620 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8623 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8625 /* Decompose later since it is needed by abcrem */
8626 MonoClass *array_type = mono_array_class_get (klass, 1);
8627 mono_class_vtable (cfg->domain, array_type);
8628 CHECK_TYPELOAD (array_type);
8630 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8631 ins->dreg = alloc_preg (cfg);
8632 ins->sreg1 = sp [0]->dreg;
8633 ins->inst_newa_class = klass;
8634 ins->type = STACK_OBJ;
8636 MONO_ADD_INS (cfg->cbb, ins);
8637 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8638 cfg->cbb->has_array_access = TRUE;
8640 /* Needed so mono_emit_load_get_addr () gets called */
8641 mono_get_got_var (cfg);
8651 * we inline/optimize the initialization sequence if possible.
8652 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8653 * for small sizes open code the memcpy
8654 * ensure the rva field is big enough
8656 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8657 MonoMethod *memcpy_method = get_memcpy_method ();
8658 MonoInst *iargs [3];
8659 int add_reg = alloc_preg (cfg);
8661 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8662 if (cfg->compile_aot) {
8663 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8665 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8667 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8668 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8677 if (sp [0]->type != STACK_OBJ)
8680 dreg = alloc_preg (cfg);
8681 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8682 ins->dreg = alloc_preg (cfg);
8683 ins->sreg1 = sp [0]->dreg;
8684 ins->type = STACK_I4;
8685 /* This flag will be inherited by the decomposition */
8686 ins->flags |= MONO_INST_FAULT;
8687 MONO_ADD_INS (cfg->cbb, ins);
8688 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8689 cfg->cbb->has_array_access = TRUE;
8697 if (sp [0]->type != STACK_OBJ)
8700 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8702 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8703 CHECK_TYPELOAD (klass);
8704 /* we need to make sure that this array is exactly the type it needs
8705 * to be for correctness. the wrappers are lax with their usage
8706 * so we need to ignore them here
8708 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8709 MonoClass *array_class = mono_array_class_get (klass, 1);
8710 mini_emit_check_array_type (cfg, sp [0], array_class);
8711 CHECK_TYPELOAD (array_class);
8715 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8730 case CEE_LDELEM_REF: {
8736 if (*ip == CEE_LDELEM) {
8738 token = read32 (ip + 1);
8739 klass = mini_get_class (method, token, generic_context);
8740 CHECK_TYPELOAD (klass);
8741 mono_class_init (klass);
8744 klass = array_access_to_klass (*ip);
8746 if (sp [0]->type != STACK_OBJ)
8749 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8751 if (sp [1]->opcode == OP_ICONST) {
8752 int array_reg = sp [0]->dreg;
8753 int index_reg = sp [1]->dreg;
8754 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8756 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8757 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8759 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8760 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8763 if (*ip == CEE_LDELEM)
8776 case CEE_STELEM_REF:
8783 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8785 if (*ip == CEE_STELEM) {
8787 token = read32 (ip + 1);
8788 klass = mini_get_class (method, token, generic_context);
8789 CHECK_TYPELOAD (klass);
8790 mono_class_init (klass);
8793 klass = array_access_to_klass (*ip);
8795 if (sp [0]->type != STACK_OBJ)
8798 /* storing a NULL doesn't need any of the complex checks in stelemref */
8799 if (generic_class_is_reference_type (cfg, klass) &&
8800 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8801 MonoMethod* helper = mono_marshal_get_stelemref ();
8802 MonoInst *iargs [3];
8804 if (sp [0]->type != STACK_OBJ)
8806 if (sp [2]->type != STACK_OBJ)
8813 mono_emit_method_call (cfg, helper, iargs, NULL);
8815 if (sp [1]->opcode == OP_ICONST) {
8816 int array_reg = sp [0]->dreg;
8817 int index_reg = sp [1]->dreg;
8818 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8820 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8821 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8823 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8824 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8828 if (*ip == CEE_STELEM)
8835 case CEE_CKFINITE: {
8839 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8840 ins->sreg1 = sp [0]->dreg;
8841 ins->dreg = alloc_freg (cfg);
8842 ins->type = STACK_R8;
8843 MONO_ADD_INS (bblock, ins);
8845 *sp++ = mono_decompose_opcode (cfg, ins);
8850 case CEE_REFANYVAL: {
8851 MonoInst *src_var, *src;
8853 int klass_reg = alloc_preg (cfg);
8854 int dreg = alloc_preg (cfg);
8857 MONO_INST_NEW (cfg, ins, *ip);
8860 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8861 CHECK_TYPELOAD (klass);
8862 mono_class_init (klass);
8864 if (cfg->generic_sharing_context)
8865 context_used = mono_class_check_context_used (klass);
8868 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8870 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8871 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8875 MonoInst *klass_ins;
8877 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8878 klass, MONO_RGCTX_INFO_KLASS);
8881 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8882 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8884 mini_emit_class_check (cfg, klass_reg, klass);
8886 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8887 ins->type = STACK_MP;
8892 case CEE_MKREFANY: {
8893 MonoInst *loc, *addr;
8896 MONO_INST_NEW (cfg, ins, *ip);
8899 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8900 CHECK_TYPELOAD (klass);
8901 mono_class_init (klass);
8903 if (cfg->generic_sharing_context)
8904 context_used = mono_class_check_context_used (klass);
8906 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8907 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8910 MonoInst *const_ins;
8911 int type_reg = alloc_preg (cfg);
8913 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8917 } else if (cfg->compile_aot) {
8918 int const_reg = alloc_preg (cfg);
8919 int type_reg = alloc_preg (cfg);
8921 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8924 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8926 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8927 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8931 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8932 ins->type = STACK_VTYPE;
8933 ins->klass = mono_defaults.typed_reference_class;
8940 MonoClass *handle_class;
8942 CHECK_STACK_OVF (1);
8945 n = read32 (ip + 1);
8947 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8948 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8949 handle = mono_method_get_wrapper_data (method, n);
8950 handle_class = mono_method_get_wrapper_data (method, n + 1);
8951 if (handle_class == mono_defaults.typehandle_class)
8952 handle = &((MonoClass*)handle)->byval_arg;
8955 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8959 mono_class_init (handle_class);
8960 if (cfg->generic_sharing_context) {
8961 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8962 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8963 /* This case handles ldtoken
8964 of an open type, like for
8967 } else if (handle_class == mono_defaults.typehandle_class) {
8968 /* If we get a MONO_TYPE_CLASS
8969 then we need to provide the
8971 instantiation of it. */
8972 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8975 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8976 } else if (handle_class == mono_defaults.fieldhandle_class)
8977 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8978 else if (handle_class == mono_defaults.methodhandle_class)
8979 context_used = mono_method_check_context_used (handle);
8981 g_assert_not_reached ();
8984 if ((cfg->opt & MONO_OPT_SHARED) &&
8985 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8986 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8987 MonoInst *addr, *vtvar, *iargs [3];
8988 int method_context_used;
8990 if (cfg->generic_sharing_context)
8991 method_context_used = mono_method_check_context_used (method);
8993 method_context_used = 0;
8995 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8997 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8998 EMIT_NEW_ICONST (cfg, iargs [1], n);
8999 if (method_context_used) {
9000 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9001 method, MONO_RGCTX_INFO_METHOD);
9002 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9004 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9005 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9007 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9009 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9011 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9013 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9014 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9015 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9016 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9017 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9018 MonoClass *tclass = mono_class_from_mono_type (handle);
9020 mono_class_init (tclass);
9022 ins = emit_get_rgctx_klass (cfg, context_used,
9023 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9024 } else if (cfg->compile_aot) {
9025 if (method->wrapper_type) {
9026 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9027 /* Special case for static synchronized wrappers */
9028 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9030 /* FIXME: n is not a normal token */
9031 cfg->disable_aot = TRUE;
9032 EMIT_NEW_PCONST (cfg, ins, NULL);
9035 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9038 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9040 ins->type = STACK_OBJ;
9041 ins->klass = cmethod->klass;
9044 MonoInst *addr, *vtvar;
9046 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9049 if (handle_class == mono_defaults.typehandle_class) {
9050 ins = emit_get_rgctx_klass (cfg, context_used,
9051 mono_class_from_mono_type (handle),
9052 MONO_RGCTX_INFO_TYPE);
9053 } else if (handle_class == mono_defaults.methodhandle_class) {
9054 ins = emit_get_rgctx_method (cfg, context_used,
9055 handle, MONO_RGCTX_INFO_METHOD);
9056 } else if (handle_class == mono_defaults.fieldhandle_class) {
9057 ins = emit_get_rgctx_field (cfg, context_used,
9058 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9060 g_assert_not_reached ();
9062 } else if (cfg->compile_aot) {
9063 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9065 EMIT_NEW_PCONST (cfg, ins, handle);
9067 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9068 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9069 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9079 MONO_INST_NEW (cfg, ins, OP_THROW);
9081 ins->sreg1 = sp [0]->dreg;
9083 bblock->out_of_line = TRUE;
9084 MONO_ADD_INS (bblock, ins);
9085 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9086 MONO_ADD_INS (bblock, ins);
9089 link_bblock (cfg, bblock, end_bblock);
9090 start_new_bblock = 1;
9092 case CEE_ENDFINALLY:
9093 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9094 MONO_ADD_INS (bblock, ins);
9096 start_new_bblock = 1;
9099 * Control will leave the method so empty the stack, otherwise
9100 * the next basic block will start with a nonempty stack.
9102 while (sp != stack_start) {
9110 if (*ip == CEE_LEAVE) {
9112 target = ip + 5 + (gint32)read32(ip + 1);
9115 target = ip + 2 + (signed char)(ip [1]);
9118 /* empty the stack */
9119 while (sp != stack_start) {
9124 * If this leave statement is in a catch block, check for a
9125 * pending exception, and rethrow it if necessary.
9126 			 * We avoid doing this in runtime invoke wrappers, since those are called
9127 			 * by native code which expects the wrapper to catch all exceptions.
9129 for (i = 0; i < header->num_clauses; ++i) {
9130 MonoExceptionClause *clause = &header->clauses [i];
9133 * Use <= in the final comparison to handle clauses with multiple
9134 * leave statements, like in bug #78024.
9135 * The ordering of the exception clauses guarantees that we find the
9138 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9140 MonoBasicBlock *dont_throw;
9145 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9148 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9150 NEW_BBLOCK (cfg, dont_throw);
9153 				 * Currently, we always rethrow the abort exception, despite the
9154 				 * fact that this is not correct. See thread6.cs for an example.
9155 				 * But propagating the abort exception is more important than
9156 				 * getting the semantics right.
9158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9160 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9162 MONO_START_BB (cfg, dont_throw);
9167 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9169 MonoExceptionClause *clause;
9171 for (tmp = handlers; tmp; tmp = tmp->next) {
9173 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9175 link_bblock (cfg, bblock, tblock);
9176 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9177 ins->inst_target_bb = tblock;
9178 ins->inst_eh_block = clause;
9179 MONO_ADD_INS (bblock, ins);
9180 bblock->has_call_handler = 1;
9181 if (COMPILE_LLVM (cfg)) {
9182 MonoBasicBlock *target_bb;
9185 * Link the finally bblock with the target, since it will
9186 * conceptually branch there.
9187 * FIXME: Have to link the bblock containing the endfinally.
9189 GET_BBLOCK (cfg, target_bb, target);
9190 link_bblock (cfg, tblock, target_bb);
9193 g_list_free (handlers);
9196 MONO_INST_NEW (cfg, ins, OP_BR);
9197 MONO_ADD_INS (bblock, ins);
9198 GET_BBLOCK (cfg, tblock, target);
9199 link_bblock (cfg, bblock, tblock);
9200 ins->inst_target_bb = tblock;
9201 start_new_bblock = 1;
9203 if (*ip == CEE_LEAVE)
9212 * Mono specific opcodes
9214 case MONO_CUSTOM_PREFIX: {
9216 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9220 case CEE_MONO_ICALL: {
9222 MonoJitICallInfo *info;
9224 token = read32 (ip + 2);
9225 func = mono_method_get_wrapper_data (method, token);
9226 info = mono_find_jit_icall_by_addr (func);
9229 CHECK_STACK (info->sig->param_count);
9230 sp -= info->sig->param_count;
9232 ins = mono_emit_jit_icall (cfg, info->func, sp);
9233 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9237 inline_costs += 10 * num_calls++;
9241 case CEE_MONO_LDPTR: {
9244 CHECK_STACK_OVF (1);
9246 token = read32 (ip + 2);
9248 ptr = mono_method_get_wrapper_data (method, token);
9249 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9250 MonoJitICallInfo *callinfo;
9251 const char *icall_name;
9253 icall_name = method->name + strlen ("__icall_wrapper_");
9254 g_assert (icall_name);
9255 callinfo = mono_find_jit_icall_by_name (icall_name);
9256 g_assert (callinfo);
9258 if (ptr == callinfo->func) {
9259 /* Will be transformed into an AOTCONST later */
9260 EMIT_NEW_PCONST (cfg, ins, ptr);
9266 /* FIXME: Generalize this */
9267 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9268 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9273 EMIT_NEW_PCONST (cfg, ins, ptr);
9276 inline_costs += 10 * num_calls++;
9277 /* Can't embed random pointers into AOT code */
9278 cfg->disable_aot = 1;
9281 case CEE_MONO_ICALL_ADDR: {
9282 MonoMethod *cmethod;
9285 CHECK_STACK_OVF (1);
9287 token = read32 (ip + 2);
9289 cmethod = mono_method_get_wrapper_data (method, token);
9291 if (cfg->compile_aot) {
9292 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9294 ptr = mono_lookup_internal_call (cmethod);
9296 EMIT_NEW_PCONST (cfg, ins, ptr);
9302 case CEE_MONO_VTADDR: {
9303 MonoInst *src_var, *src;
9309 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9310 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9315 case CEE_MONO_NEWOBJ: {
9316 MonoInst *iargs [2];
9318 CHECK_STACK_OVF (1);
9320 token = read32 (ip + 2);
9321 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9322 mono_class_init (klass);
9323 NEW_DOMAINCONST (cfg, iargs [0]);
9324 MONO_ADD_INS (cfg->cbb, iargs [0]);
9325 NEW_CLASSCONST (cfg, iargs [1], klass);
9326 MONO_ADD_INS (cfg->cbb, iargs [1]);
9327 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9329 inline_costs += 10 * num_calls++;
9332 case CEE_MONO_OBJADDR:
9335 MONO_INST_NEW (cfg, ins, OP_MOVE);
9336 ins->dreg = alloc_preg (cfg);
9337 ins->sreg1 = sp [0]->dreg;
9338 ins->type = STACK_MP;
9339 MONO_ADD_INS (cfg->cbb, ins);
9343 case CEE_MONO_LDNATIVEOBJ:
9345 * Similar to LDOBJ, but instead load the unmanaged
9346 * representation of the vtype to the stack.
9351 token = read32 (ip + 2);
9352 klass = mono_method_get_wrapper_data (method, token);
9353 g_assert (klass->valuetype);
9354 mono_class_init (klass);
9357 MonoInst *src, *dest, *temp;
9360 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9361 temp->backend.is_pinvoke = 1;
9362 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9363 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9365 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9366 dest->type = STACK_VTYPE;
9367 dest->klass = klass;
9373 case CEE_MONO_RETOBJ: {
9375 * Same as RET, but return the native representation of a vtype
9378 g_assert (cfg->ret);
9379 g_assert (mono_method_signature (method)->pinvoke);
9384 token = read32 (ip + 2);
9385 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9387 if (!cfg->vret_addr) {
9388 g_assert (cfg->ret_var_is_local);
9390 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9392 EMIT_NEW_RETLOADA (cfg, ins);
9394 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9396 if (sp != stack_start)
9399 MONO_INST_NEW (cfg, ins, OP_BR);
9400 ins->inst_target_bb = end_bblock;
9401 MONO_ADD_INS (bblock, ins);
9402 link_bblock (cfg, bblock, end_bblock);
9403 start_new_bblock = 1;
9407 case CEE_MONO_CISINST:
9408 case CEE_MONO_CCASTCLASS: {
9413 token = read32 (ip + 2);
9414 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9415 if (ip [1] == CEE_MONO_CISINST)
9416 ins = handle_cisinst (cfg, klass, sp [0]);
9418 ins = handle_ccastclass (cfg, klass, sp [0]);
9424 case CEE_MONO_SAVE_LMF:
9425 case CEE_MONO_RESTORE_LMF:
9426 #ifdef MONO_ARCH_HAVE_LMF_OPS
9427 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9428 MONO_ADD_INS (bblock, ins);
9429 cfg->need_lmf_area = TRUE;
9433 case CEE_MONO_CLASSCONST:
9434 CHECK_STACK_OVF (1);
9436 token = read32 (ip + 2);
9437 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9440 inline_costs += 10 * num_calls++;
9442 case CEE_MONO_NOT_TAKEN:
9443 bblock->out_of_line = TRUE;
9447 CHECK_STACK_OVF (1);
9449 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9450 ins->dreg = alloc_preg (cfg);
9451 ins->inst_offset = (gint32)read32 (ip + 2);
9452 ins->type = STACK_PTR;
9453 MONO_ADD_INS (bblock, ins);
9457 case CEE_MONO_DYN_CALL: {
9460 /* It would be easier to call a trampoline, but that would put an
9461 * extra frame on the stack, confusing exception handling. So
9462 * implement it inline using an opcode for now.
9465 if (!cfg->dyn_call_var) {
9466 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9467 /* prevent it from being register allocated */
9468 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9471 			/* Has to use a call inst since the local regalloc expects it */
9472 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9473 ins = (MonoInst*)call;
9475 ins->sreg1 = sp [0]->dreg;
9476 ins->sreg2 = sp [1]->dreg;
9477 MONO_ADD_INS (bblock, ins);
9479 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9480 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9484 inline_costs += 10 * num_calls++;
9489 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9499 /* somewhat similar to LDTOKEN */
9500 MonoInst *addr, *vtvar;
9501 CHECK_STACK_OVF (1);
9502 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9504 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9505 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9507 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9508 ins->type = STACK_VTYPE;
9509 ins->klass = mono_defaults.argumenthandle_class;
9522 * The following transforms:
9523 * CEE_CEQ into OP_CEQ
9524 * CEE_CGT into OP_CGT
9525 * CEE_CGT_UN into OP_CGT_UN
9526 * CEE_CLT into OP_CLT
9527 * CEE_CLT_UN into OP_CLT_UN
9529 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9531 MONO_INST_NEW (cfg, ins, cmp->opcode);
9533 cmp->sreg1 = sp [0]->dreg;
9534 cmp->sreg2 = sp [1]->dreg;
9535 type_from_op (cmp, sp [0], sp [1]);
9537 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9538 cmp->opcode = OP_LCOMPARE;
9539 else if (sp [0]->type == STACK_R8)
9540 cmp->opcode = OP_FCOMPARE;
9542 cmp->opcode = OP_ICOMPARE;
9543 MONO_ADD_INS (bblock, cmp);
9544 ins->type = STACK_I4;
9545 ins->dreg = alloc_dreg (cfg, ins->type);
9546 type_from_op (ins, sp [0], sp [1]);
9548 if (cmp->opcode == OP_FCOMPARE) {
9550 * The backends expect the fceq opcodes to do the
9553 cmp->opcode = OP_NOP;
9554 ins->sreg1 = cmp->sreg1;
9555 ins->sreg2 = cmp->sreg2;
9557 MONO_ADD_INS (bblock, ins);
9564 MonoMethod *cil_method;
9565 gboolean needs_static_rgctx_invoke;
9567 CHECK_STACK_OVF (1);
9569 n = read32 (ip + 2);
9570 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9573 mono_class_init (cmethod->klass);
9575 mono_save_token_info (cfg, image, n, cmethod);
9577 if (cfg->generic_sharing_context)
9578 context_used = mono_method_check_context_used (cmethod);
9580 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9582 cil_method = cmethod;
9583 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9584 METHOD_ACCESS_FAILURE;
9586 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9587 if (check_linkdemand (cfg, method, cmethod))
9589 CHECK_CFG_EXCEPTION;
9590 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9591 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9595 * Optimize the common case of ldftn+delegate creation
9597 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9598 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9599 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9601 int invoke_context_used = 0;
9603 invoke = mono_get_delegate_invoke (ctor_method->klass);
9604 if (!invoke || !mono_method_signature (invoke))
9607 if (cfg->generic_sharing_context)
9608 invoke_context_used = mono_method_check_context_used (invoke);
9610 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9611 /* FIXME: SGEN support */
9612 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9613 MonoInst *target_ins;
9616 if (cfg->verbose_level > 3)
9617 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9618 target_ins = sp [-1];
9620 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9621 CHECK_CFG_EXCEPTION;
9630 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9631 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9635 inline_costs += 10 * num_calls++;
9638 case CEE_LDVIRTFTN: {
9643 n = read32 (ip + 2);
9644 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9647 mono_class_init (cmethod->klass);
9649 if (cfg->generic_sharing_context)
9650 context_used = mono_method_check_context_used (cmethod);
9652 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9653 if (check_linkdemand (cfg, method, cmethod))
9655 CHECK_CFG_EXCEPTION;
9656 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9657 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9663 args [1] = emit_get_rgctx_method (cfg, context_used,
9664 cmethod, MONO_RGCTX_INFO_METHOD);
9667 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9669 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9672 inline_costs += 10 * num_calls++;
9676 CHECK_STACK_OVF (1);
9678 n = read16 (ip + 2);
9680 EMIT_NEW_ARGLOAD (cfg, ins, n);
9685 CHECK_STACK_OVF (1);
9687 n = read16 (ip + 2);
9689 NEW_ARGLOADA (cfg, ins, n);
9690 MONO_ADD_INS (cfg->cbb, ins);
9698 n = read16 (ip + 2);
9700 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9702 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9706 CHECK_STACK_OVF (1);
9708 n = read16 (ip + 2);
9710 EMIT_NEW_LOCLOAD (cfg, ins, n);
9715 unsigned char *tmp_ip;
9716 CHECK_STACK_OVF (1);
9718 n = read16 (ip + 2);
9721 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9727 EMIT_NEW_LOCLOADA (cfg, ins, n);
9736 n = read16 (ip + 2);
9738 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9740 emit_stloc_ir (cfg, sp, header, n);
9747 if (sp != stack_start)
9749 if (cfg->method != method)
9751 * Inlining this into a loop in a parent could lead to
9752 * stack overflows which is different behavior than the
9753 * non-inlined case, thus disable inlining in this case.
9755 goto inline_failure;
9757 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9758 ins->dreg = alloc_preg (cfg);
9759 ins->sreg1 = sp [0]->dreg;
9760 ins->type = STACK_PTR;
9761 MONO_ADD_INS (cfg->cbb, ins);
9763 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9765 ins->flags |= MONO_INST_INIT;
9770 case CEE_ENDFILTER: {
9771 MonoExceptionClause *clause, *nearest;
9772 int cc, nearest_num;
9776 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9778 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9779 ins->sreg1 = (*sp)->dreg;
9780 MONO_ADD_INS (bblock, ins);
9781 start_new_bblock = 1;
9786 for (cc = 0; cc < header->num_clauses; ++cc) {
9787 clause = &header->clauses [cc];
9788 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9789 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9790 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9796 if ((ip - header->code) != nearest->handler_offset)
9801 case CEE_UNALIGNED_:
9802 ins_flag |= MONO_INST_UNALIGNED;
9803 /* FIXME: record alignment? we can assume 1 for now */
9808 ins_flag |= MONO_INST_VOLATILE;
9812 ins_flag |= MONO_INST_TAILCALL;
9813 cfg->flags |= MONO_CFG_HAS_TAIL;
9814 /* Can't inline tail calls at this time */
9815 inline_costs += 100000;
9822 token = read32 (ip + 2);
9823 klass = mini_get_class (method, token, generic_context);
9824 CHECK_TYPELOAD (klass);
9825 if (generic_class_is_reference_type (cfg, klass))
9826 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9828 mini_emit_initobj (cfg, *sp, NULL, klass);
9832 case CEE_CONSTRAINED_:
9834 token = read32 (ip + 2);
9835 if (method->wrapper_type != MONO_WRAPPER_NONE)
9836 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9838 constrained_call = mono_class_get_full (image, token, generic_context);
9839 CHECK_TYPELOAD (constrained_call);
9844 MonoInst *iargs [3];
9848 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9849 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9850 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9851 /* emit_memset only works when val == 0 */
9852 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9857 if (ip [1] == CEE_CPBLK) {
9858 MonoMethod *memcpy_method = get_memcpy_method ();
9859 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9861 MonoMethod *memset_method = get_memset_method ();
9862 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9872 ins_flag |= MONO_INST_NOTYPECHECK;
9874 ins_flag |= MONO_INST_NORANGECHECK;
9875 /* we ignore the no-nullcheck for now since we
9876 * really do it explicitly only when doing callvirt->call
9882 int handler_offset = -1;
9884 for (i = 0; i < header->num_clauses; ++i) {
9885 MonoExceptionClause *clause = &header->clauses [i];
9886 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9887 handler_offset = clause->handler_offset;
9892 bblock->flags |= BB_EXCEPTION_UNSAFE;
9894 g_assert (handler_offset != -1);
9896 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9897 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9898 ins->sreg1 = load->dreg;
9899 MONO_ADD_INS (bblock, ins);
9901 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9902 MONO_ADD_INS (bblock, ins);
9905 link_bblock (cfg, bblock, end_bblock);
9906 start_new_bblock = 1;
9914 CHECK_STACK_OVF (1);
9916 token = read32 (ip + 2);
9917 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9918 MonoType *type = mono_type_create_from_typespec (image, token);
9919 token = mono_type_size (type, &ialign);
9921 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9922 CHECK_TYPELOAD (klass);
9923 mono_class_init (klass);
9924 token = mono_class_value_size (klass, &align);
9926 EMIT_NEW_ICONST (cfg, ins, token);
9931 case CEE_REFANYTYPE: {
9932 MonoInst *src_var, *src;
9938 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9940 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9941 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9960 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9970 g_warning ("opcode 0x%02x not handled", *ip);
9974 if (start_new_bblock != 1)
9977 bblock->cil_length = ip - bblock->cil_code;
9978 bblock->next_bb = end_bblock;
9980 if (cfg->method == method && cfg->domainvar) {
9982 MonoInst *get_domain;
9984 cfg->cbb = init_localsbb;
9986 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9987 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9990 get_domain->dreg = alloc_preg (cfg);
9991 MONO_ADD_INS (cfg->cbb, get_domain);
9993 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9994 MONO_ADD_INS (cfg->cbb, store);
9997 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9998 if (cfg->compile_aot)
9999 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10000 mono_get_got_var (cfg);
10003 if (cfg->method == method && cfg->got_var)
10004 mono_emit_load_got_addr (cfg);
10009 cfg->cbb = init_localsbb;
10011 for (i = 0; i < header->num_locals; ++i) {
10012 MonoType *ptype = header->locals [i];
10013 int t = ptype->type;
10014 dreg = cfg->locals [i]->dreg;
10016 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10017 t = mono_class_enum_basetype (ptype->data.klass)->type;
10018 if (ptype->byref) {
10019 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10020 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10021 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10022 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10023 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10024 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10025 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10026 ins->type = STACK_R8;
10027 ins->inst_p0 = (void*)&r8_0;
10028 ins->dreg = alloc_dreg (cfg, STACK_R8);
10029 MONO_ADD_INS (init_localsbb, ins);
10030 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10031 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10032 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10033 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10035 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10040 if (cfg->init_ref_vars && cfg->method == method) {
10041 /* Emit initialization for ref vars */
10042 // FIXME: Avoid duplication initialization for IL locals.
10043 for (i = 0; i < cfg->num_varinfo; ++i) {
10044 MonoInst *ins = cfg->varinfo [i];
10046 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10047 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10051 /* Add a sequence point for method entry/exit events */
10053 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10054 MONO_ADD_INS (init_localsbb, ins);
10055 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10056 MONO_ADD_INS (cfg->bb_exit, ins);
10061 if (cfg->method == method) {
10062 MonoBasicBlock *bb;
10063 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10064 bb->region = mono_find_block_region (cfg, bb->real_offset);
10066 mono_create_spvar_for_region (cfg, bb->region);
10067 if (cfg->verbose_level > 2)
10068 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10072 g_slist_free (class_inits);
10073 dont_inline = g_list_remove (dont_inline, method);
10075 if (inline_costs < 0) {
10078 /* Method is too large */
10079 mname = mono_method_full_name (method, TRUE);
10080 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10081 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10083 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10084 mono_basic_block_free (original_bb);
10088 if ((cfg->verbose_level > 2) && (cfg->method == method))
10089 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10091 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10092 mono_basic_block_free (original_bb);
10093 return inline_costs;
10096 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10103 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10107 set_exception_type_from_invalid_il (cfg, method, ip);
10111 g_slist_free (class_inits);
10112 mono_basic_block_free (original_bb);
10113 dont_inline = g_list_remove (dont_inline, method);
10114 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode (store register to [base+offset])
 * to its OP_STORE*_MEMBASE_IMM counterpart (store immediate instead).
 * Any opcode outside the handled store family hits the assertion below.
 * NOTE(review): the enclosing '{', 'switch (opcode)' and 'default:' lines
 * are not present in this excerpt of the file.
 */
10119 store_membase_reg_to_store_membase_imm (int opcode)
10122 	case OP_STORE_MEMBASE_REG:
10123 		return OP_STORE_MEMBASE_IMM;
10124 	case OP_STOREI1_MEMBASE_REG:
10125 		return OP_STOREI1_MEMBASE_IMM;
10126 	case OP_STOREI2_MEMBASE_REG:
10127 		return OP_STOREI2_MEMBASE_IMM;
10128 	case OP_STOREI4_MEMBASE_REG:
10129 		return OP_STOREI4_MEMBASE_IMM;
10130 	case OP_STOREI8_MEMBASE_REG:
10131 		return OP_STOREI8_MEMBASE_IMM;
	/* Unhandled opcode: caller passed a non-store opcode. */
10133 	g_assert_not_reached ();
10139 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register IR opcode to the variant taking an immediate second
 * operand (e.g. OP_IADD -> OP_IADD_IMM), used when constant folding / local
 * propagation turns sreg2 into a constant.
 * NOTE(review): the matching 'case OP_*:' labels for most entries fall on
 * lines omitted from this excerpt; only the 'return' lines are visible.
 */
10142 mono_op_to_op_imm (int opcode)
	/* 32 bit integer ALU ops */
10146 		return OP_IADD_IMM;
10148 		return OP_ISUB_IMM;
10150 		return OP_IDIV_IMM;
10152 		return OP_IDIV_UN_IMM;
10154 		return OP_IREM_IMM;
10156 		return OP_IREM_UN_IMM;
10158 		return OP_IMUL_IMM;
10160 		return OP_IAND_IMM;
10164 		return OP_IXOR_IMM;
10166 		return OP_ISHL_IMM;
10168 		return OP_ISHR_IMM;
10170 		return OP_ISHR_UN_IMM;
	/* 64 bit integer ALU ops */
10173 		return OP_LADD_IMM;
10175 		return OP_LSUB_IMM;
10177 		return OP_LAND_IMM;
10181 		return OP_LXOR_IMM;
10183 		return OP_LSHL_IMM;
10185 		return OP_LSHR_IMM;
10187 		return OP_LSHR_UN_IMM;
	/* compares */
10190 		return OP_COMPARE_IMM;
10192 		return OP_ICOMPARE_IMM;
10194 		return OP_LCOMPARE_IMM;
	/* stores: immediate value instead of source register */
10196 	case OP_STORE_MEMBASE_REG:
10197 		return OP_STORE_MEMBASE_IMM;
10198 	case OP_STOREI1_MEMBASE_REG:
10199 		return OP_STOREI1_MEMBASE_IMM;
10200 	case OP_STOREI2_MEMBASE_REG:
10201 		return OP_STOREI2_MEMBASE_IMM;
10202 	case OP_STOREI4_MEMBASE_REG:
10203 		return OP_STOREI4_MEMBASE_IMM;
	/* x86/amd64 specific opcodes */
10205 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10207 		return OP_X86_PUSH_IMM;
10208 	case OP_X86_COMPARE_MEMBASE_REG:
10209 		return OP_X86_COMPARE_MEMBASE_IMM;
10211 #if defined(TARGET_AMD64)
10212 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
10213 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* indirect calls with a constant target, and localloc */
10215 	case OP_VOIDCALL_REG:
10216 		return OP_VOIDCALL;
10224 		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR load. CEE_LDIND_I and CEE_LDIND_REF both map to the
 * pointer-sized OP_LOAD_MEMBASE. Asserts on anything outside the family.
 * NOTE(review): most 'case CEE_LDIND_*:' labels fall on lines omitted
 * from this excerpt.
 */
10231 ldind_to_load_membase (int opcode)
10235 		return OP_LOADI1_MEMBASE;
10237 		return OP_LOADU1_MEMBASE;
10239 		return OP_LOADI2_MEMBASE;
10241 		return OP_LOADU2_MEMBASE;
10243 		return OP_LOADI4_MEMBASE;
10245 		return OP_LOADU4_MEMBASE;
10247 		return OP_LOAD_MEMBASE;
10248 	case CEE_LDIND_REF:
	/* object references load as a full pointer-sized word */
10249 		return OP_LOAD_MEMBASE;
10251 		return OP_LOADI8_MEMBASE;
10253 		return OP_LOADR4_MEMBASE;
10255 		return OP_LOADR8_MEMBASE;
10257 	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR store. CEE_STIND_REF stores a pointer-sized
 * word (OP_STORE_MEMBASE_REG). Asserts on anything outside the family.
 * NOTE(review): most 'case CEE_STIND_*:' labels fall on lines omitted
 * from this excerpt.
 */
10264 stind_to_store_membase (int opcode)
10268 		return OP_STOREI1_MEMBASE_REG;
10270 		return OP_STOREI2_MEMBASE_REG;
10272 		return OP_STOREI4_MEMBASE_REG;
10274 	case CEE_STIND_REF:
10275 		return OP_STORE_MEMBASE_REG;
10277 		return OP_STOREI8_MEMBASE_REG;
10279 		return OP_STORER4_MEMBASE_REG;
10281 		return OP_STORER8_MEMBASE_REG;
10283 	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (load from [basereg+offset]) to the absolute
 * address form OP_LOAD*_MEM, where the target supports it. Only defined
 * for x86/amd64 here; OP_LOADI8_MEM additionally requires a 64 bit
 * register size.
 */
10290 mono_load_membase_to_load_mem (int opcode)
10292 	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10293 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10295 	case OP_LOAD_MEMBASE:
10296 		return OP_LOAD_MEM;
10297 	case OP_LOADU1_MEMBASE:
10298 		return OP_LOADU1_MEM;
10299 	case OP_LOADU2_MEMBASE:
10300 		return OP_LOADU2_MEM;
10301 	case OP_LOADI4_MEMBASE:
10302 		return OP_LOADI4_MEM;
10303 	case OP_LOADU4_MEMBASE:
10304 		return OP_LOADU4_MEM;
10305 #if SIZEOF_REGISTER == 8
10306 	case OP_LOADI8_MEMBASE:
10307 		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE and an ALU opcode OPCODE whose dreg
 * feeds that store, return an x86/amd64 read-modify-write opcode which
 * operates directly on the [basereg+offset] destination
 * (e.g. OP_IADD + OP_STOREI4_MEMBASE_REG -> OP_X86_ADD_MEMBASE_REG),
 * or presumably -1 when no fusion applies (the early-exit 'return' lines
 * are omitted from this excerpt — verify).
 * NOTE(review): the 'case OP_*:' labels matching the visible returns also
 * fall on omitted lines.
 */
10316 op_to_op_dest_membase (int store_opcode, int opcode)
10318 #if defined(TARGET_X86)
	/* x86: only pointer-sized / 32 bit stores can be fused */
10319 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10324 		return OP_X86_ADD_MEMBASE_REG;
10326 		return OP_X86_SUB_MEMBASE_REG;
10328 		return OP_X86_AND_MEMBASE_REG;
10330 		return OP_X86_OR_MEMBASE_REG;
10332 		return OP_X86_XOR_MEMBASE_REG;
	/* immediate-operand variants */
10335 		return OP_X86_ADD_MEMBASE_IMM;
10338 		return OP_X86_SUB_MEMBASE_IMM;
10341 		return OP_X86_AND_MEMBASE_IMM;
10344 		return OP_X86_OR_MEMBASE_IMM;
10347 		return OP_X86_XOR_MEMBASE_IMM;
10353 #if defined(TARGET_AMD64)
	/* amd64: 32 bit and 64 bit stores can both be fused */
10354 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
	/* 32 bit ops reuse the X86_ membase opcodes */
10359 		return OP_X86_ADD_MEMBASE_REG;
10361 		return OP_X86_SUB_MEMBASE_REG;
10363 		return OP_X86_AND_MEMBASE_REG;
10365 		return OP_X86_OR_MEMBASE_REG;
10367 		return OP_X86_XOR_MEMBASE_REG;
10369 		return OP_X86_ADD_MEMBASE_IMM;
10371 		return OP_X86_SUB_MEMBASE_IMM;
10373 		return OP_X86_AND_MEMBASE_IMM;
10375 		return OP_X86_OR_MEMBASE_IMM;
10377 		return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit ops use the AMD64_ membase opcodes */
10379 		return OP_AMD64_ADD_MEMBASE_REG;
10381 		return OP_AMD64_SUB_MEMBASE_REG;
10383 		return OP_AMD64_AND_MEMBASE_REG;
10385 		return OP_AMD64_OR_MEMBASE_REG;
10387 		return OP_AMD64_XOR_MEMBASE_REG;
10390 		return OP_AMD64_ADD_MEMBASE_IMM;
10393 		return OP_AMD64_SUB_MEMBASE_IMM;
10396 		return OP_AMD64_AND_MEMBASE_IMM;
10399 		return OP_AMD64_OR_MEMBASE_IMM;
10402 		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1 byte store into a
 * single x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE). Only OP_STOREI1_MEMBASE_REG stores qualify,
 * since SETcc writes a single byte.
 * NOTE(review): the 'case' labels selecting between the two branches
 * (presumably OP_ICEQ / OP_ICNEQ or similar) fall on omitted lines.
 */
10412 op_to_op_store_membase (int store_opcode, int opcode)
10414 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10417 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
10418 			return OP_X86_SETEQ_MEMBASE;
10420 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
10421 			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given a load opcode LOAD_OPCODE feeding sreg1 of OPCODE, return an
 * x86/amd64 opcode which reads the first operand directly from memory
 * (push/compare with a membase operand), avoiding a separate load.
 * Returns through the visible cases only for safe load widths; there is
 * a known FIXME about sign extension of sub-word loads.
 * NOTE(review): several 'case'/'#endif'/fall-back 'return' lines are
 * omitted from this excerpt, including a commented-out amd64
 * compare-immediate block (opened at 10465 below).
 */
10429 op_to_op_src1_membase (int load_opcode, int opcode)
10432 	/* FIXME: This has sign extension issues */
	/* x86: byte compare-immediate against memory */
10434 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10435 		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* otherwise only full-width (pointer / 32 bit) loads can be folded */
10438 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10443 		return OP_X86_PUSH_MEMBASE;
10444 	case OP_COMPARE_IMM:
10445 	case OP_ICOMPARE_IMM:
10446 		return OP_X86_COMPARE_MEMBASE_IMM;
10449 		return OP_X86_COMPARE_MEMBASE_REG;
10453 #ifdef TARGET_AMD64
10454 	/* FIXME: This has sign extension issues */
10456 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10457 		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* amd64 push folds only pointer-sized loads */
10462 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10463 			return OP_X86_PUSH_MEMBASE;
	/* The block below is disabled source (comment opened here) */
10465 		/* FIXME: This only works for 32 bit immediates
10466 	case OP_COMPARE_IMM:
10467 	case OP_LCOMPARE_IMM:
10468 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10469 			return OP_AMD64_COMPARE_MEMBASE_IMM;
10471 	case OP_ICOMPARE_IMM:
10472 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10473 			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* register compares: pick 64 bit vs 32 bit form by load width */
10477 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10478 			return OP_AMD64_COMPARE_MEMBASE_REG;
10481 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10482 			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given a load opcode LOAD_OPCODE feeding sreg2 of OPCODE, return an
 * x86/amd64 reg-op-memory opcode so the second operand is read directly
 * from [basereg+offset] (e.g. OP_IADD -> OP_X86_ADD_REG_MEMBASE).
 * On amd64 the 32 bit forms reuse the X86_ opcodes while 64 bit forms use
 * the AMD64_ ones, selected by the width of the folded load.
 * NOTE(review): the '#ifdef TARGET_X86', 'case OP_*:' labels and fall-back
 * returns are on lines omitted from this excerpt.
 */
10491 op_to_op_src2_membase (int load_opcode, int opcode)
	/* x86: only pointer / 32 bit loads can be folded */
10494 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10500 		return OP_X86_COMPARE_REG_MEMBASE;
10502 		return OP_X86_ADD_REG_MEMBASE;
10504 		return OP_X86_SUB_REG_MEMBASE;
10506 		return OP_X86_AND_REG_MEMBASE;
10508 		return OP_X86_OR_REG_MEMBASE;
10510 		return OP_X86_XOR_REG_MEMBASE;
10514 #ifdef TARGET_AMD64
10515 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10518 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
10520 			return OP_X86_ADD_REG_MEMBASE;
10522 			return OP_X86_SUB_REG_MEMBASE;
10524 			return OP_X86_AND_REG_MEMBASE;
10526 			return OP_X86_OR_REG_MEMBASE;
10528 			return OP_X86_XOR_REG_MEMBASE;
10530 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10534 			return OP_AMD64_COMPARE_REG_MEMBASE;
10536 			return OP_AMD64_ADD_REG_MEMBASE;
10538 			return OP_AMD64_SUB_REG_MEMBASE;
10540 			return OP_AMD64_AND_REG_MEMBASE;
10542 			return OP_AMD64_OR_REG_MEMBASE;
10544 			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes the
 * current architecture only implements via runtime emulation (long shifts
 * on 32 bit targets, mul/div where MONO_ARCH_EMULATE_* is defined) —
 * the emulation helpers need both operands in registers.
 * NOTE(review): the 'case' labels and the '-1' returns guarded by the
 * two #if blocks are on lines omitted from this excerpt.
 */
10553 mono_op_to_op_imm_noemul (int opcode)
10556 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10562 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10570 		return mono_op_to_op_imm (opcode);
10574 #ifndef DISABLE_JIT
10577 * mono_handle_global_vregs:
10579 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Scan all bblocks recording, per vreg, the single bblock which uses it
 * (block_num + 1) or -1 when it is used in more than one bblock. Vregs used
 * across bblocks are made 'global' by allocating a MonoInst variable for
 * them; conversely, variables confined to one bblock (and not volatile,
 * indirect, an argument or the return value) are demoted back to local
 * vregs. Finally the varinfo/vars tables are compacted to drop dead entries.
 */
10583 mono_handle_global_vregs (MonoCompile *cfg)
10585 	gint32 *vreg_to_bb;
10586 	MonoBasicBlock *bb;
	/*
	 * NOTE(review): element size is sizeof (gint32*) — a pointer — for an
	 * array of gint32, and '+ 1' binds after the multiply so it adds one
	 * byte, not one element. Presumably meant
	 * sizeof (gint32) * (cfg->next_vreg + 1); harmless over-allocation on
	 * 64 bit targets — confirm and fix upstream.
	 */
10589 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10591 #ifdef MONO_ARCH_SIMD_INTRINSICS
10592 	if (cfg->uses_simd_intrinsics)
10593 		mono_simd_simplify_indirection (cfg);
10596 	/* Find local vregs used in more than one bb */
10597 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10598 		MonoInst *ins = bb->code;
10599 		int block_num = bb->block_num;
10601 		if (cfg->verbose_level > 2)
10602 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10605 		for (; ins; ins = ins->next) {
10606 			const char *spec = INS_INFO (ins->opcode);
10607 			int regtype = 0, regindex;
10610 			if (G_UNLIKELY (cfg->verbose_level > 2))
10611 				mono_print_ins (ins);
	/* By this point all CIL-level opcodes must have been lowered to IR */
10613 			g_assert (ins->opcode >= MONO_CEE_LAST);
	/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
10615 			for (regindex = 0; regindex < 4; regindex ++) {
10618 				if (regindex == 0) {
10619 					regtype = spec [MONO_INST_DEST];
10620 					if (regtype == ' ')
10623 				} else if (regindex == 1) {
10624 					regtype = spec [MONO_INST_SRC1];
10625 					if (regtype == ' ')
10628 				} else if (regindex == 2) {
10629 					regtype = spec [MONO_INST_SRC2];
10630 					if (regtype == ' ')
10633 				} else if (regindex == 3) {
10634 					regtype = spec [MONO_INST_SRC3];
10635 					if (regtype == ' ')
10640 #if SIZEOF_REGISTER == 4
10641 				/* In the LLVM case, the long opcodes are not decomposed */
10642 				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10644 					 * Since some instructions reference the original long vreg,
10645 					 * and some reference the two component vregs, it is quite hard
10646 					 * to determine when it needs to be global. So be conservative.
10648 					if (!get_vreg_to_inst (cfg, vreg)) {
10649 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10651 						if (cfg->verbose_level > 2)
10652 							printf ("LONG VREG R%d made global.\n", vreg);
10656 					 * Make the component vregs volatile since the optimizations can
10657 					 * get confused otherwise.
10659 					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10660 					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10664 				g_assert (vreg != -1);
10666 				prev_bb = vreg_to_bb [vreg];
10667 				if (prev_bb == 0) {
10668 					/* 0 is a valid block num */
10669 					vreg_to_bb [vreg] = block_num + 1;
10670 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
	/* Hardware registers are never made global variables */
10671 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10674 					if (!get_vreg_to_inst (cfg, vreg)) {
10675 						if (G_UNLIKELY (cfg->verbose_level > 2))
10676 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
	/* Allocate a variable typed after the regtype ('i'/'l'/'f'/vtype) */
10680 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10683 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10686 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10689 							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10692 							g_assert_not_reached ();
10696 					/* Flag as having been used in more than one bb */
10697 					vreg_to_bb [vreg] = -1;
10703 	/* If a variable is used in only one bblock, convert it into a local vreg */
10704 	for (i = 0; i < cfg->num_varinfo; i++) {
10705 		MonoInst *var = cfg->varinfo [i];
10706 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10708 		switch (var->type) {
10714 #if SIZEOF_REGISTER == 8
10717 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10718 		/* Enabling this screws up the fp stack on x86 */
10721 			/* Arguments are implicitly global */
10722 			/* Putting R4 vars into registers doesn't work currently */
10723 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10725 				 * Make that the variable's liveness interval doesn't contain a call, since
10726 				 * that would cause the lvreg to be spilled, making the whole optimization
10729 				/* This is too slow for JIT compilation */
10731 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10733 					int def_index, call_index, ins_index;
10734 					gboolean spilled = FALSE;
	/*
	 * NOTE(review): vreg_to_bb is declared as gint32* above, yet ->code is
	 * dereferenced here; this path is presumably compiled out (an '#if 0'
	 * likely sits on a line omitted from this excerpt) — confirm.
	 */
10739 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10740 						const char *spec = INS_INFO (ins->opcode);
10742 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10743 							def_index = ins_index;
	/*
	 * NOTE(review): both halves of this '||' test SRC1/sreg1 — the second
	 * was presumably meant to test SRC2/sreg2, so a use through sreg2
	 * after a call is never detected here. Confirm against upstream.
	 */
10745 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10746 							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10747 							if (call_index > def_index) {
10753 						if (MONO_IS_CALL (ins))
10754 							call_index = ins_index;
10764 				if (G_UNLIKELY (cfg->verbose_level > 2))
10765 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
	/* Demote: mark the variable dead and detach it from its vreg */
10766 				var->flags |= MONO_INST_IS_DEAD;
10767 				cfg->vreg_to_inst [var->dreg] = NULL;
10774 	 * Compress the varinfo and vars tables so the liveness computation is faster and
10775 	 * takes up less space.
10778 	for (i = 0; i < cfg->num_varinfo; ++i) {
10779 		MonoInst *var = cfg->varinfo [i];
10780 		if (pos < i && cfg->locals_start == i)
10781 			cfg->locals_start = pos;
10782 		if (!(var->flags & MONO_INST_IS_DEAD)) {
10784 				cfg->varinfo [pos] = cfg->varinfo [i];
10785 				cfg->varinfo [pos]->inst_c0 = pos;
10786 				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10787 				cfg->vars [pos].idx = pos;
10788 #if SIZEOF_REGISTER == 4
10789 				if (cfg->varinfo [pos]->type == STACK_I8) {
10790 					/* Modify the two component vars too */
10793 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10794 					var1->inst_c0 = pos;
10795 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10796 					var1->inst_c0 = pos;
10803 	cfg->num_varinfo = pos;
10804 	if (cfg->locals_start > cfg->num_varinfo)
10805 		cfg->locals_start = cfg->num_varinfo;
10809 * mono_spill_global_vars:
10811 * Generate spill code for variables which are not allocated to registers,
10812 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10813 * code is generated which could be optimized by the local optimization passes.
10816 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10818 MonoBasicBlock *bb;
10820 int orig_next_vreg;
10821 guint32 *vreg_to_lvreg;
10823 guint32 i, lvregs_len;
10824 gboolean dest_has_lvreg = FALSE;
10825 guint32 stacktypes [128];
10826 MonoInst **live_range_start, **live_range_end;
10827 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10829 *need_local_opts = FALSE;
10831 memset (spec2, 0, sizeof (spec2));
10833 /* FIXME: Move this function to mini.c */
10834 stacktypes ['i'] = STACK_PTR;
10835 stacktypes ['l'] = STACK_I8;
10836 stacktypes ['f'] = STACK_R8;
10837 #ifdef MONO_ARCH_SIMD_INTRINSICS
10838 stacktypes ['x'] = STACK_VTYPE;
10841 #if SIZEOF_REGISTER == 4
10842 /* Create MonoInsts for longs */
10843 for (i = 0; i < cfg->num_varinfo; i++) {
10844 MonoInst *ins = cfg->varinfo [i];
10846 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10847 switch (ins->type) {
10852 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10855 g_assert (ins->opcode == OP_REGOFFSET);
10857 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10859 tree->opcode = OP_REGOFFSET;
10860 tree->inst_basereg = ins->inst_basereg;
10861 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10863 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10865 tree->opcode = OP_REGOFFSET;
10866 tree->inst_basereg = ins->inst_basereg;
10867 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10877 /* FIXME: widening and truncation */
10880 * As an optimization, when a variable allocated to the stack is first loaded into
10881 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10882 * the variable again.
10884 orig_next_vreg = cfg->next_vreg;
10885 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10886 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10890 * These arrays contain the first and last instructions accessing a given
10892 * Since we emit bblocks in the same order we process them here, and we
10893 * don't split live ranges, these will precisely describe the live range of
10894 * the variable, i.e. the instruction range where a valid value can be found
10895 * in the variables location.
10896 * The live range is computed using the liveness info computed by the liveness pass.
10897 * We can't use vmv->range, since that is an abstract live range, and we need
10898 * one which is instruction precise.
10899 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10901 /* FIXME: Only do this if debugging info is requested */
10902 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10903 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10904 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10905 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10907 /* Add spill loads/stores */
10908 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10911 if (cfg->verbose_level > 2)
10912 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10914 /* Clear vreg_to_lvreg array */
10915 for (i = 0; i < lvregs_len; i++)
10916 vreg_to_lvreg [lvregs [i]] = 0;
10920 MONO_BB_FOR_EACH_INS (bb, ins) {
10921 const char *spec = INS_INFO (ins->opcode);
10922 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10923 gboolean store, no_lvreg;
10924 int sregs [MONO_MAX_SRC_REGS];
10926 if (G_UNLIKELY (cfg->verbose_level > 2))
10927 mono_print_ins (ins);
10929 if (ins->opcode == OP_NOP)
10933 * We handle LDADDR here as well, since it can only be decomposed
10934 * when variable addresses are known.
10936 if (ins->opcode == OP_LDADDR) {
10937 MonoInst *var = ins->inst_p0;
10939 if (var->opcode == OP_VTARG_ADDR) {
10940 /* Happens on SPARC/S390 where vtypes are passed by reference */
10941 MonoInst *vtaddr = var->inst_left;
10942 if (vtaddr->opcode == OP_REGVAR) {
10943 ins->opcode = OP_MOVE;
10944 ins->sreg1 = vtaddr->dreg;
10946 else if (var->inst_left->opcode == OP_REGOFFSET) {
10947 ins->opcode = OP_LOAD_MEMBASE;
10948 ins->inst_basereg = vtaddr->inst_basereg;
10949 ins->inst_offset = vtaddr->inst_offset;
10953 g_assert (var->opcode == OP_REGOFFSET);
10955 ins->opcode = OP_ADD_IMM;
10956 ins->sreg1 = var->inst_basereg;
10957 ins->inst_imm = var->inst_offset;
10960 *need_local_opts = TRUE;
10961 spec = INS_INFO (ins->opcode);
10964 if (ins->opcode < MONO_CEE_LAST) {
10965 mono_print_ins (ins);
10966 g_assert_not_reached ();
10970 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10974 if (MONO_IS_STORE_MEMBASE (ins)) {
10975 tmp_reg = ins->dreg;
10976 ins->dreg = ins->sreg2;
10977 ins->sreg2 = tmp_reg;
10980 spec2 [MONO_INST_DEST] = ' ';
10981 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10982 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10983 spec2 [MONO_INST_SRC3] = ' ';
10985 } else if (MONO_IS_STORE_MEMINDEX (ins))
10986 g_assert_not_reached ();
10991 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10992 printf ("\t %.3s %d", spec, ins->dreg);
10993 num_sregs = mono_inst_get_src_registers (ins, sregs);
10994 for (srcindex = 0; srcindex < 3; ++srcindex)
10995 printf (" %d", sregs [srcindex]);
11002 regtype = spec [MONO_INST_DEST];
11003 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11006 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11007 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11008 MonoInst *store_ins;
11010 MonoInst *def_ins = ins;
11011 int dreg = ins->dreg; /* The original vreg */
11013 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11015 if (var->opcode == OP_REGVAR) {
11016 ins->dreg = var->dreg;
11017 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11019 * Instead of emitting a load+store, use a _membase opcode.
11021 g_assert (var->opcode == OP_REGOFFSET);
11022 if (ins->opcode == OP_MOVE) {
11026 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11027 ins->inst_basereg = var->inst_basereg;
11028 ins->inst_offset = var->inst_offset;
11031 spec = INS_INFO (ins->opcode);
11035 g_assert (var->opcode == OP_REGOFFSET);
11037 prev_dreg = ins->dreg;
11039 /* Invalidate any previous lvreg for this vreg */
11040 vreg_to_lvreg [ins->dreg] = 0;
11044 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11046 store_opcode = OP_STOREI8_MEMBASE_REG;
11049 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11051 if (regtype == 'l') {
11052 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11053 mono_bblock_insert_after_ins (bb, ins, store_ins);
11054 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11055 mono_bblock_insert_after_ins (bb, ins, store_ins);
11056 def_ins = store_ins;
11059 g_assert (store_opcode != OP_STOREV_MEMBASE);
11061 /* Try to fuse the store into the instruction itself */
11062 /* FIXME: Add more instructions */
11063 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11064 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11065 ins->inst_imm = ins->inst_c0;
11066 ins->inst_destbasereg = var->inst_basereg;
11067 ins->inst_offset = var->inst_offset;
11068 spec = INS_INFO (ins->opcode);
11069 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11070 ins->opcode = store_opcode;
11071 ins->inst_destbasereg = var->inst_basereg;
11072 ins->inst_offset = var->inst_offset;
11076 tmp_reg = ins->dreg;
11077 ins->dreg = ins->sreg2;
11078 ins->sreg2 = tmp_reg;
11081 spec2 [MONO_INST_DEST] = ' ';
11082 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11083 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11084 spec2 [MONO_INST_SRC3] = ' ';
11086 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11087 // FIXME: The backends expect the base reg to be in inst_basereg
11088 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11090 ins->inst_basereg = var->inst_basereg;
11091 ins->inst_offset = var->inst_offset;
11092 spec = INS_INFO (ins->opcode);
11094 /* printf ("INS: "); mono_print_ins (ins); */
11095 /* Create a store instruction */
11096 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11098 /* Insert it after the instruction */
11099 mono_bblock_insert_after_ins (bb, ins, store_ins);
11101 def_ins = store_ins;
11104 * We can't assign ins->dreg to var->dreg here, since the
11105 * sregs could use it. So set a flag, and do it after
11108 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11109 dest_has_lvreg = TRUE;
11114 if (def_ins && !live_range_start [dreg]) {
11115 live_range_start [dreg] = def_ins;
11116 live_range_start_bb [dreg] = bb;
11123 num_sregs = mono_inst_get_src_registers (ins, sregs);
11124 for (srcindex = 0; srcindex < 3; ++srcindex) {
11125 regtype = spec [MONO_INST_SRC1 + srcindex];
11126 sreg = sregs [srcindex];
11128 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11129 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11130 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11131 MonoInst *use_ins = ins;
11132 MonoInst *load_ins;
11133 guint32 load_opcode;
11135 if (var->opcode == OP_REGVAR) {
11136 sregs [srcindex] = var->dreg;
11137 //mono_inst_set_src_registers (ins, sregs);
11138 live_range_end [sreg] = use_ins;
11139 live_range_end_bb [sreg] = bb;
11143 g_assert (var->opcode == OP_REGOFFSET);
11145 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11147 g_assert (load_opcode != OP_LOADV_MEMBASE);
11149 if (vreg_to_lvreg [sreg]) {
11150 g_assert (vreg_to_lvreg [sreg] != -1);
11152 /* The variable is already loaded to an lvreg */
11153 if (G_UNLIKELY (cfg->verbose_level > 2))
11154 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11155 sregs [srcindex] = vreg_to_lvreg [sreg];
11156 //mono_inst_set_src_registers (ins, sregs);
11160 /* Try to fuse the load into the instruction */
11161 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11162 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11163 sregs [0] = var->inst_basereg;
11164 //mono_inst_set_src_registers (ins, sregs);
11165 ins->inst_offset = var->inst_offset;
11166 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11167 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11168 sregs [1] = var->inst_basereg;
11169 //mono_inst_set_src_registers (ins, sregs);
11170 ins->inst_offset = var->inst_offset;
11172 if (MONO_IS_REAL_MOVE (ins)) {
11173 ins->opcode = OP_NOP;
11176 //printf ("%d ", srcindex); mono_print_ins (ins);
11178 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11180 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11181 if (var->dreg == prev_dreg) {
11183 * sreg refers to the value loaded by the load
11184 * emitted below, but we need to use ins->dreg
11185 * since it refers to the store emitted earlier.
11189 g_assert (sreg != -1);
11190 vreg_to_lvreg [var->dreg] = sreg;
11191 g_assert (lvregs_len < 1024);
11192 lvregs [lvregs_len ++] = var->dreg;
11196 sregs [srcindex] = sreg;
11197 //mono_inst_set_src_registers (ins, sregs);
11199 if (regtype == 'l') {
11200 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11201 mono_bblock_insert_before_ins (bb, ins, load_ins);
11202 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11203 mono_bblock_insert_before_ins (bb, ins, load_ins);
11204 use_ins = load_ins;
11207 #if SIZEOF_REGISTER == 4
11208 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11210 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11211 mono_bblock_insert_before_ins (bb, ins, load_ins);
11212 use_ins = load_ins;
11216 if (var->dreg < orig_next_vreg) {
11217 live_range_end [var->dreg] = use_ins;
11218 live_range_end_bb [var->dreg] = bb;
11222 mono_inst_set_src_registers (ins, sregs);
11224 if (dest_has_lvreg) {
11225 g_assert (ins->dreg != -1);
11226 vreg_to_lvreg [prev_dreg] = ins->dreg;
11227 g_assert (lvregs_len < 1024);
11228 lvregs [lvregs_len ++] = prev_dreg;
11229 dest_has_lvreg = FALSE;
11233 tmp_reg = ins->dreg;
11234 ins->dreg = ins->sreg2;
11235 ins->sreg2 = tmp_reg;
11238 if (MONO_IS_CALL (ins)) {
11239 /* Clear vreg_to_lvreg array */
11240 for (i = 0; i < lvregs_len; i++)
11241 vreg_to_lvreg [lvregs [i]] = 0;
11243 } else if (ins->opcode == OP_NOP) {
11245 MONO_INST_NULLIFY_SREGS (ins);
11248 if (cfg->verbose_level > 2)
11249 mono_print_ins_index (1, ins);
11252 /* Extend the live range based on the liveness info */
11253 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11254 for (i = 0; i < cfg->num_varinfo; i ++) {
11255 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11257 if (vreg_is_volatile (cfg, vi->vreg))
11258 /* The liveness info is incomplete */
11261 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11262 /* Live from at least the first ins of this bb */
11263 live_range_start [vi->vreg] = bb->code;
11264 live_range_start_bb [vi->vreg] = bb;
11267 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11268 /* Live at least until the last ins of this bb */
11269 live_range_end [vi->vreg] = bb->last_ins;
11270 live_range_end_bb [vi->vreg] = bb;
11276 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11278 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11279 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11281 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11282 for (i = 0; i < cfg->num_varinfo; ++i) {
11283 int vreg = MONO_VARINFO (cfg, i)->vreg;
11286 if (live_range_start [vreg]) {
11287 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11289 ins->inst_c1 = vreg;
11290 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11292 if (live_range_end [vreg]) {
11293 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11295 ins->inst_c1 = vreg;
11296 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11297 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11299 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11305 g_free (live_range_start);
11306 g_free (live_range_end);
11307 g_free (live_range_start_bb);
11308 g_free (live_range_end_bb);
11313 * - use 'iadd' instead of 'int_add'
11314 * - handling ovf opcodes: decompose in method_to_ir.
11315 * - unify iregs/fregs
11316 * -> partly done, the missing parts are:
11317 * - a more complete unification would involve unifying the hregs as well, so
11318 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11319 * would no longer map to the machine hregs, so the code generators would need to
11320 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11321 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11322 * fp/non-fp branches speeds it up by about 15%.
11323 * - use sext/zext opcodes instead of shifts
11325 * - get rid of TEMPLOADs if possible and use vregs instead
11326 * - clean up usage of OP_P/OP_ opcodes
11327 * - cleanup usage of DUMMY_USE
11328 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11330 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11331 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11332 * - make sure handle_stack_args () is called before the branch is emitted
11333 * - when the new IR is done, get rid of all unused stuff
11334 * - COMPARE/BEQ as separate instructions or unify them ?
11335 * - keeping them separate allows specialized compare instructions like
11336 * compare_imm, compare_membase
11337 * - most back ends unify fp compare+branch, fp compare+ceq
11338 * - integrate mono_save_args into inline_method
11339 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11340 * - handle long shift opts on 32 bit platforms somehow: they require
11341 * 3 sregs (2 for arg1 and 1 for arg2)
11342 * - make byref a 'normal' type.
11343 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11344 * variable if needed.
11345 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11346 * like inline_method.
11347 * - remove inlining restrictions
11348 * - fix LNEG and enable cfold of INEG
11349 * - generalize x86 optimizations like ldelema as a peephole optimization
11350 * - add store_mem_imm for amd64
11351 * - optimize the loading of the interruption flag in the managed->native wrappers
11352 * - avoid special handling of OP_NOP in passes
11353 * - move code inserting instructions into one function/macro.
11354 * - try a coalescing phase after liveness analysis
11355 * - add float -> vreg conversion + local optimizations on !x86
11356 * - figure out how to handle decomposed branches during optimizations, ie.
11357 * compare+branch, op_jump_table+op_br etc.
11358 * - promote RuntimeXHandles to vregs
11359 * - vtype cleanups:
11360 * - add a NEW_VARLOADA_VREG macro
11361 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11362 * accessing vtype fields.
11363 * - get rid of I8CONST on 64 bit platforms
11364 * - dealing with the increase in code size due to branches created during opcode
11366 * - use extended basic blocks
11367 * - all parts of the JIT
11368 * - handle_global_vregs () && local regalloc
11369 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11370 * - sources of increase in code size:
11373 * - isinst and castclass
11374 * - lvregs not allocated to global registers even if used multiple times
11375 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11377 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11378 * - add all micro optimizations from the old JIT
11379 * - put tree optimizations into the deadce pass
11380 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11381 * specific function.
11382 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11383 * fcompare + branchCC.
11384 * - create a helper function for allocating a stack slot, taking into account
11385 * MONO_CFG_HAS_SPILLUP.
11387 * - merge the ia64 switch changes.
11388 * - optimize mono_regstate2_alloc_int/float.
11389 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11390 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11391 * parts of the tree could be separated by other instructions, killing the tree
11392 * arguments, or stores killing loads etc. Also, should we fold loads into other
11393 * instructions if the result of the load is used multiple times ?
11394 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11395 * - LAST MERGE: 108395.
11396 * - when returning vtypes in registers, generate IR and append it to the end of the
11397 * last bb instead of doing it in the epilog.
11398 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11406 - When to decompose opcodes:
11407 - earlier: this makes some optimizations hard to implement, since the low level IR
11408 no longer contains the necessary information. But it is easier to do.
11409 - later: harder to implement, enables more optimizations.
11410 - Branches inside bblocks:
11411 - created when decomposing complex opcodes.
11412 - branches to another bblock: harmless, but not tracked by the branch
11413 optimizations, so need to branch to a label at the start of the bblock.
11414 - branches to inside the same bblock: very problematic, trips up the local
11415 reg allocator. Can be fixed by splitting the current bblock, but that is a
11416 complex operation, since some local vregs can become global vregs etc.
11417 - Local/global vregs:
11418 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11419 local register allocator.
11420 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11421 structure, created by mono_create_var (). Assigned to hregs or the stack by
11422 the global register allocator.
11423 - When to do optimizations like alu->alu_imm:
11424 - earlier -> saves work later on since the IR will be smaller/simpler
11425 - later -> can work on more instructions
11426 - Handling of valuetypes:
11427 - When a vtype is pushed on the stack, a new temporary is created, an
11428 instruction computing its address (LDADDR) is emitted and pushed on
11429 the stack. Need to optimize cases when the vtype is used immediately as in
11430 argument passing, stloc etc.
11431 - Instead of the to_end stuff in the old JIT, simply call the function handling
11432 the values on the stack before emitting the last instruction of the bb.
11435 #endif /* DISABLE_JIT */