2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * NOTE(review): this listing appears to be a sparse, line-numbered excerpt of
 * mono/mini/method-to-ir.c -- several macro bodies below are missing trailing
 * lines (e.g. the closing "} while (0)").  Comments added here describe only
 * what the visible lines establish.
 */
/* Inlining heuristics: per-branch cost penalty and max IL length of inline candidates. */
64 #define BRANCH_COST 10
65 #define INLINE_LENGTH_LIMIT 20
/* Record an inline failure at the current call site (only for non-wrapper outer methods). */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
/* IR-emission entry points shared with the rest of the JIT. */
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Trampoline signatures; the definitions live in another translation unit. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
/* Expand mini-ops.h into per-opcode dreg/sreg descriptor characters. */
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
/* Re-expand mini-ops.h to count source registers per opcode (0-3). */
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
/* Initialize liveness-range bookkeeping for a variable's MonoMethodVar info. */
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the (up to three) source registers from 'regs' into 'ins'. */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrappers over the file-local register allocators (alloc_ireg & co.). */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the move opcode used when copying a value of that type
 * between registers.  NOTE(review): several case labels and return statements
 * are missing from this excerpt; comments reflect only the visible lines.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instantiations are resolved through their container class. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: print a basic block's predecessors, successors and its
 * instruction list to stdout, prefixed with 'msg'.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump every instruction in the block. */
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (creating lazily) the basic block that starts at IL address 'ip'. */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 LEA: dest = sr1 + (sr2 << shift) + imm. */
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
/* On 64-bit, sign-extend an I4 operand that is mixed with a native-pointer operand. */
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP / ADD_UNOP: pop operand(s) from the eval stack, emit the
 * type-specialized arithmetic instruction (via type_from_op), and push the
 * (possibly decomposed) result.  ADD_BINCOND emits a compare plus a
 * conditional branch, wiring up the true/false target basic blocks and
 * flushing the eval stack (handle_stack_args) before the branch.
 */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit blocks have no cil_code). */
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Nothing to do if 'to' is already in 'from''s successor list. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow the successor array by one; mempool memory is never freed individually. */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Mirror the edge in 'to''s predecessor list, again deduplicating first. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
/* Scan the method's EH clause table for one covering 'offset'. */
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Offsets inside a filter block get a MONO_REGION_FILTER token. */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Offsets inside a handler: distinguish finally / fault / catch regions. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offsets inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (into a GList) the EH clauses of kind 'type' whose protected range
 * contains 'ip' but not 'target' -- i.e. the clauses a branch from ip to
 * target would exit.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* The branch leaves this clause: source is inside, target is outside. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/* Return the cached 'spvar' for an EH region, creating it on first use. */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for an IL offset (NULL if absent). */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return the exception-object variable for an IL offset, creating it on first use. */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying integral type. */
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
/* Generic instantiations are resolved via their container class. */
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop, indexed by [lhs stack type][rhs stack type]. */
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per stack type. */
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero values encode allowed comparison classes. */
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
/* Result type of shift ops: result follows the value being shifted. */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
/* Arithmetic binops: validate operand types and specialize the opcode. */
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted value. */
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Compares: pick long/float/int compare based on the operand's stack type. */
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and ceq/cgt/clt families. */
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to 32-bit. */
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: convert unsigned integer to floating point. */
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit signed/unsigned. */
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: conversion to native unsigned int; width depends on the target. */
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
/* Loads: result type is determined by the load opcode itself. */
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the given argument values are compatible with the signature. */
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
/* Managed pointers are only valid for byref parameters, and vice versa. */
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
/* Floating-point stack values require an R4/R8 parameter. */
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable on first request. */
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful when compiling AOT. */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/* Return the rgctx/vtable variable; only valid under generic sharing. */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType. */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its eval-stack type.  NOTE(review): many case labels and
 * returns are elided in this excerpt.
 */
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CIL ldelem/stelem opcode to the element class it accesses.
 * NOTE(review): the case labels for most returns are elided in this excerpt.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/* Return a variable for eval-stack slot 'slot', sharing cached int vars when possible. */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the per-(slot, stack-type) variable cache. */
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for AOT compilation so the token can be resolved later. */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
/* mempool-allocated and zeroed; freed together with the compile's mempool. */
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single join point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the join point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this block: decide which variables hold the stack items. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack already assigned to one of the successors. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpose, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor that does not have one yet. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
/* Merge failure: stack depths differ at a join point. */
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
/* Spill each stack item to its variable and replace it on the stack. */
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
/* Store the same values into this successor's distinct variable set too. */
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR which loads interface_offsets [klass->interface_id] into INTF_REG,
 * given a register holding the vtable pointer. The interface-offsets table is
 * laid out in memory immediately BEFORE the vtable, hence the negative
 * displacement in the non-AOT path below.
 */
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so materialize it via an
 * AOT constant (ADJUSTED_IID) and compute the slot address at run time. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so the slot before the vtable
 * can be addressed with a fixed negative offset. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface bitmap
 * found at BASE_REG+OFFSET (of a MonoClass or MonoVTable — see the wrappers
 * below) has the bit for KLASS's interface id set.
 * With COMPRESSED_INTERFACE_BITMAP the test is delegated to the
 * mono_class_interface_match icall; otherwise the relevant bitmap byte is
 * loaded and masked inline.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
/* Pass the bitmap pointer and the interface id to the matcher icall; under
 * AOT the iid comes from a patch, otherwise it is an inline constant. */
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT path: compute byte index (iid >> 3) and bit mask (1 << (iid & 7))
 * entirely in emitted code, since iid is not known while compiling. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: byte index and mask fold to constants. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: run the interface-bitmap check against the bitmap embedded in
 * a MonoClass (klass_reg holds a MonoClass*). */
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: same check as above, but against the bitmap embedded in a
 * MonoVTable (vtable_reg holds a MonoVTable*). */
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 * Emit a guard that KLASS's interface id is within the max_interface_id bound
 * held in MAX_IID_REG. On failure the emitted code either branches to
 * FALSE_TARGET or raises InvalidCastException — presumably selected by a
 * NULL check on false_target on an elided line; TODO confirm against the
 * full source (this view is missing lines between 1453 and 1455).
 */
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: the iid must come from a patchable constant. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Load max_interface_id out of the vtable, then perform the bound check. */
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Load max_interface_id out of the MonoClass, then perform the bound check. */
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is-instance" subtype test using the supertypes table of the class
 * in KLASS_REG: if the class at the right idepth in the supertypes array
 * equals KLASS (or the dynamic class in KLASS_INS when given), branch to
 * TRUE_TARGET; a too-small idepth branches to FALSE_TARGET.
 * NOTE(review): this view is decimated — the branches selecting between the
 * klass_ins / compile_aot / constant comparisons sit partly on elided lines;
 * only comments were added here, code bytes are unchanged.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than the fixed-size supertable need a runtime idepth
 * bound check before indexing the supertypes array. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [idepth - 1] is the ancestor at KLASS's depth. */
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test against a compile-time class only
 * (no dynamic klass instruction). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface-implementation test against the vtable in VTABLE_REG:
 * bound-check the interface id, then test the interface bitmap bit.
 * On a set bit control goes to TRUE_TARGET; the InvalidCastException path is
 * presumably taken when no targets are supplied (the selecting condition sits
 * on elided lines between 1519 and 1523 — TODO confirm).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Same interface test as mini_emit_iface_cast, but KLASS_REG holds a
 * MonoClass* rather than a MonoVTable*. */
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-identity check: compare the class in KLASS_REG against
 * KLASS_INST's value when supplied (the guarding condition is on an elided
 * line before 1547 — TODO confirm), against a patched class constant under
 * AOT, or against the inline class pointer otherwise; raise
 * InvalidCastException on mismatch.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time class only. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the class in KLASS_REG against KLASS (via a patched constant under
 * AOT) and branch to TARGET using the caller-supplied conditional branch
 * opcode BRANCH_OP, instead of raising an exception.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through this
 * wrapper for array element classes. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check for the object in OBJ_REG whose class is in
 * KLASS_REG, throwing InvalidCastException on failure. Handles array types
 * (rank + element-class checks, special enum/object element cases, and a
 * vector-vs-multidim bounds check for SZARRAY) and plain classes (supertypes
 * table walk). OBJECT_IS_NULL is the branch target used by emitted
 * short-circuit checks. NOTE(review): this view is decimated — the branch
 * distinguishing the array path from the class path sits on elided lines;
 * code bytes are unchanged, only comments added.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
/* Array path: rank must match exactly, then check the element class. */
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element class special cases: object, System.Enum's parent (ValueType),
 * and System.Enum get dedicated fast checks. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY requires a vector (NULL bounds); a multi-dim array of the same
 * element type must not pass. */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, as in
 * mini_emit_isninst_cast_inst, but throwing instead of branching. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass against a compile-time class only. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR which sets SIZE bytes at DESTREG+OFFSET to VAL (only 0 is
 * supported — see the assert). Small aligned sizes become a single immediate
 * store; larger regions are filled with register stores of the widest unit
 * ALIGN permits (8/4/2/1 bytes), falling back to byte stores on targets
 * where unaligned access is disallowed. NOTE(review): the surrounding
 * switch/loop scaffolding sits on elided lines in this view; only comments
 * were added, code bytes are unchanged.
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: a size that fits the alignment becomes one immediate store. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register sized to the target word. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Low alignment: byte stores only. */
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* 64-bit: align up with a 4-byte store if needed, then 8-byte stores. */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * using the widest load/store unit ALIGN (and the target's unaligned-access
 * capability) allows: 8-byte chunks on 64-bit, then 4-, 2-, and 1-byte tail
 * copies. Bounded by an assert to avoid unbounded code expansion.
 * NOTE(review): the loop scaffolding around each copy unit sits on elided
 * lines in this view; only comments were added, code bytes are unchanged.
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Low alignment: byte-by-byte copy. */
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy in 8-byte chunks while possible. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a managed return TYPE to the matching call opcode family:
 * CALL (int/ref), VOIDCALL, LCALL (64-bit int), FCALL (float), or VCALL
 * (value types), each in its plain / _REG (calli) / VIRT variant selected by
 * the CALLI and VIRT flags. Enums and generic instances are resolved to their
 * underlying type and the switch is re-entered (the enclosing loop/goto is on
 * elided lines in this view).
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized, handled before inspecting type->type. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums decay to their base type and take another pass. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-run the switch on the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* Per-stack-type compatibility switch: each MONO_TYPE_* case below states
 * which evaluation-stack kinds (STACK_I4/I8/R8/PTR/MP/OBJ/VTYPE) may be
 * stored into a target of that type. Value types additionally require an
 * exact MonoClass match. */
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
/* 64-bit ints accept only STACK_I8; floats only STACK_R8. */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
/* Generic instances split on valuetype-ness: vtypes need an exact class
 * match, reference instantiations just need an object on the stack. */
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Validate that the evaluation-stack kinds of ARGS match SIG's parameter
 * types (including `this` when present). Returns nonzero when the arguments
 * can't be passed to a method with this signature.
 */
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* `this` must be an object, managed pointer or native pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums decay to their base type and re-enter the switch. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be statically bound). Most case labels/returns fall on
 * elided lines in this view. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE counterpart, used when the
 * call target is loaded from a vtable/IMT slot (base register + offset). */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Attach the IMT (interface method table) argument to CALL: either the
 * caller-provided IMT_ARG or a constant for call->method (AOT patch or
 * inline OP_PCONST). Under LLVM the register is recorded in imt_arg_reg;
 * otherwise it is bound to MONO_ARCH_IMT_REG when the architecture defines
 * one, with fallbacks to a keep-alive reg binding or
 * mono_arch_emit_imt_argument.
 */
2083 #ifdef MONO_ARCH_HAVE_IMT
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2089 if (COMPILE_LLVM (cfg)) {
2090 method_reg = alloc_preg (cfg);
2093 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2094 } else if (cfg->compile_aot) {
2095 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2098 MONO_INST_NEW (cfg, ins, OP_PCONST);
2099 ins->inst_p0 = call->method;
2100 ins->dreg = method_reg;
2101 MONO_ADD_INS (cfg->cbb, ins);
2105 call->imt_arg_reg = method_reg;
2107 #ifdef MONO_ARCH_IMT_REG
2108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2110 /* Need this to keep the IMT arg alive */
2111 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant-materialization logic as above, bound
 * directly to the architecture's IMT register. */
2116 #ifdef MONO_ARCH_IMT_REG
2117 method_reg = alloc_preg (cfg);
2120 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2121 } else if (cfg->compile_aot) {
2122 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2125 MONO_INST_NEW (cfg, ins, OP_PCONST);
2126 ins->inst_p0 = call->method;
2127 ins->dreg = method_reg;
2128 MONO_ADD_INS (cfg->cbb, ins);
2131 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2133 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from the mempool and fill in its type/ip/target;
 * memory is owned by the mempool, callers must not free it. */
2138 static MonoJumpInfo *
2139 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2141 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2145 ji->data.target = target;
/*
 * mono_emit_call_args:
 * Build (but do not add to a bblock) a MonoCallInst for SIG with ARGS:
 * chooses the opcode via ret_type_to_call_opcode (or OP_TAILCALL for tail
 * calls), arranges value-type return handling (either through cfg->vret_addr
 * or a temporary addressed by OP_OUTARG_VTRETADDR), converts R4 arguments to
 * icall results under soft-float, and finally lets the backend (LLVM or the
 * native mono_arch_emit_call) lay out the arguments.
 */
2150 inline static MonoCallInst *
2151 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2152 MonoInst **args, int calli, int virtual, int tail)
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2160 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2162 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2165 call->signature = sig;
2167 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns: either reuse cfg->vret_addr (first branch; its guarding
 * condition is on an elided line — TODO confirm) or create a temp and pass
 * its address via OP_OUTARG_VTRETADDR as documented below. */
2170 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2171 call->vret_var = cfg->vret_addr;
2172 //g_assert_not_reached ();
2174 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2175 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2178 temp->backend.is_pinvoke = sig->pinvoke;
2181 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2182 * address of return value to increase optimization opportunities.
2183 * Before vtype decomposition, the dreg of the call ins itself represents the
2184 * fact the call modifies the return value. After decomposition, the call will
2185 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2186 * will be transformed into an LDADDR.
2188 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2189 loada->dreg = alloc_preg (cfg);
2190 loada->inst_p0 = temp;
2191 /* We reference the call too since call->dreg could change during optimization */
2192 loada->inst_p1 = call;
2193 MONO_ADD_INS (cfg->cbb, loada);
2195 call->inst.dreg = temp->dreg;
2197 call->vret_var = loada;
2198 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2199 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2201 #ifdef MONO_ARCH_SOFT_FLOAT
2202 if (COMPILE_SOFT_FLOAT (cfg)) {
2204 * If the call has a float argument, we would need to do an r8->r4 conversion using
2205 * an icall, but that cannot be done during the call sequence since it would clobber
2206 * the call registers + the stack. So we do it before emitting the call.
2208 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2210 MonoInst *in = call->args [i];
2212 if (i >= sig->hasthis)
2213 t = sig->params [i - sig->hasthis];
2215 t = &mono_defaults.int_class->byval_arg;
2216 t = mono_type_get_underlying_type (t);
2218 if (!t->byref && t->type == MONO_TYPE_R4) {
2219 MonoInst *iargs [1];
2223 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2225 /* The result will be in an int vreg */
2226 call->args [i] = conv;
/* Backend argument layout; under LLVM a different emitter is used. */
2233 if (COMPILE_LLVM (cfg))
2234 mono_llvm_emit_call (cfg, call);
2236 mono_arch_emit_call (cfg, call);
2238 mono_arch_emit_call (cfg, call);
/* Track outgoing-parameter area usage for stack-frame sizing. */
2241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2242 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in ADDR: build the call args with
 * calli=TRUE, wire the address register into sreg1, and append the call to
 * the current bblock. */
2247 inline static MonoInst*
2248 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2250 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2252 call->inst.sreg1 = addr->dreg;
2254 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2256 return (MonoInst*)call;
/* Attach the RGCTX (runtime generic context) argument to CALL: bind it to
 * the dedicated MONO_ARCH_RGCTX_REG when the architecture has one, otherwise
 * record the vreg in rgctx_arg_reg for the backend. */
2260 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2262 #ifdef MONO_ARCH_RGCTX_REG
2263 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2264 cfg->uses_rgctx_reg = TRUE;
2265 call->rgctx_reg = TRUE;
2267 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also carries an RGCTX argument: copy the rgctx value
 * into a fresh vreg BEFORE emitting the call (so the move precedes the call
 * sequence), then attach it via set_rgctx_arg. */
2274 inline static MonoInst*
2275 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2281 rgctx_reg = mono_alloc_preg (cfg);
2282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2284 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2286 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2287 return (MonoInst*)call;
/* Forward declarations for the RGCTX lookup helpers used below. */
2291 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2293 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a managed call to METHOD with signature SIG and arguments ARGS.
 * THIS non-NULL selects a virtual call; the function then tries, in order:
 * remoting-aware indirection (shared generic code on a possibly-remote
 * receiver), the delegate Invoke fast path through delegate->invoke_impl,
 * static devirtualization (non-virtual or final methods, with a null check
 * on `this`), and finally a real vtable/IMT-slot indirect call (interface
 * methods dispatch through the IMT slot when MONO_ARCH_HAVE_IMT).
 * String ctors get their signature's return type patched to string.
 * NOTE(review): several guarding conditions sit on elided lines in this
 * decimated view; code bytes are unchanged, only comments added.
 */
2296 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2297 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2299 gboolean might_be_remote;
2300 gboolean virtual = this != NULL;
2301 gboolean enable_for_aot = TRUE;
2305 if (method->string_ctor) {
2306 /* Create the real signature */
2307 /* FIXME: Cache these */
2308 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2309 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* MarshalByRef receivers (or object) that are not known local may need the
 * remoting wrapper; in shared generic code go through an RGCTX lookup. */
2314 might_be_remote = this && sig->hasthis &&
2315 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2316 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2318 context_used = mono_method_check_context_used (method);
2319 if (might_be_remote && context_used) {
2322 g_assert (cfg->generic_sharing_context);
2324 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2326 return mono_emit_calli (cfg, sig, args, addr);
2329 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2331 if (might_be_remote)
2332 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2334 call->method = method;
2335 call->inst.flags |= MONO_INST_HAS_METHOD;
2336 call->inst.inst_left = this;
2339 int vtable_reg, slot_reg, this_reg;
2341 this_reg = this->dreg;
2343 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: call indirectly through delegate->invoke_impl. */
2344 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2345 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2347 /* Make a call to delegate->invoke_impl */
2348 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 call->inst.inst_basereg = this_reg;
2350 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2351 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2353 return (MonoInst*)call;
/* Static devirtualization: non-virtual, or final and not the remoting
 * wrapper — a direct call after a `this` null check suffices. */
2357 if ((!cfg->compile_aot || enable_for_aot) &&
2358 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2359 (MONO_METHOD_IS_FINAL (method) &&
2360 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2361 !(method->klass->marshalbyref && context_used)) {
2363 * the method is not virtual, we just need to ensure this is not null
2364 * and then we can call the method directly.
2366 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2368 * The check above ensures method is not gshared, this is needed since
2369 * gshared methods can't have wrappers.
2371 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2374 if (!method->string_ctor)
2375 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2377 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
2384 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2386 * the method is virtual, but we can statically dispatch since either
2387 * it's class or the method itself are sealed.
2388 * But first we need to ensure it's not a null reference.
2390 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2392 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2393 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with fault/null semantics), then
 * compute the slot: IMT slot (negative offset before the vtable) for
 * interfaces, plain vtable slot otherwise. */
2398 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2400 vtable_reg = alloc_preg (cfg);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2402 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2404 #ifdef MONO_ARCH_HAVE_IMT
2406 guint32 imt_slot = mono_method_get_imt_slot (method);
2407 emit_imt_argument (cfg, call, imt_arg);
2408 slot_reg = vtable_reg;
2409 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2412 if (slot_reg == -1) {
/* No IMT: fall back to the per-interface offsets table. */
2413 slot_reg = alloc_preg (cfg);
2414 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2415 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2418 slot_reg = vtable_reg;
2419 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2420 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2421 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also carry an IMT argument. */
2423 g_assert (mono_method_signature (method)->generic_param_count);
2424 emit_imt_argument (cfg, call, imt_arg);
2429 call->inst.sreg1 = slot_reg;
2430 call->virtual = TRUE;
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/* Method call that additionally passes a VTABLE_ARG as the RGCTX argument:
 * copy it into a fresh vreg before the call sequence, emit the call, then
 * attach the register via set_rgctx_arg. */
2439 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2440 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2447 rgctx_reg = mono_alloc_preg (cfg);
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2450 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2452 call = (MonoCallInst*)ins;
2454 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call METHOD with its own signature and no IMT arg. */
2460 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2462 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function FUNC with signature SIG.
 * The call is appended to the current bblock and returned. */
2466 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2473 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2476 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2478 return (MonoInst*)call;
/* Emit a call to the registered JIT icall identified by FUNC's address:
 * look up its MonoJitICallInfo and call through the icall wrapper. */
2482 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2484 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2488 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/* The MonoJumpInfo itself is passed as the "address": the PATCH_INFO_ABS
 * resolver recognizes it via the abs_patches hash and substitutes the real
 * target at patch time; fptr_is_patch marks the call accordingly. */
2492 * mono_emit_abs_call:
2494 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2496 inline static MonoInst*
2497 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2498 MonoMethodSignature *sig, MonoInst **args)
2500 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2504 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2507 if (cfg->abs_patches == NULL)
2508 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2509 g_hash_table_insert (cfg->abs_patches, ji, ji);
2510 ins = mono_emit_native_call (cfg, ji, sig, args);
2511 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee might
 * return a small integer without initializing the upper bits (pinvoke, or
 * when LLVM is enabled). The widened value keeps the stack type of INS.
 */
2516 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2518 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2519 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2523 * Native code might return non register sized integers
2524 * without initializing the upper bits.
/* Pick the sign/zero-extension op matching the return type's load size. */
2526 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2527 case OP_LOADI1_MEMBASE:
2528 widen_op = OP_ICONV_TO_I1;
2530 case OP_LOADU1_MEMBASE:
2531 widen_op = OP_ICONV_TO_U1;
2533 case OP_LOADI2_MEMBASE:
2534 widen_op = OP_ICONV_TO_I2;
2536 case OP_LOADU2_MEMBASE:
2537 widen_op = OP_ICONV_TO_U2;
2543 if (widen_op != -1) {
2544 int dreg = alloc_preg (cfg);
2547 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2548 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Lazily look up the 3-argument String::memcpy helper from corlib and cache
 * it; abort with an error if the installed corlib is too old to have it.
 */
2558 get_memcpy_method (void)
2560 static MonoMethod *memcpy_method = NULL;
2561 if (!memcpy_method) {
2562 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2564 g_error ("Old corlib found. Install a new one");
2566 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively fill WB_BITMAP with one bit per pointer-sized slot of KLASS
 * (relative to OFFSET) for every instance field holding a reference. Static
 * fields are skipped; for valuetypes the MonoObject header is subtracted
 * from the field offset. Reference fields must be pointer-aligned.
 */
2570 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2572 MonoClassField *field;
2573 gpointer iter = NULL;
2575 while ((field = mono_class_get_fields (klass, &iter))) {
2578 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2580 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2581 if (mono_type_is_reference (field->type)) {
2582 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2583 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2585 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2586 MonoClass *field_class = mono_class_from_mono_type (field->type);
/* Recurse into embedded structs that themselves contain references. */
2587 if (field_class->has_references)
2588 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE (or VALUE_REG when VALUE is
 * NULL) through PTR. Fast paths: an arch-specific OP_CARD_TABLE_WBARRIER
 * when available and JITting, otherwise inline card-table marking
 * (shift + optional mask + store of 1 into the card byte); the slow path
 * calls the generic managed write barrier. A dummy use keeps the stored
 * value alive across the barrier.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
2594 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2597 int card_table_shift_bits;
2598 gpointer card_table_mask;
2599 guint8 *card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2600 MonoInst *dummy_use;
2602 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2603 int nursery_shift_bits;
2604 size_t nursery_size;
2606 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2608 if (!cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2611 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2612 wbarrier->sreg1 = ptr->dreg;
2614 wbarrier->sreg2 = value->dreg;
2616 wbarrier->sreg2 = value_reg;
2617 MONO_ADD_INS (cfg->cbb, wbarrier);
2621 int offset_reg = alloc_preg (cfg);
2622 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked to the card table size. */
2625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2626 if (card_table_mask)
2627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2629 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2630 * IMM's larger than 32bits.
2632 if (cfg->compile_aot) {
2633 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2635 MONO_INST_NEW (cfg, ins, OP_PCONST);
2636 ins->inst_p0 = card_table;
2637 ins->dreg = card_reg;
2638 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1. */
2641 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the generic managed write barrier helper. */
2646 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2647 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2653 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2654 dummy_use->sreg1 = value_reg;
2655 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled, write-barrier-aware copy of a valuetype of type KLASS
 * from iargs[1] to iargs[0]. Bails out (presumably returning FALSE — the
 * return statements are elided in this view) when ALIGN is below pointer
 * size or SIZE exceeds 32 pointer slots (the wb bitmap limit). Copies larger
 * than 5 pointer slots are delegated to mono_gc_wbarrier_value_copy_bitmap
 * with SIZE rounded up to pointer size; smaller copies are unrolled
 * pointer-by-pointer with a write barrier per slot, then the sub-pointer
 * tail is copied with 4/2/1-byte moves (those cannot be references).
 */
2661 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2663 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2664 unsigned need_wb = 0;
2669 /*types with references can't have alignment smaller than sizeof(void*) */
2670 if (align < SIZEOF_VOID_P)
2673 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2674 if (size > 32 * SIZEOF_VOID_P)
2677 create_write_barrier_bitmap (klass, &need_wb, 0);
2679 /* We don't unroll more than 5 stores to avoid code bloat. */
2680 if (size > 5 * SIZEOF_VOID_P) {
2681 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2682 size += (SIZEOF_VOID_P - 1);
2683 size &= ~(SIZEOF_VOID_P - 1);
2685 EMIT_NEW_ICONST (cfg, iargs [2], size);
2686 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2687 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2691 destreg = iargs [0]->dreg;
2692 srcreg = iargs [1]->dreg;
2695 dest_ptr_reg = alloc_preg (cfg);
2696 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one slot per iteration. */
2699 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2701 while (size >= SIZEOF_VOID_P) {
2702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2706 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2708 offset += SIZEOF_VOID_P;
2709 size -= SIZEOF_VOID_P;
2712 /*tmp += sizeof (void*)*/
2713 if (size >= SIZEOF_VOID_P) {
2714 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2715 MONO_ADD_INS (cfg->cbb, iargs [0]);
2719 /* Those cannot be references since size < sizeof (void*) */
2721 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2745 * Emit code to copy a valuetype of type @klass whose address is stored in
2746 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When write barriers are enabled and KLASS has references (and NATIVE is
 * false), either an intrinsic wb-aware copy, or the mono_value_copy icall
 * with the class (from the rgctx under generic sharing) is emitted; stack
 * destinations skip barriers. Otherwise a small inline memcpy or a call to
 * the corlib memcpy helper is used.
 */
2749 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2751 MonoInst *iargs [4];
2754 MonoMethod *memcpy_method;
2758 * This check breaks with spilled vars... need to handle it during verification anyway.
2759 * g_assert (klass && klass == src->klass && klass == dest->klass);
2763 n = mono_class_native_size (klass, &align);
2765 n = mono_class_value_size (klass, &align);
2767 /* if native is true there should be no references in the struct */
2768 if (cfg->gen_write_barriers && klass->has_references && !native) {
2769 /* Avoid barriers when storing to the stack */
2770 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2771 (dest->opcode == OP_LDADDR))) {
2772 int context_used = 0;
2777 if (cfg->generic_sharing_context)
2778 context_used = mono_class_check_context_used (klass);
2780 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2781 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2783 } else if (context_used) {
2784 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2786 if (cfg->compile_aot) {
2787 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2789 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2790 mono_class_compute_gc_descriptor (klass);
2794 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: small copies are inlined, large ones call memcpy. */
2799 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2800 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2801 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2805 EMIT_NEW_ICONST (cfg, iargs [2], n);
2807 memcpy_method = get_memcpy_method ();
2808 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Lazily look up the 3-argument String::memset helper from corlib and cache
 * it; abort with an error if the installed corlib is too old to have it.
 */
2813 get_memset_method (void)
2815 static MonoMethod *memset_method = NULL;
2816 if (!memset_method) {
2817 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2819 g_error ("Old corlib found. Install a new one");
2821 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype KLASS at address DEST->dreg.
 * Sizes up to 5 pointers are zeroed inline with mini_emit_memset; larger
 * ones call the corlib memset helper with (dest, 0, n).
 */
2825 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2827 MonoInst *iargs [3];
2830 MonoMethod *memset_method;
2832 /* FIXME: Optimize this for the case when dest is an LDADDR */
2834 mono_class_init (klass);
2835 n = mono_class_value_size (klass, &align);
2837 if (n <= sizeof (gpointer) * 5) {
2838 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2841 memset_method = get_memset_method ();
2843 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2844 EMIT_NEW_ICONST (cfg, iargs [2], n);
2845 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for METHOD under generic
 * sharing. Depending on CONTEXT_USED and the method kind, the context is
 * loaded from the mrgctx variable, the vtable variable (possibly indirecting
 * through MonoMethodRuntimeGenericContext.class_vtable), or from `this`'s
 * vtable.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
2850 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2852 MonoInst *this = NULL;
2854 g_assert (cfg->generic_sharing_context);
2856 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2857 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2858 !method->klass->valuetype)
2859 EMIT_NEW_ARGLOAD (cfg, this, 0);
2861 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2862 MonoInst *mrgctx_loc, *mrgctx_var;
2865 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2867 mrgctx_loc = mono_get_vtable_var (cfg);
2868 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2871 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2872 MonoInst *vtable_loc, *vtable_var;
2876 vtable_loc = mono_get_vtable_var (cfg);
2877 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2879 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2880 MonoInst *mrgctx_var = vtable_var;
2883 vtable_reg = alloc_preg (cfg);
2884 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2885 vtable_var->type = STACK_PTR;
/* Non-static reference-type case: load the vtable from `this`. */
2891 int vtable_reg, res_reg;
2893 vtable_reg = alloc_preg (cfg);
2894 res_reg = alloc_preg (cfg);
2895 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate, from mempool MP, an rgctx-entry patch descriptor wrapping an
 * inner MonoJumpInfo of PATCH_TYPE/PATCH_DATA, tagged with INFO_TYPE.
 */
2900 static MonoJumpInfoRgctxEntry *
2901 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2903 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2904 res->method = method;
2905 res->in_mrgctx = in_mrgctx;
2906 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2907 res->data->type = patch_type;
2908 res->data->data.target = patch_data;
2909 res->info_type = info_type;
/* Emit a call to the lazy rgctx fetch trampoline resolving ENTRY against RGCTX. */
2914 static inline MonoInst*
2915 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2917 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
2921 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2922 MonoClass *klass, int rgctx_type)
2924 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2925 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2927 return emit_rgctx_fetch (cfg, rgctx, entry);
2931 * emit_get_rgctx_method:
2933 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2934 * normal constants, else emit a load from the rgctx.
2937 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2938 MonoMethod *cmethod, int rgctx_type)
2940 if (!context_used) {
/* Not shared: the property is a compile-time constant. */
2943 switch (rgctx_type) {
2944 case MONO_RGCTX_INFO_METHOD:
2945 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2947 case MONO_RGCTX_INFO_METHOD_RGCTX:
2948 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2951 g_assert_not_reached ();
/* Shared: fetch the entry lazily from the rgctx at runtime. */
2954 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2955 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2957 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
2962 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2963 MonoClassField *field, int rgctx_type)
2965 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2966 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2968 return emit_rgctx_fetch (cfg, rgctx, entry);
2972 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing its
 * vtable (via the rgctx when shared, as a constant otherwise). On
 * architectures with MONO_ARCH_VTABLE_REG the vtable is passed in that
 * fixed register.
 */
2975 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2977 MonoInst *vtable_arg;
2979 int context_used = 0;
2981 if (cfg->generic_sharing_context)
2982 context_used = mono_class_check_context_used (klass);
2985 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2986 klass, MONO_RGCTX_INFO_VTABLE);
2988 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2992 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
2995 if (COMPILE_LLVM (cfg))
2996 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2998 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2999 #ifdef MONO_ARCH_VTABLE_REG
3000 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3001 cfg->uses_vtable_reg = TRUE;
3008 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, comparing
 * either the object's class (shared-option path) or its vtable against the
 * expected one; throws ArrayTypeMismatchException on mismatch. Under
 * generic sharing the expected vtable comes from the rgctx; under AOT,
 * constants are loaded via patchable const instructions.
 */
3011 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3013 int vtable_reg = alloc_preg (cfg);
3014 int context_used = 0;
3016 if (cfg->generic_sharing_context)
3017 context_used = mono_class_check_context_used (array_class);
/* Faulting load: also acts as the null check on OBJ. */
3019 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3021 if (cfg->opt & MONO_OPT_SHARED) {
3022 int class_reg = alloc_preg (cfg);
3023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3024 if (cfg->compile_aot) {
3025 int klass_reg = alloc_preg (cfg);
3026 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3027 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3031 } else if (context_used) {
3032 MonoInst *vtable_ins;
3034 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3035 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3037 if (cfg->compile_aot) {
3041 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3043 vt_reg = alloc_preg (cfg);
3044 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3045 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3048 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3054 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (from OBJ_REG's
 * vtable) and target KLASS into the per-thread MonoJitTlsData
 * class_cast_from/class_cast_to slots, so a failing cast can report both
 * types. No-op otherwise.
 */
3058 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3060 if (mini_get_debug_options ()->better_cast_details) {
3061 int to_klass_reg = alloc_preg (cfg);
3062 int vtable_reg = alloc_preg (cfg);
3063 int klass_reg = alloc_preg (cfg);
3064 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3067 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3071 MONO_ADD_INS (cfg->cbb, tls_get);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3076 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the per-thread cast-details slot set by save_cast_details (). */
3082 reset_cast_details (MonoCompile *cfg)
3084 /* Reset the variables holding the cast details */
3085 if (mini_get_debug_options ()->better_cast_details) {
3086 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3088 MONO_ADD_INS (cfg->cbb, tls_get);
3089 /* It is enough to reset the from field */
3090 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3095 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3096 * generic code is generated.
/*
 * Calls the Nullable<T>::Unbox(obj) managed helper; when shared, the method
 * address and the rgctx are fetched at runtime and an indirect rgctx call is
 * emitted instead of a direct one.
 */
3099 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3101 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3104 MonoInst *rgctx, *addr;
3106 /* FIXME: What if the class is shared? We might not
3107 have to get the address of the method from the
3109 addr = emit_get_rgctx_method (cfg, context_used, method,
3110 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3112 rgctx = emit_get_rgctx (cfg, method, context_used);
3114 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3116 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for sp[0] to valuetype KLASS: a faulting vtable
 * load (doubles as null check), a rank==0 check, then an element-class
 * comparison (via the rgctx klass under sharing, or mini_emit_class_check
 * otherwise) throwing InvalidCastException on mismatch; finally the result
 * address is obj + sizeof(MonoObject), typed as a managed pointer.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
3121 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3125 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3126 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3127 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3128 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3130 obj_reg = sp [0]->dreg;
3131 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3134 /* FIXME: generics */
3135 g_assert (klass->rank == 0);
3138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3139 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3145 MonoInst *element_class;
3147 /* This assertion is from the unboxcast insn */
3148 g_assert (klass->rank == 0);
3150 element_class = emit_get_rgctx_klass (cfg, context_used,
3151 klass->element_class, MONO_RGCTX_INFO_KLASS);
3153 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3154 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3156 save_cast_details (cfg, klass->element_class, obj_reg);
3157 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3158 reset_cast_details (cfg);
/* Result: address of the boxed payload, right after the object header. */
3161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3162 MONO_ADD_INS (cfg->cbb, add);
3163 add->type = STACK_MP;
3170 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of an instance of KLASS (FOR_BOX when boxing).
 * Paths visible here: generic-shared allocation via an rgctx-fetched
 * klass/vtable and mono_object_new{,_specific}; MONO_OPT_SHARED via
 * mono_object_new with a domain constant; an AOT out-of-line corlib
 * shortcut through mono_helper_newobj_mscorlib; a managed allocator when
 * the GC provides one; otherwise mono_class_get_allocation_ftn, optionally
 * passing the pointer-rounded instance size in words.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
3173 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3175 MonoInst *iargs [2];
3181 MonoInst *iargs [2];
3184 FIXME: we cannot get managed_alloc here because we can't get
3185 the class's vtable (because it's not a closed class)
3187 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3188 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3191 if (cfg->opt & MONO_OPT_SHARED)
3192 rgctx_info = MONO_RGCTX_INFO_KLASS;
3194 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3195 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3197 if (cfg->opt & MONO_OPT_SHARED) {
3198 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3200 alloc_ftn = mono_object_new;
3203 alloc_ftn = mono_object_new_specific;
3206 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3209 if (cfg->opt & MONO_OPT_SHARED) {
3210 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3211 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3213 alloc_ftn = mono_object_new;
3214 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3215 /* This happens often in argument checking code, eg. throw new FooException... */
3216 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3217 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3218 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3220 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3221 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a TypeLoadException through the cfg. */
3225 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3226 cfg->exception_ptr = klass;
3230 #ifndef MONO_CROSS_COMPILE
3231 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3234 if (managed_alloc) {
3235 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3236 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3238 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3240 guint32 lw = vtable->klass->instance_size;
3241 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3242 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3243 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3246 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3250 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3254 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit boxing of VAL into an instance of KLASS. Nullable<T> boxes through
 * the managed Box(value) helper (indirect rgctx call when shared);
 * otherwise the object is allocated with handle_alloc and VAL is stored
 * into the payload right after the MonoObject header.
 */
3257 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3259 MonoInst *alloc, *ins;
3261 if (mono_class_is_nullable (klass)) {
3262 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3265 /* FIXME: What if the class is shared? We might not
3266 have to get the method address from the RGCTX. */
3267 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3268 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3269 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3271 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3273 return mono_emit_method_call (cfg, method, &val, NULL);
3277 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3281 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
// FIXME: This doesn't work yet (class libs tests fail?)
/* Predicate for casts too complex for the inline fast path (interfaces,
 * arrays, nullables, MBR, sealed, variant generics, type vars). The leading
 * `TRUE ||` currently forces EVERY cast down the icall path — see the FIXME. */
3287 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3290 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit a castclass of SRC to KLASS. Complex casts (and, today, all casts
 * — see is_complex_isinst) call the mono_object_castclass icall; the inline
 * path null-checks, then compares via the interface bitmap, a direct
 * vtable/class comparison for sealed non-array classes, or the generic
 * mini_emit_castclass_inst walk, throwing InvalidCastException on failure.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
3293 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3295 MonoBasicBlock *is_null_bb;
3296 int obj_reg = src->dreg;
3297 int vtable_reg = alloc_preg (cfg);
3298 MonoInst *klass_inst = NULL;
3303 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3304 klass, MONO_RGCTX_INFO_KLASS);
3306 if (is_complex_isinst (klass)) {
3307 /* Complex case, handle by an icall */
3313 args [1] = klass_inst;
3315 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3317 /* Simple case, handled by the code below */
/* null objects pass the cast unchanged. */
3321 NEW_BBLOCK (cfg, is_null_bb);
3323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3326 save_cast_details (cfg, klass, obj_reg);
3328 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3330 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3332 int klass_reg = alloc_preg (cfg);
3334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3336 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3337 /* the remoting code is broken, access the class for now */
3338 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3339 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3341 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3342 cfg->exception_ptr = klass;
3345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3350 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3353 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3357 MONO_START_BB (cfg, is_null_bb);
3359 reset_cast_details (cfg);
3365 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit an isinst test of SRC against KLASS, producing the object on
 * success and NULL on failure. Complex casts (and, today, all — see
 * is_complex_isinst) go through the mono_object_isinst icall. The inline
 * path copies the input to the result register up front (so it can be
 * if-converted), branches null straight to success, then dispatches on the
 * kind of KLASS: interface bitmap, array rank/element-class checks with
 * enum/object special cases and an SZARRAY bounds check, nullable, sealed
 * vtable/class comparison, or the generic isinst walk. false_bb writes 0
 * into the result register.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
3368 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3371 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3372 int obj_reg = src->dreg;
3373 int vtable_reg = alloc_preg (cfg);
3374 int res_reg = alloc_preg (cfg);
3375 MonoInst *klass_inst = NULL;
3378 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3380 if (is_complex_isinst (klass)) {
3383 /* Complex case, handle by an icall */
3389 args [1] = klass_inst;
3391 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3393 /* Simple case, the code below can handle it */
3397 NEW_BBLOCK (cfg, is_null_bb);
3398 NEW_BBLOCK (cfg, false_bb);
3399 NEW_BBLOCK (cfg, end_bb);
3401 /* Do the assignment at the beginning, so the other assignment can be if converted */
3402 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3403 ins->type = STACK_OBJ;
3406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3407 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3409 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3411 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3412 g_assert (!context_used);
3413 /* the is_null_bb target simply copies the input register to the output */
3414 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3416 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then dispatch on the element (cast) class. */
3419 int rank_reg = alloc_preg (cfg);
3420 int eclass_reg = alloc_preg (cfg);
3422 g_assert (!context_used);
3423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3428 if (klass->cast_class == mono_defaults.object_class) {
3429 int parent_reg = alloc_preg (cfg);
3430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3431 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3432 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3434 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3435 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3436 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3438 } else if (klass->cast_class == mono_defaults.enum_class) {
3439 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3441 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3442 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3444 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3445 /* Check that the object is a vector too */
3446 int bounds_reg = alloc_preg (cfg);
3447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3452 /* the is_null_bb target simply copies the input register to the output */
3453 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3455 } else if (mono_class_is_nullable (klass)) {
3456 g_assert (!context_used);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3458 /* the is_null_bb target simply copies the input register to the output */
3459 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3461 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3462 g_assert (!context_used);
3463 /* the remoting code is broken, access the class for now */
3464 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3465 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3467 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3468 cfg->exception_ptr = klass;
3471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3473 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3479 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3480 /* the is_null_bb target simply copies the input register to the output */
3481 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result register becomes NULL. */
3486 MONO_START_BB (cfg, false_bb);
3488 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3491 MONO_START_BB (cfg, is_null_bb);
3493 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst variant for SRC against KLASS; see the
 * inline comment below for the 0/1/2 result encoding. Transparent proxies
 * with unknown custom type info yield 2; everything else is resolved with
 * the regular interface/class checks.
 * NOTE(review): interior lines are elided in this view; visible code kept verbatim.
 */
3499 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3501 /* This opcode takes as input an object reference and a class, and returns:
3502 0) if the object is an instance of the class,
3503 1) if the object is not instance of the class,
3504 2) if the object is a proxy whose type cannot be determined */
3507 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3508 int obj_reg = src->dreg;
3509 int dreg = alloc_ireg (cfg);
3511 int klass_reg = alloc_preg (cfg);
3513 NEW_BBLOCK (cfg, true_bb);
3514 NEW_BBLOCK (cfg, false_bb);
3515 NEW_BBLOCK (cfg, false2_bb);
3516 NEW_BBLOCK (cfg, end_bb);
3517 NEW_BBLOCK (cfg, no_proxy_bb);
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3522 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3523 NEW_BBLOCK (cfg, interface_fail_bb);
3525 tmp_reg = alloc_preg (cfg);
3526 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3527 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3528 MONO_START_BB (cfg, interface_fail_bb);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface miss on a transparent proxy: decide between "no" and "unknown". */
3531 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3533 tmp_reg = alloc_preg (cfg);
3534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3538 tmp_reg = alloc_preg (cfg);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3542 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3543 tmp_reg = alloc_preg (cfg);
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3547 tmp_reg = alloc_preg (cfg);
3548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3552 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3555 MONO_START_BB (cfg, no_proxy_bb);
3557 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results: 1 = no, 2 = unknown proxy, 0 = yes. */
3560 MONO_START_BB (cfg, false_bb);
3562 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3565 MONO_START_BB (cfg, false2_bb);
3567 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3570 MONO_START_BB (cfg, true_bb);
3572 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3574 MONO_START_BB (cfg, end_bb);
3577 MONO_INST_NEW (cfg, ins, OP_ICONST);
3579 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR for the remoting-aware castclass ("ccastclass") of SRC
 * against KLASS, leaving an integer result code in a fresh vreg; the
 * returned instruction is typed STACK_I4.  Unlike handle_cisinst, a failed
 * check on a plain object throws InvalidCastException instead of producing
 * a result code.
 * NOTE(review): this excerpt elides some original source lines (function
 * prologue/epilogue, some branches) -- read against the full file.
 */
3585 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3587 /* This opcode takes as input an object reference and a class, and returns:
3588 0) if the object is an instance of the class,
3589 1) if the object is a proxy whose type cannot be determined
3590 an InvalidCastException exception is thrown otherwise */
3593 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3594 int obj_reg = src->dreg;
3595 int dreg = alloc_ireg (cfg);
3596 int tmp_reg = alloc_preg (cfg);
3597 int klass_reg = alloc_preg (cfg);
3599 NEW_BBLOCK (cfg, end_bb);
3600 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object info so a failure can produce a detailed message. */
3605 save_cast_details (cfg, klass, obj_reg);
/* Interface target: fast interface check, then proxy fallback. */
3607 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3608 NEW_BBLOCK (cfg, interface_fail_bb);
3610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3611 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3612 MONO_START_BB (cfg, interface_fail_bb);
3613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> throws InvalidCastException. */
3615 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info also fails; with custom info the managed
 * remoting layer decides, so return result 1. */
3617 tmp_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3620 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3622 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: inspect the object's class directly. */
3626 NEW_BBLOCK (cfg, no_proxy_bb);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3630 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the remote class's proxy_class instead. */
3632 tmp_reg = alloc_preg (cfg);
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3634 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxy without custom type info behaves like a plain object. */
3636 tmp_reg = alloc_preg (cfg);
3637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom info: a failed subtype check yields result 1. */
3641 NEW_BBLOCK (cfg, fail_1_bb);
3643 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3645 MONO_START_BB (cfg, fail_1_bb);
3647 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a failed check here throws (mini_emit_castclass). */
3650 MONO_START_BB (cfg, no_proxy_bb);
3652 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3655 MONO_START_BB (cfg, ok_result_bb);
3657 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3659 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an ICONST-shaped instruction. */
3662 MONO_INST_NEW (cfg, ins, OP_ICONST);
3664 ins->type = STACK_I4;
3670 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR for constructing a delegate of type KLASS bound to
 * METHOD with target object TARGET, replacing a call to the managed
 * delegate .ctor.  CONTEXT_USED carries generic-sharing information for the
 * rgctx lookups.  All runtime checks normally done by mono_delegate_ctor ()
 * are deferred to the delegate trampoline (see comment at the end).
 * NOTE(review): some original lines (declarations of 'domain'/'code_slot',
 * braces, the final return of 'obj') are elided in this excerpt.
 */
3672 static G_GNUC_UNUSED MonoInst*
3673 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3675 gpointer *trampoline;
3676 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3680 obj = handle_alloc (cfg, klass, FALSE, 0);
3684 /* Inline the contents of mono_delegate_ctor */
3686 /* Set target field */
3687 /* Optimize away setting of NULL target */
3688 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3691 /* Set method field */
3692 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3696 * To avoid looking up the compiled code belonging to the target method
3697 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3698 * store it, and we fill it after the method has been compiled.
/* AOT and dynamic methods cannot use the per-domain code-slot cache. */
3700 if (!cfg->compile_aot && !method->dynamic) {
3701 MonoInst *code_slot_ins;
3704 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot map
 * under the domain lock. */
3706 domain = mono_domain_get ();
3707 mono_domain_lock (domain);
3708 if (!domain_jit_info (domain)->method_code_hash)
3709 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3710 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3712 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3713 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3715 mono_domain_unlock (domain);
3717 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3722 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched in at load time. */
3723 if (cfg->compile_aot) {
3724 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3726 trampoline = mono_create_delegate_trampoline (klass);
3727 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3731 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall wrapper to allocate a
 * multi-dimensional array of the given RANK; SP holds the dimension
 * arguments from the IL stack, IP is the current IL position.  Marks the
 * method as using varargs (the icall has a vararg signature) and disables
 * LLVM compilation for it.
 */
3737 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3739 MonoJitICallInfo *info;
3741 /* Need to register the icall so it gets an icall wrapper */
3742 info = mono_get_array_new_va_icall (rank);
3744 cfg->flags |= MONO_CFG_HAS_VARARGS;
3746 /* mono_array_new_va () needs a vararg calling convention */
3747 cfg->disable_llvm = TRUE;
3749 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3750 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the GOT variable
 * at the start of the method's entry basic block, exactly once per method
 * (guarded by cfg->got_var_allocated).  No-op when the method has no
 * got_var or it was already allocated.
 */
3754 mono_emit_load_got_addr (MonoCompile *cfg)
3756 MonoInst *getaddr, *dummy_use;
3758 if (!cfg->got_var || cfg->got_var_allocated)
3761 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3762 getaddr->dreg = cfg->got_var->dreg;
3764 /* Add it to the start of the first bblock */
/* Prepend manually if the entry block already has code; otherwise a plain
 * append puts it first anyway. */
3765 if (cfg->bb_entry->code) {
3766 getaddr->next = cfg->bb_entry->code;
3767 cfg->bb_entry->code = getaddr;
3770 MONO_ADD_INS (cfg->bb_entry, getaddr);
3772 cfg->got_var_allocated = TRUE;
3775 * Add a dummy use to keep the got_var alive, since real uses might
3776 * only be generated by the back ends.
3777 * Add it to end_bblock, so the variable's lifetime covers the whole
3779 * It would be better to make the usage of the got var explicit in all
3780 * cases when the backend needs it (i.e. calls, throw etc.), so this
3781 * wouldn't be needed.
3783 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3784 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inlining size threshold, initialized lazily by
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable (default: INLINE_LENGTH_LIMIT). */
3787 static int inline_limit;
3788 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing, deep inline nesting, noinline/synchronized
 * methods, MarshalByRef classes, oversized bodies, methods with declarative
 * security (CAS), classes whose cctor would have to run inside the inlined
 * code, and (under soft-float) R4 parameters/returns.
 * NOTE(review): some lines (returns, 'vtable' declaration, braces) are
 * elided in this excerpt.
 */
3791 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3793 MonoMethodHeaderSummary header;
3795 #ifdef MONO_ARCH_SOFT_FLOAT
3796 MonoMethodSignature *sig = mono_method_signature (method);
3800 if (cfg->generic_sharing_context)
3803 if (cfg->inline_depth > 10)
3806 #ifdef MONO_ARCH_HAVE_LMF_OPS
3807 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3808 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): 'signature' is referenced here but only 'sig' is visibly
 * declared above (and only under MONO_ARCH_SOFT_FLOAT); confirm this
 * arch-gated path still compiles on targets defining MONO_ARCH_HAVE_LMF_OPS. */
3809 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3814 if (!mono_method_get_header_summary (method, &header))
3817 /*runtime, icall and pinvoke are checked by summary call*/
3818 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3819 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3820 (method->klass->marshalbyref) ||
3824 /* also consider num_locals? */
3825 /* Do the size check early to avoid creating vtables */
3826 if (!inline_limit_inited) {
3827 if (getenv ("MONO_INLINELIMIT"))
3828 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3830 inline_limit = INLINE_LENGTH_LIMIT;
3831 inline_limit_inited = TRUE;
3833 if (header.code_size >= inline_limit)
3837 * if we can initialize the class of the method right away, we do,
3838 * otherwise we don't allow inlining if the class needs initialization,
3839 * since it would mean inserting a call to mono_runtime_class_init()
3840 * inside the inlined code
3842 if (!(cfg->opt & MONO_OPT_SHARED)) {
3843 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3844 if (cfg->run_cctors && method->klass->has_cctor) {
3845 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3846 if (!method->klass->runtime_info)
3847 /* No vtable created yet */
3849 vtable = mono_class_vtable (cfg->domain, method->klass);
3852 /* This makes so that inline cannot trigger */
3853 /* .cctors: too many apps depend on them */
3854 /* running with a specific order... */
3855 if (! vtable->initialized)
3857 mono_runtime_class_init (vtable);
3859 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3860 if (!method->klass->runtime_info)
3861 /* No vtable created yet */
3863 vtable = mono_class_vtable (cfg->domain, method->klass);
3866 if (!vtable->initialized)
3871 * If we're compiling for shared code
3872 * the cctor will need to be run at aot method load time, for example,
3873 * or at the end of the compilation of the inlining method.
3875 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3880 * CAS - do not inline methods with declarative security
3881 * Note: this has to be before any possible return TRUE;
3883 if (mono_method_has_declsec (method))
3886 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods touching R4 values. */
3888 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3890 for (i = 0; i < sig->param_count; ++i)
3891 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting a call
 * to run VTABLE's class constructor first.  Not needed when the vtable is
 * already initialized (and we are not AOT compiling), when the class is
 * BeforeFieldInit, when no cctor has to run, or when an instance method
 * accesses its own class (the cctor already ran before the call).
 */
3899 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3901 if (vtable->initialized && !cfg->compile_aot)
3904 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3907 if (!mono_class_needs_cctor_run (vtable->klass, method))
3910 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3911 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR of element class KLASS, i.e. &arr->vector [index * size].
 * BCHECK requests a bounds check (guard presumably on an elided line --
 * confirm in the full file).  Returns a STACK_PTR-typed instruction.
 */
3918 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3922 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3924 mono_class_init (klass);
3925 size = mono_class_array_element_size (klass);
3927 mult_reg = alloc_preg (cfg);
3928 array_reg = arr->dreg;
3929 index_reg = index->dreg;
3931 #if SIZEOF_REGISTER == 8
3932 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend explicitly. */
3933 if (COMPILE_LLVM (cfg)) {
3935 index2_reg = index_reg;
3937 index2_reg = alloc_preg (cfg);
3938 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3941 if (index->type == STACK_I8) {
3942 index2_reg = alloc_preg (cfg);
3943 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3945 index2_reg = index_reg;
3950 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3952 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the multiply and add into a single LEA when the element
 * size is a power of two the addressing mode supports. */
3953 if (size == 1 || size == 2 || size == 4 || size == 8) {
3954 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3956 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3957 ins->type = STACK_PTR;
/* Generic path: addr = array + index * size + offsetof (vector). */
3963 add_reg = alloc_preg (cfg);
3965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3966 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3967 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3968 ins->type = STACK_PTR;
3969 MONO_ADD_INS (cfg->cbb, ins);
3974 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (i, j) of a rank-2 array ARR
 * with element class KLASS.  For each dimension the lower bound is
 * subtracted from the index and the result is range-checked against the
 * dimension length (unsigned compare, throwing IndexOutOfRangeException);
 * the final address is arr + ((realidx1 * len2 + realidx2) * size) +
 * offsetof (MonoArray, vector).  Only compiled on targets with native
 * multiply (depends on OP_PMUL).
 */
3976 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3978 int bounds_reg = alloc_preg (cfg);
3979 int add_reg = alloc_preg (cfg);
3980 int mult_reg = alloc_preg (cfg);
3981 int mult2_reg = alloc_preg (cfg);
3982 int low1_reg = alloc_preg (cfg);
3983 int low2_reg = alloc_preg (cfg);
3984 int high1_reg = alloc_preg (cfg);
3985 int high2_reg = alloc_preg (cfg);
3986 int realidx1_reg = alloc_preg (cfg);
3987 int realidx2_reg = alloc_preg (cfg);
3988 int sum_reg = alloc_preg (cfg);
3993 mono_class_init (klass);
3994 size = mono_class_array_element_size (klass);
3996 index1 = index_ins1->dreg;
3997 index2 = index_ins2->dreg;
3999 /* range checking */
4000 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4001 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; check realidx1 < length. */
4003 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4004 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4005 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4006 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4007 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4008 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4009 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
4011 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4012 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4013 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4014 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4015 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4016 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4017 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
4019 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4020 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4022 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4023 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4025 ins->type = STACK_MP;
4027 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch the Array.Address intrinsic for CMETHOD: rank 1 uses the
 * inline 1-D address computation, rank 2 uses the inline 2-D path when
 * intrinsics are enabled (and the arch has native multiply), and any other
 * rank falls back to a call to the marshal-generated Address wrapper.
 * IS_SET indicates a store (the value is an extra trailing parameter, so it
 * is excluded from the rank computation).
 */
4034 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4038 MonoMethod *addr_method;
4041 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4044 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4046 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4047 /* emit_ldelema_2 depends on OP_LMUL */
4048 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4049 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated address helper. */
4053 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4054 addr_method = mono_marshal_get_array_address (rank, element_size);
4055 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4060 static MonoBreakPolicy
4061 always_insert_breakpoint (MonoMethod *method)
4063 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4066 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4069 * mono_set_break_policy:
4070 * policy_callback: the new callback function
4072 * Allow embedders to decide whether to actually obey breakpoint instructions
4073 * (both break IL instructions and Debugger.Break () method calls), for example
4074 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4075 * untrusted or semi-trusted code.
4077 * @policy_callback will be called every time a break point instruction needs to
4078 * be inserted with the method argument being the method that calls Debugger.Break()
4079 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4080 * if it wants the breakpoint to not be effective in the given method.
4081 * #MONO_BREAK_POLICY_ALWAYS is the default.
4084 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4086 if (policy_callback)
4087 break_policy_func = policy_callback;
4089 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint instruction should actually be emitted for METHOD.
 * NOTE(review): the identifier is misspelled ("brekpoint"); renaming would
 * touch all call sites, so it is left as-is here.
 */
4093 should_insert_brekpoint (MonoMethod *method) {
4094 switch (break_policy_func (method)) {
4095 case MONO_BREAK_POLICY_ALWAYS:
4097 case MONO_BREAK_POLICY_NEVER:
4099 case MONO_BREAK_POLICY_ON_DBG:
4100 return mono_debug_using_mono_debugger ();
/* Unknown policy value: warn (default/fallback return is on an elided line). */
4102 g_warning ("Incorrect value returned from break policy callback");
4107 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address (no bounds check -- the callers already did it) and copy one
 * element of the generic type either from args [2] into the array (IS_SET)
 * or from the array into args [2].
 */
4109 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4111 MonoInst *addr, *store, *load;
4112 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4114 /* the bounds check is already done by the callers */
4115 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: *element = *args [2];  otherwise: *args [2] = *element. */
4117 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4118 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4120 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4121 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence; currently only SIMD ctor intrinsics are attempted (when
 * MONO_OPT_SIMD is enabled).  Returns the emitted instruction or NULL.
 * NOTE(review): the tail of this function (return path) is elided in this
 * excerpt.
 */
4127 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4129 MonoInst *ins = NULL;
4130 #ifdef MONO_ARCH_SIMD_INTRINSICS
4131 if (cfg->opt & MONO_OPT_SIMD) {
4132 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: if CMETHOD is one of a known set of corlib
 * methods, emit an inline IR sequence instead of a call and return the
 * resulting instruction; otherwise fall through to the arch-specific
 * intrinsics hook at the end.  Handled classes: String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger/Environment,
 * Math (via SIMD/arch hooks).
 * NOTE(review): many lines (returns, #else/#endif, some declarations) are
 * elided in this excerpt; the visible lines are kept verbatim.
 */
4142 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4144 MonoInst *ins = NULL;
/* Lazily resolve System.Runtime.CompilerServices.RuntimeHelpers once. */
4146 static MonoClass *runtime_helpers_class = NULL;
4147 if (! runtime_helpers_class)
4148 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4149 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4151 if (cmethod->klass == mono_defaults.string_class) {
/* String indexer: bounds-checked 16-bit load from the chars array. */
4152 if (strcmp (cmethod->name, "get_Chars") == 0) {
4153 int dreg = alloc_ireg (cfg);
4154 int index_reg = alloc_preg (cfg);
4155 int mult_reg = alloc_preg (cfg);
4156 int add_reg = alloc_preg (cfg);
4158 #if SIZEOF_REGISTER == 8
4159 /* The array reg is 64 bits but the index reg is only 32 */
4160 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4162 index_reg = args [1]->dreg;
4164 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4166 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4167 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4168 add_reg = ins->dreg;
4169 /* Avoid a warning */
4171 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4174 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4175 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4176 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4177 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4179 type_from_op (ins, NULL, NULL);
/* String.Length: kept as OP_STRLEN so later passes can optimize it. */
4181 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4182 int dreg = alloc_ireg (cfg);
4183 /* Decompose later to allow more optimizations */
4184 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4185 ins->type = STACK_I4;
4186 ins->flags |= MONO_INST_FAULT;
4187 cfg->cbb->has_array_access = TRUE;
4188 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* Raw 16-bit store into the chars array (no bounds check -- see comment). */
4191 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4192 int mult_reg = alloc_preg (cfg);
4193 int add_reg = alloc_preg (cfg);
4195 /* The corlib functions check for oob already. */
4196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4197 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4199 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4202 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: load vtable->type (faulting load null-checks 'this'). */
4204 if (strcmp (cmethod->name, "GetType") == 0) {
4205 int dreg = alloc_preg (cfg);
4206 int vt_reg = alloc_preg (cfg);
4207 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4208 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4209 type_from_op (ins, NULL, NULL);
4212 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash (only valid with a non-moving GC). */
4213 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4214 int dreg = alloc_ireg (cfg);
4215 int t1 = alloc_ireg (cfg);
4217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4218 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4219 ins->type = STACK_I4;
/* Object..ctor does nothing -> NOP. */
4223 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4224 MONO_INST_NEW (cfg, ins, OP_NOP);
4225 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4229 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl; the first
 * character ('G'/'S') selects load vs store. */
4230 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4231 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4233 #ifndef MONO_BIG_ARRAYS
4235 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4238 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4239 int dreg = alloc_ireg (cfg);
4240 int bounds_reg = alloc_ireg (cfg);
4241 MonoBasicBlock *end_bb, *szarray_bb;
4242 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4244 NEW_BBLOCK (cfg, end_bb);
4245 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer marks a single-dimension zero-based array. */
4247 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4248 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4251 /* Non-szarray case */
4253 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4254 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4256 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4257 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4259 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) == max_length, GetLowerBound(0) == 0. */
4262 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4263 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4265 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4266 MONO_START_BB (cfg, end_bb);
4268 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4269 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
4275 if (cmethod->name [0] != 'g')
/* Array.Rank: read the rank byte from the vtable. */
4278 if (strcmp (cmethod->name, "get_Rank") == 0) {
4279 int dreg = alloc_ireg (cfg);
4280 int vtable_reg = alloc_preg (cfg);
4281 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4282 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4283 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4284 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4285 type_from_op (ins, NULL, NULL);
4288 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4289 int dreg = alloc_ireg (cfg);
4291 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4292 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4293 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4298 } else if (cmethod->klass == runtime_helpers_class) {
4300 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4301 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4305 } else if (cmethod->klass == mono_defaults.thread_class) {
4306 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4307 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4308 MONO_ADD_INS (cfg->cbb, ins);
4310 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4311 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4312 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4315 } else if (cmethod->klass == mono_defaults.monitor_class) {
4316 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4317 /* The trampolines don't work under SGEN */
4318 gboolean is_moving_gc = mono_gc_is_moving ();
/* Monitor.Enter/Exit via arch trampolines that take the object in a
 * dedicated register (non-LLVM) or as a normal argument (LLVM). */
4320 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1 && !is_moving_gc) {
4323 if (COMPILE_LLVM (cfg)) {
4325 * Pass the argument normally, the LLVM backend will handle the
4326 * calling convention problems.
4328 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4330 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4331 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4332 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4333 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4336 return (MonoInst*)call;
4337 } else if (strcmp (cmethod->name, "Exit") == 0 && !is_moving_gc) {
4340 if (COMPILE_LLVM (cfg)) {
4341 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4343 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4344 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4345 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4346 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4349 return (MonoInst*)call;
/* Alternative: managed IL fast-path wrappers for Monitor Enter/Exit. */
4351 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4352 MonoMethod *fast_method = NULL;
4354 /* Avoid infinite recursion */
4355 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4356 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4357 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4360 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4361 strcmp (cmethod->name, "Exit") == 0)
4362 fast_method = mono_monitor_get_fast_path (cmethod);
4366 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
4368 } else if (cmethod->klass->image == mono_defaults.corlib &&
4369 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4370 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4373 #if SIZEOF_REGISTER == 8
4374 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4375 /* 64 bit reads are already atomic */
4376 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4377 ins->dreg = mono_alloc_preg (cfg);
4378 ins->inst_basereg = args [0]->dreg;
4379 ins->inst_offset = 0;
4380 MONO_ADD_INS (cfg->cbb, ins);
4384 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all map to an atomic add-and-return opcode,
 * differing only in the second operand (+1, -1, or args [1]). */
4385 if (strcmp (cmethod->name, "Increment") == 0) {
4386 MonoInst *ins_iconst;
4389 if (fsig->params [0]->type == MONO_TYPE_I4)
4390 opcode = OP_ATOMIC_ADD_NEW_I4;
4391 #if SIZEOF_REGISTER == 8
4392 else if (fsig->params [0]->type == MONO_TYPE_I8)
4393 opcode = OP_ATOMIC_ADD_NEW_I8;
4396 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4397 ins_iconst->inst_c0 = 1;
4398 ins_iconst->dreg = mono_alloc_ireg (cfg);
4399 MONO_ADD_INS (cfg->cbb, ins_iconst);
4401 MONO_INST_NEW (cfg, ins, opcode);
4402 ins->dreg = mono_alloc_ireg (cfg);
4403 ins->inst_basereg = args [0]->dreg;
4404 ins->inst_offset = 0;
4405 ins->sreg2 = ins_iconst->dreg;
4406 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4407 MONO_ADD_INS (cfg->cbb, ins);
4409 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4410 MonoInst *ins_iconst;
4413 if (fsig->params [0]->type == MONO_TYPE_I4)
4414 opcode = OP_ATOMIC_ADD_NEW_I4;
4415 #if SIZEOF_REGISTER == 8
4416 else if (fsig->params [0]->type == MONO_TYPE_I8)
4417 opcode = OP_ATOMIC_ADD_NEW_I8;
4420 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4421 ins_iconst->inst_c0 = -1;
4422 ins_iconst->dreg = mono_alloc_ireg (cfg);
4423 MONO_ADD_INS (cfg->cbb, ins_iconst);
4425 MONO_INST_NEW (cfg, ins, opcode);
4426 ins->dreg = mono_alloc_ireg (cfg);
4427 ins->inst_basereg = args [0]->dreg;
4428 ins->inst_offset = 0;
4429 ins->sreg2 = ins_iconst->dreg;
4430 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4431 MONO_ADD_INS (cfg->cbb, ins);
4433 } else if (strcmp (cmethod->name, "Add") == 0) {
4436 if (fsig->params [0]->type == MONO_TYPE_I4)
4437 opcode = OP_ATOMIC_ADD_NEW_I4;
4438 #if SIZEOF_REGISTER == 8
4439 else if (fsig->params [0]->type == MONO_TYPE_I8)
4440 opcode = OP_ATOMIC_ADD_NEW_I8;
4444 MONO_INST_NEW (cfg, ins, opcode);
4445 ins->dreg = mono_alloc_ireg (cfg);
4446 ins->inst_basereg = args [0]->dreg;
4447 ins->inst_offset = 0;
4448 ins->sreg2 = args [1]->dreg;
4449 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4450 MONO_ADD_INS (cfg->cbb, ins);
4453 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4455 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Interlocked.Exchange: pick I4/I8 opcode from the operand type and
 * register width; object references take the pointer-sized opcode. */
4456 if (strcmp (cmethod->name, "Exchange") == 0) {
4458 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4460 if (fsig->params [0]->type == MONO_TYPE_I4)
4461 opcode = OP_ATOMIC_EXCHANGE_I4;
4462 #if SIZEOF_REGISTER == 8
4463 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4464 (fsig->params [0]->type == MONO_TYPE_I))
4465 opcode = OP_ATOMIC_EXCHANGE_I8;
4467 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4468 opcode = OP_ATOMIC_EXCHANGE_I4;
4473 MONO_INST_NEW (cfg, ins, opcode);
4474 ins->dreg = mono_alloc_ireg (cfg);
4475 ins->inst_basereg = args [0]->dreg;
4476 ins->inst_offset = 0;
4477 ins->sreg2 = args [1]->dreg;
4478 MONO_ADD_INS (cfg->cbb, ins);
4480 switch (fsig->params [0]->type) {
4482 ins->type = STACK_I4;
4486 ins->type = STACK_I8;
4488 case MONO_TYPE_OBJECT:
4489 ins->type = STACK_OBJ;
4492 g_assert_not_reached ();
/* Storing a reference needs a GC write barrier. */
4495 if (cfg->gen_write_barriers && is_ref)
4496 emit_write_barrier (cfg, args [0], args [1], -1);
4498 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4500 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
/* Interlocked.CompareExchange: operand size selects the CAS opcode. */
4501 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4503 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4504 if (fsig->params [1]->type == MONO_TYPE_I4)
4506 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4507 size = sizeof (gpointer);
4508 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4511 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4512 ins->dreg = alloc_ireg (cfg);
4513 ins->sreg1 = args [0]->dreg;
4514 ins->sreg2 = args [1]->dreg;
4515 ins->sreg3 = args [2]->dreg;
4516 ins->type = STACK_I4;
4517 MONO_ADD_INS (cfg->cbb, ins);
4518 } else if (size == 8) {
4519 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4520 ins->dreg = alloc_ireg (cfg);
4521 ins->sreg1 = args [0]->dreg;
4522 ins->sreg2 = args [1]->dreg;
4523 ins->sreg3 = args [2]->dreg;
4524 ins->type = STACK_I8;
4525 MONO_ADD_INS (cfg->cbb, ins);
4527 /* g_assert_not_reached (); */
4529 if (cfg->gen_write_barriers && is_ref)
4530 emit_write_barrier (cfg, args [0], args [1], -1);
4532 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
4536 } else if (cmethod->klass->image == mono_defaults.corlib) {
4537 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4538 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4539 if (should_insert_brekpoint (cfg->method))
4540 MONO_INST_NEW (cfg, ins, OP_BREAK);
4542 MONO_INST_NEW (cfg, ins, OP_NOP);
4543 MONO_ADD_INS (cfg->cbb, ins);
4546 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4547 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Compile-time constant (platform-gated; the #if is on an elided line). */
4549 EMIT_NEW_ICONST (cfg, ins, 1);
4551 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math ---- */
4555 } else if (cmethod->klass == mono_defaults.math_class) {
4557 * There is general branches code for Min/Max, but it does not work for
4559 * http://everything2.com/?node_id=1051618
/* Last chance: SIMD intrinsics, then the arch-specific hook. */
4563 #ifdef MONO_ARCH_SIMD_INTRINSICS
4564 if (cfg->opt & MONO_OPT_SIMD) {
4565 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4571 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4575 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to an equivalent JIT-internal helper where one
 * exists.  Currently only String.InternalAllocateStr is redirected, to the
 * GC-specific managed allocator.  Returns the emitted call instruction when
 * the redirection applies; the fall-through behaviour is in lines elided
 * from this view.
 */
4578 inline static MonoInst*
4579 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4580 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4582 if (method->klass == mono_defaults.string_class) {
4583 /* managed string allocation support */
/* Only redirect when allocation profiling is off and the domain is not
 * shared: both require the generic allocation path. */
4584 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4585 MonoInst *iargs [2];
4586 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4587 MonoMethod *managed_alloc = NULL;
4589 g_assert (vtable); /* Should not fail since it is System.String */
4590 #ifndef MONO_CROSS_COMPILE
4591 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4595 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4596 iargs [1] = args [0];
4597 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create one OP_LOCAL variable per argument of SIG (including the implicit
 * "this"), store it into cfg->args, and emit a store of the corresponding
 * stack value from SP into it.  Used when setting up arguments for an
 * inlined method body.
 */
4604 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4606 MonoInst *store, *temp;
/* Slot 0 of an instance method is the receiver, typed from the stack value;
 * the remaining slots use the declared parameter types. */
4609 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4610 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4613 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4614 * would be different than the MonoInst's used to represent arguments, and
4615 * the ldelema implementation can't deal with that.
4616 * Solution: When ldelema is used on an inline argument, create a var for
4617 * it, emit ldelema on that var, and emit the saving code below in
4618 * inline_method () if needed.
4620 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4621 cfg->args [i] = temp;
4622 /* This uses cfg->args [i] which is set by the preceding line */
4623 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4624 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids: when enabled, inlining is restricted to methods whose full
 * name matches the MONO_INLINE_CALLED_METHOD_NAME_LIMIT /
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variables.
 * NOTE(review): both are compiled in (defined to 1) here, which adds a name
 * check on every inline attempt — confirm this is intended outside of
 * debugging builds.
 */
4629 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4630 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4632 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE when CALLED_METHOD's full name begins with the prefix given
 * in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The
 * behaviour when the variable is unset/empty is in lines elided from this
 * view (presumably "no restriction" — confirm against the full source).
 */
4634 check_inline_called_method_name_limit (MonoMethod *called_method)
4637 static char *limit = NULL;
/* Read the environment variable once and cache it; "" marks "no limit". */
4639 if (limit == NULL) {
4640 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4642 if (limit_string != NULL)
4643 limit = limit_string;
4645 limit = (char *) "";
4648 if (limit [0] != '\0') {
4649 char *called_method_name = mono_method_full_name (called_method, TRUE);
4651 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4652 g_free (called_method_name);
4654 //return (strncmp_result <= 0);
/* Inline only when the name matches the configured prefix. */
4655 return (strncmp_result == 0);
4662 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the caller:
 * return TRUE when CALLER_METHOD's full name begins with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  Unset/empty
 * handling is in lines elided from this view.
 */
4664 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4667 static char *limit = NULL;
/* Cache the environment lookup on first use; "" marks "no limit". */
4669 if (limit == NULL) {
4670 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4671 if (limit_string != NULL) {
4672 limit = limit_string;
4674 limit = (char *) "";
4678 if (limit [0] != '\0') {
4679 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4681 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4682 g_free (caller_method_name);
4684 //return (strncmp_result <= 0);
/* Inline only when the caller name matches the configured prefix. */
4685 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the IR stream of CFG at IP.  The caller's
 * per-method compile state (locals, args, bblock maps, generic context, …)
 * is saved into prev_* locals, swapped for the callee's, and restored
 * unconditionally afterwards; on failure the newly added bblocks are
 * discarded by resetting cfg->cbb.  Return value convention is in lines
 * elided from this view (nonzero cost on success — confirm with full source).
 */
4693 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4694 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4696 MonoInst *ins, *rvar = NULL;
4697 MonoMethodHeader *cheader;
4698 MonoBasicBlock *ebblock, *sbblock;
4700 MonoMethod *prev_inlined_method;
4701 MonoInst **prev_locals, **prev_args;
4702 MonoType **prev_arg_types;
4703 guint prev_real_offset;
4704 GHashTable *prev_cbb_hash;
4705 MonoBasicBlock **prev_cil_offset_to_bb;
4706 MonoBasicBlock *prev_cbb;
4707 unsigned char* prev_cil_start;
4708 guint32 prev_cil_offset_to_bb_len;
4709 MonoMethod *prev_current_method;
4710 MonoGenericContext *prev_generic_context;
4711 gboolean ret_var_set, prev_ret_var_set;
4713 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debugging filters on callee/caller names (see the
 * MONO_INLINE_*_LIMITED_METHODS defines above). */
4715 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4716 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4719 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4720 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4724 if (cfg->verbose_level > 2)
4725 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4727 if (!cmethod->inline_info) {
4728 mono_jit_stats.inlineable_methods++;
4729 cmethod->inline_info = 1;
4732 /* allocate local variables */
4733 cheader = mono_method_get_header (cmethod);
/* Header load failure: free whatever was allocated and bail out. */
4735 if (cheader == NULL || mono_loader_get_last_error ()) {
4737 mono_metadata_free_mh (cheader);
4738 mono_loader_clear_error ();
4742 /* allocate space to store the return value */
4743 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4744 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4748 prev_locals = cfg->locals;
4749 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4750 for (i = 0; i < cheader->num_locals; ++i)
4751 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4753 /* allocate start and end blocks */
4754 /* This is needed so if the inline is aborted, we can clean up */
4755 NEW_BBLOCK (cfg, sbblock);
4756 sbblock->real_offset = real_offset;
4758 NEW_BBLOCK (cfg, ebblock);
4759 ebblock->block_num = cfg->num_bblocks++;
4760 ebblock->real_offset = real_offset;
/* Save the remaining per-method compile state before recursing. */
4762 prev_args = cfg->args;
4763 prev_arg_types = cfg->arg_types;
4764 prev_inlined_method = cfg->inlined_method;
4765 cfg->inlined_method = cmethod;
4766 cfg->ret_var_set = FALSE;
4767 cfg->inline_depth ++;
4768 prev_real_offset = cfg->real_offset;
4769 prev_cbb_hash = cfg->cbb_hash;
4770 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4771 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4772 prev_cil_start = cfg->cil_start;
4773 prev_cbb = cfg->cbb;
4774 prev_current_method = cfg->current_method;
4775 prev_generic_context = cfg->generic_context;
4776 prev_ret_var_set = cfg->ret_var_set;
/* Recursively convert the callee's IL; a negative cost signals failure. */
4778 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4780 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state regardless of the outcome. */
4782 cfg->inlined_method = prev_inlined_method;
4783 cfg->real_offset = prev_real_offset;
4784 cfg->cbb_hash = prev_cbb_hash;
4785 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4786 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4787 cfg->cil_start = prev_cil_start;
4788 cfg->locals = prev_locals;
4789 cfg->args = prev_args;
4790 cfg->arg_types = prev_arg_types;
4791 cfg->current_method = prev_current_method;
4792 cfg->generic_context = prev_generic_context;
4793 cfg->ret_var_set = prev_ret_var_set;
4794 cfg->inline_depth --;
/* Accept the inline when it was forced or cheap enough.
 * NOTE(review): hard-coded cost limit 60 here vs. INLINE_LENGTH_LIMIT 20
 * at the top of the file — confirm the relationship is intentional. */
4796 if ((costs >= 0 && costs < 60) || inline_allways) {
4797 if (cfg->verbose_level > 2)
4798 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4800 mono_jit_stats.inlined_methods++;
4802 /* always add some code to avoid block split failures */
4803 MONO_INST_NEW (cfg, ins, OP_NOP);
4804 MONO_ADD_INS (prev_cbb, ins);
4806 prev_cbb->next_bb = sbblock;
4807 link_bblock (cfg, prev_cbb, sbblock);
4810 * Get rid of the begin and end bblocks if possible to aid local
4813 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4815 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4816 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4818 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4819 MonoBasicBlock *prev = ebblock->in_bb [0];
4820 mono_merge_basic_blocks (cfg, prev, ebblock);
4822 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4823 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4824 cfg->cbb = prev_cbb;
4832 * If the inlined method contains only a throw, then the ret var is not
4833 * set, so set it to a dummy value.
4836 static double r8_0 = 0.0;
/* Materialize a zero of the appropriate stack type into the return var. */
4838 switch (rvar->type) {
4840 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4843 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4848 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4851 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4852 ins->type = STACK_R8;
4853 ins->inst_p0 = (void*)&r8_0;
4854 ins->dreg = rvar->dreg;
4855 MONO_ADD_INS (cfg->cbb, ins);
4858 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4861 g_assert_not_reached ();
4865 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* The header is freed with the rest of the method at the end of the JIT. */
4868 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4871 if (cfg->verbose_level > 2)
4872 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4873 cfg->exception_type = MONO_EXCEPTION_NONE;
4874 mono_loader_clear_error ();
4876 /* This gets rid of the newly added bblocks */
4877 cfg->cbb = prev_cbb;
4879 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4884 * Some of these comments may well be out-of-date.
4885 * Design decisions: we do a single pass over the IL code (and we do bblock
4886 * splitting/merging in the few cases when it's required: a back jump to an IL
4887 * address that was not already seen as bblock starting point).
4888 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4889 * Complex operations are decomposed in simpler ones right away. We need to let the
4890 * arch-specific code peek and poke inside this process somehow (except when the
4891 * optimizations can take advantage of the full semantic info of coarse opcodes).
4892 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4893 * MonoInst->opcode initially is the IL opcode or some simplification of that
4894 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4895 * opcode with value bigger than OP_LAST.
4896 * At this point the IR can be handed over to an interpreter, a dumb code generator
4897 * or to the optimizing code generator that will translate it to SSA form.
4899 * Profiling directed optimizations.
4900 * We may compile by default with few or no optimizations and instrument the code
4901 * or the user may indicate what methods to optimize the most either in a config file
4902 * or through repeated runs where the compiler applies offline the optimizations to
4903 * each method and then decides if it was worth it.
/*
 * Verification helpers used pervasively by mono_method_to_ir (): each one
 * bails out through UNVERIFIED (or load_error for type loads) when the IL
 * being converted violates the corresponding constraint.
 */
4906 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4907 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4908 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4909 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4910 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4911 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4912 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4913 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4915 /* offset from br.s -> br like opcodes */
4916 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the instruction at IP still belongs to basic block BB,
 * i.e. no other basic block starts at that IL offset.
 */
4919 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4921 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4923 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL of HEADER between START and END, creating a basic block
 * (via GET_BBLOCK) at every branch target and at every instruction that
 * follows a branch/switch.  Basic blocks whose only exit is a throw are
 * marked out_of_line so later passes can move them out of the hot path.
 */
4927 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4929 unsigned char *ip = start;
4930 unsigned char *target;
4933 MonoBasicBlock *bblock;
4934 const MonoOpcode *opcode;
4937 cli_addr = ip - start;
4938 i = mono_opcode_value ((const guint8 **)&ip, end);
4941 opcode = &mono_opcodes [i];
/* Advance IP past the operand; branch operands also create bblocks. */
4942 switch (opcode->argument) {
4943 case MonoInlineNone:
4946 case MonoInlineString:
4947 case MonoInlineType:
4948 case MonoInlineField:
4949 case MonoInlineMethod:
4952 case MonoShortInlineR:
4959 case MonoShortInlineVar:
4960 case MonoShortInlineI:
4963 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction. */
4964 target = start + cli_addr + 2 + (signed char)ip [1];
4965 GET_BBLOCK (cfg, bblock, target);
4968 GET_BBLOCK (cfg, bblock, ip);
4970 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction. */
4971 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4972 GET_BBLOCK (cfg, bblock, target);
4975 GET_BBLOCK (cfg, bblock, ip);
4977 case MonoInlineSwitch: {
4978 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch opcode. */
4981 cli_addr += 5 + 4 * n;
4982 target = start + cli_addr;
4983 GET_BBLOCK (cfg, bblock, target);
4985 for (j = 0; j < n; ++j) {
4986 target = start + cli_addr + (gint32)read32 (ip);
4987 GET_BBLOCK (cfg, bblock, target);
4997 g_assert_not_reached ();
/* A bblock ending in throw is cold: flag it for out-of-line placement. */
5000 if (i == CEE_THROW) {
5001 unsigned char *bb_start = ip - 1;
5003 /* Find the start of the bblock containing the throw */
5005 while ((bb_start >= start) && !bblock) {
5006 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5010 bblock->out_of_line = 1;
5019 static inline MonoMethod *
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN.  For wrappers the token indexes
 * the wrapper's own data table; otherwise it is resolved through the image
 * with the given generic CONTEXT.  Open constructed types are allowed
 * (hence the name); the tail of the function is elided from this view.
 */
5020 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5024 if (m->wrapper_type != MONO_WRAPPER_NONE)
5025 return mono_method_get_wrapper_data (m, token);
5027 method = mono_get_method_full (m->klass->image, token, klass, context);
5032 static inline MonoMethod *
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when generic sharing is not in
 * effect, a method on an open constructed type is additionally rejected
 * (the rejection branch is in lines elided from this view).
 */
5033 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5035 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5037 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5043 static inline MonoClass*
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN: wrapper data for wrapper methods,
 * otherwise a full lookup through the image with the given generic CONTEXT.
 * The class is initialized before being returned.
 */
5044 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5048 if (method->wrapper_type != MONO_WRAPPER_NONE)
5049 klass = mono_method_get_wrapper_data (method, token);
5051 klass = mono_class_get_full (method->klass->image, token, context);
5053 mono_class_init (klass);
5058 * Returns TRUE if the JIT should abort inlining because "callee"
5059 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand declarative security for a CALLER -> CALLEE
 * call.  For ECMA link demands, code throwing a SecurityException is
 * emitted inline before the call; other failures are recorded on CFG as a
 * MONO_EXCEPTION_SECURITY_LINKDEMAND.  Return paths are partly elided from
 * this view.
 */
5062 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining: the outermost method was checked already. */
5066 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5070 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5071 if (result == MONO_JIT_SECURITY_OK)
5074 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5075 /* Generate code to throw a SecurityException before the actual call/link */
5076 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5079 NEW_ICONST (cfg, args [0], 4);
5080 NEW_METHODCONST (cfg, args [1], caller);
5081 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5082 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5083 /* don't hide previous results */
5084 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
5085 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return SecurityManager.ThrowException (1 argument), resolved lazily and
 * cached in a function-local static.  NOTE(review): the cache is written
 * without synchronization — benign if concurrent JIT threads resolve the
 * same method, but confirm against the runtime's locking rules.
 */
5093 throw_exception (void)
5095 static MonoMethod *method = NULL;
5098 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5099 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current IR
 * position, so EX is raised at runtime when this code path executes.
 */
5106 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5108 MonoMethod *thrower = throw_exception ();
5111 EMIT_NEW_PCONST (cfg, args [0], ex);
5112 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 */
5116 * Return the original method is a wrapper is specified. We can only access
5117 * the custom attributes from the original method.
5120 get_original_method (MonoMethod *method)
/* Non-wrappers are returned as-is (return statement elided in this view). */
5122 if (method->wrapper_type == MONO_WRAPPER_NONE)
5125 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5126 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5129 /* in other cases we need to find the original method */
5130 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER (unwrapped via get_original_method ()) may
 * not access FIELD, emit code that throws the resulting security exception
 * at this point in the method.
 */
5134 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5135 MonoBasicBlock *bblock, unsigned char *ip)
5137 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5138 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5140 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER (unwrapped via get_original_method ()) may
 * not call CALLEE, emit code that throws the resulting security exception
 * at this point in the method.
 */
5144 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5145 MonoBasicBlock *bblock, unsigned char *ip)
5147 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5148 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5150 emit_throw_exception (cfg, ex);
5154 * Check that the IL instructions at ip are the array initialization
5155 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers.InitializeArray" sequence that follows a newarr, and on a
 * match return a pointer to the static field data (or, for AOT, the RVA)
 * together with the element size and field token, so the call can be
 * replaced with a direct memory copy.  Returns NULL when the pattern or the
 * element type does not qualify.
 */
5158 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5161 * newarr[System.Int32]
5163 * ldtoken field valuetype ...
5164 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's token table (Field). */
5166 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5167 guint32 token = read32 (ip + 7);
5168 guint32 field_token = read32 (ip + 2);
5169 guint32 field_index = field_token & 0xffffff;
5171 const char *data_ptr;
5173 MonoMethod *cmethod;
5174 MonoClass *dummy_class;
5175 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5181 *out_field_token = field_token;
5183 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is RuntimeHelpers.InitializeArray. */
5186 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw bytes match the field data qualify. */
5188 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5189 case MONO_TYPE_BOOLEAN:
5193 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5194 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5195 case MONO_TYPE_CHAR:
5205 return NULL; /* stupid ARM FP swapped format */
/* The field must be large enough to hold the whole initializer. */
5215 if (size > mono_type_size (field->type, &dummy_align))
5218 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5219 if (!method->klass->image->dynamic) {
5220 field_index = read32 (ip + 2) & 0xffffff;
5221 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5222 data_ptr = mono_image_rva_map (method->klass->image, rva);
5223 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5224 /* for aot code we do the lookup on load */
5225 if (aot && data_ptr)
5226 return GUINT_TO_POINTER (rva);
5228 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: read the data directly from the field. */
5230 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record a MONO_EXCEPTION_INVALID_PROGRAM on CFG with a message naming
 * METHOD and disassembling the offending instruction at IP.
 */
5238 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5240 char *method_fname = mono_method_full_name (method, TRUE);
5242 MonoMethodHeader *header = mono_method_get_header (method);
5244 if (header->code_size == 0)
5245 method_code = g_strdup ("method body is empty.");
5247 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5248 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5249 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5250 g_free (method_fname);
5251 g_free (method_code);
/* The header is freed with the rest of the method at the end of the JIT. */
5252 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed EXCEPTION object on CFG to be thrown instead
 * of compiling; the pointer is GC-registered so the object stays alive.
 */
5256 set_exception_object (MonoCompile *cfg, MonoException *exception)
5258 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5259 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5260 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active.
 */
5264 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5268 if (cfg->generic_sharing_context)
5269 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5271 type = &klass->byval_arg;
5272 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N.  When the value was
 * just produced by a constant load at the end of the current bblock, the
 * store is elided by retargeting the constant's destination register.
 */
5276 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5279 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5280 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5281 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5282 /* Optimize reg-reg moves away */
5284 * Can't optimize other opcodes, since sp[0] might point to
5285 * the last ins of a decomposed opcode.
5287 sp [0]->dreg = (cfg)->locals [n]->dreg;
5289 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5294 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj T": instead of taking the local's
 * address, directly zero the local (NULL for reference types, VZERO for
 * value types).  Returns the new IP past the consumed sequence on success
 * (return paths are in lines elided from this view).
 */
5297 static inline unsigned char *
5298 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5307 local = read16 (ip + 2);
/* Only fold when the following INITOBJ is in the same basic block. */
5311 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5312 gboolean skip = FALSE;
5314 /* From the INITOBJ case */
5315 token = read32 (ip + 2);
5316 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5317 CHECK_TYPELOAD (klass);
5318 if (generic_class_is_reference_type (cfg, klass)) {
5319 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5320 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5321 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5322 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5323 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain and return TRUE if CLASS is (or derives from)
 * System.Exception (loop structure partly elided in this view).
 */
5336 is_exception_class (MonoClass *class)
5339 if (class == mono_defaults.exception_class)
5341 class = class->parent;
5347 * mono_method_to_ir:
5349 * Translate the .net IL into linear IR.
5352 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5353 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5354 guint inline_offset, gboolean is_virtual_call)
5357 MonoInst *ins, **sp, **stack_start;
5358 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5359 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5360 MonoMethod *cmethod, *method_definition;
5361 MonoInst **arg_array;
5362 MonoMethodHeader *header;
5364 guint32 token, ins_flag;
5366 MonoClass *constrained_call = NULL;
5367 unsigned char *ip, *end, *target, *err_pos;
5368 static double r8_0 = 0.0;
5369 MonoMethodSignature *sig;
5370 MonoGenericContext *generic_context = NULL;
5371 MonoGenericContainer *generic_container = NULL;
5372 MonoType **param_types;
5373 int i, n, start_new_bblock, dreg;
5374 int num_calls = 0, inline_costs = 0;
5375 int breakpoint_id = 0;
5377 MonoBoolean security, pinvoke;
5378 MonoSecurityManager* secman = NULL;
5379 MonoDeclSecurityActions actions;
5380 GSList *class_inits = NULL;
5381 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5383 gboolean init_locals, seq_points, skip_dead_blocks;
5385 /* serialization and xdomain stuff may need access to private fields and methods */
5386 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5387 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5388 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5389 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5390 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5391 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5393 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5395 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5396 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5397 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5398 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5400 image = method->klass->image;
5401 header = mono_method_get_header (method);
5403 MonoLoaderError *error;
5405 if ((error = mono_loader_get_last_error ())) {
5406 cfg->exception_type = error->exception_type;
5408 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5409 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5411 goto exception_exit;
5413 generic_container = mono_method_get_generic_container (method);
5414 sig = mono_method_signature (method);
5415 num_args = sig->hasthis + sig->param_count;
5416 ip = (unsigned char*)header->code;
5417 cfg->cil_start = ip;
5418 end = ip + header->code_size;
5419 mono_jit_stats.cil_code_size += header->code_size;
5420 init_locals = header->init_locals;
5422 seq_points = cfg->gen_seq_points && cfg->method == method;
5425 * Methods without init_locals set could cause asserts in various passes
5430 method_definition = method;
5431 while (method_definition->is_inflated) {
5432 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5433 method_definition = imethod->declaring;
5436 /* SkipVerification is not allowed if core-clr is enabled */
5437 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5439 dont_verify_stloc = TRUE;
5442 if (!dont_verify && mini_method_verify (cfg, method_definition))
5443 goto exception_exit;
5445 if (mono_debug_using_mono_debugger ())
5446 cfg->keep_cil_nops = TRUE;
5448 if (sig->is_inflated)
5449 generic_context = mono_method_get_context (method);
5450 else if (generic_container)
5451 generic_context = &generic_container->context;
5452 cfg->generic_context = generic_context;
5454 if (!cfg->generic_sharing_context)
5455 g_assert (!sig->has_type_parameters);
5457 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5458 g_assert (method->is_inflated);
5459 g_assert (mono_method_get_context (method)->method_inst);
5461 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5462 g_assert (sig->generic_param_count);
5464 if (cfg->method == method) {
5465 cfg->real_offset = 0;
5467 cfg->real_offset = inline_offset;
5470 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5471 cfg->cil_offset_to_bb_len = header->code_size;
5473 cfg->current_method = method;
5475 if (cfg->verbose_level > 2)
5476 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5478 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5480 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5481 for (n = 0; n < sig->param_count; ++n)
5482 param_types [n + sig->hasthis] = sig->params [n];
5483 cfg->arg_types = param_types;
5485 dont_inline = g_list_prepend (dont_inline, method);
5486 if (cfg->method == method) {
5488 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5489 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5492 NEW_BBLOCK (cfg, start_bblock);
5493 cfg->bb_entry = start_bblock;
5494 start_bblock->cil_code = NULL;
5495 start_bblock->cil_length = 0;
5498 NEW_BBLOCK (cfg, end_bblock);
5499 cfg->bb_exit = end_bblock;
5500 end_bblock->cil_code = NULL;
5501 end_bblock->cil_length = 0;
5502 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5503 g_assert (cfg->num_bblocks == 2);
5505 arg_array = cfg->args;
5507 if (header->num_clauses) {
5508 cfg->spvars = g_hash_table_new (NULL, NULL);
5509 cfg->exvars = g_hash_table_new (NULL, NULL);
5511 /* handle exception clauses */
5512 for (i = 0; i < header->num_clauses; ++i) {
5513 MonoBasicBlock *try_bb;
5514 MonoExceptionClause *clause = &header->clauses [i];
5515 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5516 try_bb->real_offset = clause->try_offset;
5517 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5518 tblock->real_offset = clause->handler_offset;
5519 tblock->flags |= BB_EXCEPTION_HANDLER;
5521 link_bblock (cfg, try_bb, tblock);
5523 if (*(ip + clause->handler_offset) == CEE_POP)
5524 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5526 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5527 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5528 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5529 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5530 MONO_ADD_INS (tblock, ins);
5532 /* todo: is a fault block unsafe to optimize? */
5533 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5534 tblock->flags |= BB_EXCEPTION_UNSAFE;
5538 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5540 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5542 /* catch and filter blocks get the exception object on the stack */
5543 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5544 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5545 MonoInst *dummy_use;
5547 /* mostly like handle_stack_args (), but just sets the input args */
5548 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5549 tblock->in_scount = 1;
5550 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5551 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5554 * Add a dummy use for the exvar so its liveness info will be
5558 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5560 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5561 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5562 tblock->flags |= BB_EXCEPTION_HANDLER;
5563 tblock->real_offset = clause->data.filter_offset;
5564 tblock->in_scount = 1;
5565 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5566 /* The filter block shares the exvar with the handler block */
5567 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5568 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5569 MONO_ADD_INS (tblock, ins);
5573 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5574 clause->data.catch_class &&
5575 cfg->generic_sharing_context &&
5576 mono_class_check_context_used (clause->data.catch_class)) {
5578 * In shared generic code with catch
5579 * clauses containing type variables
5580 * the exception handling code has to
5581 * be able to get to the rgctx.
5582 * Therefore we have to make sure that
5583 * the vtable/mrgctx argument (for
5584 * static or generic methods) or the
5585 * "this" argument (for non-static
5586 * methods) are live.
5588 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5589 mini_method_get_context (method)->method_inst ||
5590 method->klass->valuetype) {
5591 mono_get_vtable_var (cfg);
5593 MonoInst *dummy_use;
5595 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5600 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5601 cfg->cbb = start_bblock;
5602 cfg->args = arg_array;
5603 mono_save_args (cfg, sig, inline_args);
5606 /* FIRST CODE BLOCK */
5607 NEW_BBLOCK (cfg, bblock);
5608 bblock->cil_code = ip;
5612 ADD_BBLOCK (cfg, bblock);
5614 if (cfg->method == method) {
5615 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5616 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5617 MONO_INST_NEW (cfg, ins, OP_BREAK);
5618 MONO_ADD_INS (bblock, ins);
5622 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5623 secman = mono_security_manager_get_methods ();
5625 security = (secman && mono_method_has_declsec (method));
5626 /* at this point having security doesn't mean we have any code to generate */
5627 if (security && (cfg->method == method)) {
5628 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5629 * And we do not want to enter the next section (with allocation) if we
5630 * have nothing to generate */
5631 security = mono_declsec_get_demands (method, &actions);
5634 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5635 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5637 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5638 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5639 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5641 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5642 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5646 mono_custom_attrs_free (custom);
5649 custom = mono_custom_attrs_from_class (wrapped->klass);
5650 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5654 mono_custom_attrs_free (custom);
5657 /* not a P/Invoke after all */
5662 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5663 /* we use a separate basic block for the initialization code */
5664 NEW_BBLOCK (cfg, init_localsbb);
5665 cfg->bb_init = init_localsbb;
5666 init_localsbb->real_offset = cfg->real_offset;
5667 start_bblock->next_bb = init_localsbb;
5668 init_localsbb->next_bb = bblock;
5669 link_bblock (cfg, start_bblock, init_localsbb);
5670 link_bblock (cfg, init_localsbb, bblock);
5672 cfg->cbb = init_localsbb;
5674 start_bblock->next_bb = bblock;
5675 link_bblock (cfg, start_bblock, bblock);
5678 /* at this point we know, if security is TRUE, that some code needs to be generated */
5679 if (security && (cfg->method == method)) {
5682 mono_jit_stats.cas_demand_generation++;
5684 if (actions.demand.blob) {
5685 /* Add code for SecurityAction.Demand */
5686 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5687 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5688 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5689 mono_emit_method_call (cfg, secman->demand, args, NULL);
5691 if (actions.noncasdemand.blob) {
5692 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5693 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5694 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5695 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5696 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5697 mono_emit_method_call (cfg, secman->demand, args, NULL);
5699 if (actions.demandchoice.blob) {
5700 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5701 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5702 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5703 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5704 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5708 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5710 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5713 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5714 /* check if this is native code, e.g. an icall or a p/invoke */
5715 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5716 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5718 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5719 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5721 /* if this is a native call then it can only be JITted from platform code */
5722 if ((icall || pinvk) && method->klass && method->klass->image) {
5723 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5724 MonoException *ex = icall ? mono_get_exception_security () :
5725 mono_get_exception_method_access ();
5726 emit_throw_exception (cfg, ex);
5733 if (header->code_size == 0)
5736 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5741 if (cfg->method == method)
5742 mono_debug_init_method (cfg, bblock, breakpoint_id);
5744 for (n = 0; n < header->num_locals; ++n) {
5745 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5750 /* We force the vtable variable here for all shared methods
5751 for the possibility that they might show up in a stack
5752 trace where their exact instantiation is needed. */
5753 if (cfg->generic_sharing_context && method == cfg->method) {
5754 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5755 mini_method_get_context (method)->method_inst ||
5756 method->klass->valuetype) {
5757 mono_get_vtable_var (cfg);
5759 /* FIXME: Is there a better way to do this?
5760 We need the variable live for the duration
5761 of the whole method. */
5762 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5766 /* add a check for this != NULL to inlined methods */
5767 if (is_virtual_call) {
5770 NEW_ARGLOAD (cfg, arg_ins, 0);
5771 MONO_ADD_INS (cfg->cbb, arg_ins);
5772 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5775 skip_dead_blocks = !dont_verify;
5776 if (skip_dead_blocks) {
5777 original_bb = bb = mono_basic_block_split (method, &error);
5778 if (!mono_error_ok (&error)) {
5779 mono_error_cleanup (&error);
5785 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5786 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5789 start_new_bblock = 0;
5792 if (cfg->method == method)
5793 cfg->real_offset = ip - header->code;
5795 cfg->real_offset = inline_offset;
5800 if (start_new_bblock) {
5801 bblock->cil_length = ip - bblock->cil_code;
5802 if (start_new_bblock == 2) {
5803 g_assert (ip == tblock->cil_code);
5805 GET_BBLOCK (cfg, tblock, ip);
5807 bblock->next_bb = tblock;
5810 start_new_bblock = 0;
5811 for (i = 0; i < bblock->in_scount; ++i) {
5812 if (cfg->verbose_level > 3)
5813 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5814 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5818 g_slist_free (class_inits);
5821 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5822 link_bblock (cfg, bblock, tblock);
5823 if (sp != stack_start) {
5824 handle_stack_args (cfg, stack_start, sp - stack_start);
5826 CHECK_UNVERIFIABLE (cfg);
5828 bblock->next_bb = tblock;
5831 for (i = 0; i < bblock->in_scount; ++i) {
5832 if (cfg->verbose_level > 3)
5833 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5834 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5837 g_slist_free (class_inits);
5842 if (skip_dead_blocks) {
5843 int ip_offset = ip - header->code;
5845 if (ip_offset == bb->end)
5849 int op_size = mono_opcode_size (ip, end);
5850 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5852 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5854 if (ip_offset + op_size == bb->end) {
5855 MONO_INST_NEW (cfg, ins, OP_NOP);
5856 MONO_ADD_INS (bblock, ins);
5857 start_new_bblock = 1;
5865 * Sequence points are points where the debugger can place a breakpoint.
5866 * Currently, we generate these automatically at points where the IL
5869 if (seq_points && sp == stack_start) {
5870 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5871 MONO_ADD_INS (cfg->cbb, ins);
5874 bblock->real_offset = cfg->real_offset;
5876 if ((cfg->method == method) && cfg->coverage_info) {
5877 guint32 cil_offset = ip - header->code;
5878 cfg->coverage_info->data [cil_offset].cil_code = ip;
5880 /* TODO: Use an increment here */
5881 #if defined(TARGET_X86)
5882 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5883 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5885 MONO_ADD_INS (cfg->cbb, ins);
5887 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5888 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5892 if (cfg->verbose_level > 3)
5893 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5897 if (cfg->keep_cil_nops)
5898 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5900 MONO_INST_NEW (cfg, ins, OP_NOP);
5902 MONO_ADD_INS (bblock, ins);
5905 if (should_insert_brekpoint (cfg->method))
5906 MONO_INST_NEW (cfg, ins, OP_BREAK);
5908 MONO_INST_NEW (cfg, ins, OP_NOP);
5910 MONO_ADD_INS (bblock, ins);
5916 CHECK_STACK_OVF (1);
5917 n = (*ip)-CEE_LDARG_0;
5919 EMIT_NEW_ARGLOAD (cfg, ins, n);
5927 CHECK_STACK_OVF (1);
5928 n = (*ip)-CEE_LDLOC_0;
5930 EMIT_NEW_LOCLOAD (cfg, ins, n);
5939 n = (*ip)-CEE_STLOC_0;
5942 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5944 emit_stloc_ir (cfg, sp, header, n);
5951 CHECK_STACK_OVF (1);
5954 EMIT_NEW_ARGLOAD (cfg, ins, n);
5960 CHECK_STACK_OVF (1);
5963 NEW_ARGLOADA (cfg, ins, n);
5964 MONO_ADD_INS (cfg->cbb, ins);
5974 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5976 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5981 CHECK_STACK_OVF (1);
5984 EMIT_NEW_LOCLOAD (cfg, ins, n);
5988 case CEE_LDLOCA_S: {
5989 unsigned char *tmp_ip;
5991 CHECK_STACK_OVF (1);
5992 CHECK_LOCAL (ip [1]);
5994 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6000 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6009 CHECK_LOCAL (ip [1]);
6010 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6012 emit_stloc_ir (cfg, sp, header, ip [1]);
6017 CHECK_STACK_OVF (1);
6018 EMIT_NEW_PCONST (cfg, ins, NULL);
6019 ins->type = STACK_OBJ;
6024 CHECK_STACK_OVF (1);
6025 EMIT_NEW_ICONST (cfg, ins, -1);
6038 CHECK_STACK_OVF (1);
6039 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6045 CHECK_STACK_OVF (1);
6047 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6053 CHECK_STACK_OVF (1);
6054 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6060 CHECK_STACK_OVF (1);
6061 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6062 ins->type = STACK_I8;
6063 ins->dreg = alloc_dreg (cfg, STACK_I8);
6065 ins->inst_l = (gint64)read64 (ip);
6066 MONO_ADD_INS (bblock, ins);
6072 gboolean use_aotconst = FALSE;
6074 #ifdef TARGET_POWERPC
6075 /* FIXME: Clean this up */
6076 if (cfg->compile_aot)
6077 use_aotconst = TRUE;
6080 /* FIXME: we should really allocate this only late in the compilation process */
6081 f = mono_domain_alloc (cfg->domain, sizeof (float));
6083 CHECK_STACK_OVF (1);
6089 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6091 dreg = alloc_freg (cfg);
6092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6093 ins->type = STACK_R8;
6095 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6096 ins->type = STACK_R8;
6097 ins->dreg = alloc_dreg (cfg, STACK_R8);
6099 MONO_ADD_INS (bblock, ins);
6109 gboolean use_aotconst = FALSE;
6111 #ifdef TARGET_POWERPC
6112 /* FIXME: Clean this up */
6113 if (cfg->compile_aot)
6114 use_aotconst = TRUE;
6117 /* FIXME: we should really allocate this only late in the compilation process */
6118 d = mono_domain_alloc (cfg->domain, sizeof (double));
6120 CHECK_STACK_OVF (1);
6126 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6128 dreg = alloc_freg (cfg);
6129 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6130 ins->type = STACK_R8;
6132 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6133 ins->type = STACK_R8;
6134 ins->dreg = alloc_dreg (cfg, STACK_R8);
6136 MONO_ADD_INS (bblock, ins);
6145 MonoInst *temp, *store;
6147 CHECK_STACK_OVF (1);
6151 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6152 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6154 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6157 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6170 if (sp [0]->type == STACK_R8)
6171 /* we need to pop the value from the x86 FP stack */
6172 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6181 if (stack_start != sp)
6183 token = read32 (ip + 1);
6184 /* FIXME: check the signature matches */
6185 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6187 if (!cmethod || mono_loader_get_last_error ())
6190 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6191 GENERIC_SHARING_FAILURE (CEE_JMP);
6193 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6194 CHECK_CFG_EXCEPTION;
6196 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6198 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6201 /* Handle tail calls similarly to calls */
6202 n = fsig->param_count + fsig->hasthis;
6204 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6205 call->method = cmethod;
6206 call->tail_call = TRUE;
6207 call->signature = mono_method_signature (cmethod);
6208 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6209 call->inst.inst_p0 = cmethod;
6210 for (i = 0; i < n; ++i)
6211 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6213 mono_arch_emit_call (cfg, call);
6214 MONO_ADD_INS (bblock, (MonoInst*)call);
6217 for (i = 0; i < num_args; ++i)
6218 /* Prevent arguments from being optimized away */
6219 arg_array [i]->flags |= MONO_INST_VOLATILE;
6221 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6222 ins = (MonoInst*)call;
6223 ins->inst_p0 = cmethod;
6224 MONO_ADD_INS (bblock, ins);
6228 start_new_bblock = 1;
6233 case CEE_CALLVIRT: {
6234 MonoInst *addr = NULL;
6235 MonoMethodSignature *fsig = NULL;
6237 int virtual = *ip == CEE_CALLVIRT;
6238 int calli = *ip == CEE_CALLI;
6239 gboolean pass_imt_from_rgctx = FALSE;
6240 MonoInst *imt_arg = NULL;
6241 gboolean pass_vtable = FALSE;
6242 gboolean pass_mrgctx = FALSE;
6243 MonoInst *vtable_arg = NULL;
6244 gboolean check_this = FALSE;
6245 gboolean supported_tail_call = FALSE;
6248 token = read32 (ip + 1);
6255 if (method->wrapper_type != MONO_WRAPPER_NONE)
6256 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6258 fsig = mono_metadata_parse_signature (image, token);
6260 n = fsig->param_count + fsig->hasthis;
6262 if (method->dynamic && fsig->pinvoke) {
6266 * This is a call through a function pointer using a pinvoke
6267 * signature. Have to create a wrapper and call that instead.
6268 * FIXME: This is very slow, need to create a wrapper at JIT time
6269 * instead based on the signature.
6271 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6272 EMIT_NEW_PCONST (cfg, args [1], fsig);
6274 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6277 MonoMethod *cil_method;
6279 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6280 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6281 cil_method = cmethod;
6282 } else if (constrained_call) {
6283 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6285 * This is needed since get_method_constrained can't find
6286 * the method in klass representing a type var.
6287 * The type var is guaranteed to be a reference type in this
6290 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6291 cil_method = cmethod;
6292 g_assert (!cmethod->klass->valuetype);
6294 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6297 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6298 cil_method = cmethod;
6301 if (!cmethod || mono_loader_get_last_error ())
6303 if (!dont_verify && !cfg->skip_visibility) {
6304 MonoMethod *target_method = cil_method;
6305 if (method->is_inflated) {
6306 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6308 if (!mono_method_can_access_method (method_definition, target_method) &&
6309 !mono_method_can_access_method (method, cil_method))
6310 METHOD_ACCESS_FAILURE;
6313 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6314 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6316 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6317 /* MS.NET seems to silently convert this to a callvirt */
6322 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6323 * converts to a callvirt.
6325 * tests/bug-515884.il is an example of this behavior
6327 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6328 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6329 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6333 if (!cmethod->klass->inited)
6334 if (!mono_class_init (cmethod->klass))
6337 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6338 mini_class_is_system_array (cmethod->klass)) {
6339 array_rank = cmethod->klass->rank;
6340 fsig = mono_method_signature (cmethod);
6342 fsig = mono_method_signature (cmethod);
6347 if (fsig->pinvoke) {
6348 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6349 check_for_pending_exc, FALSE);
6350 fsig = mono_method_signature (wrapper);
6351 } else if (constrained_call) {
6352 fsig = mono_method_signature (cmethod);
6354 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6358 mono_save_token_info (cfg, image, token, cil_method);
6360 n = fsig->param_count + fsig->hasthis;
6362 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6363 if (check_linkdemand (cfg, method, cmethod))
6365 CHECK_CFG_EXCEPTION;
6368 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6369 g_assert_not_reached ();
6372 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6375 if (!cfg->generic_sharing_context && cmethod)
6376 g_assert (!mono_method_check_context_used (cmethod));
6380 //g_assert (!virtual || fsig->hasthis);
6384 if (constrained_call) {
6386 * We have the `constrained.' prefix opcode.
6388 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6390 * The type parameter is instantiated as a valuetype,
6391 * but that type doesn't override the method we're
6392 * calling, so we need to box `this'.
6394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6395 ins->klass = constrained_call;
6396 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6397 CHECK_CFG_EXCEPTION;
6398 } else if (!constrained_call->valuetype) {
6399 int dreg = alloc_preg (cfg);
6402 * The type parameter is instantiated as a reference
6403 * type. We have a managed pointer on the stack, so
6404 * we need to dereference it here.
6406 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6407 ins->type = STACK_OBJ;
6409 } else if (cmethod->klass->valuetype)
6411 constrained_call = NULL;
6414 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6418 * If the callee is a shared method, then its static cctor
6419 * might not get called after the call was patched.
6421 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6422 emit_generic_class_init (cfg, cmethod->klass);
6423 CHECK_TYPELOAD (cmethod->klass);
6426 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6427 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6428 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6429 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6430 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6433 * Pass vtable iff target method might
6434 * be shared, which means that sharing
6435 * is enabled for its class and its
6436 * context is sharable (and it's not a
6439 if (sharing_enabled && context_sharable &&
6440 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6444 if (cmethod && mini_method_get_context (cmethod) &&
6445 mini_method_get_context (cmethod)->method_inst) {
6446 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6447 MonoGenericContext *context = mini_method_get_context (cmethod);
6448 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6450 g_assert (!pass_vtable);
6452 if (sharing_enabled && context_sharable)
6456 if (cfg->generic_sharing_context && cmethod) {
6457 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6459 context_used = mono_method_check_context_used (cmethod);
6461 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6462 /* Generic method interface
6463 calls are resolved via a
6464 helper function and don't
6466 if (!cmethod_context || !cmethod_context->method_inst)
6467 pass_imt_from_rgctx = TRUE;
6471 * If a shared method calls another
6472 * shared method then the caller must
6473 * have a generic sharing context
6474 * because the magic trampoline
6475 * requires it. FIXME: We shouldn't
6476 * have to force the vtable/mrgctx
6477 * variable here. Instead there
6478 * should be a flag in the cfg to
6479 * request a generic sharing context.
6482 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6483 mono_get_vtable_var (cfg);
6488 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6490 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6492 CHECK_TYPELOAD (cmethod->klass);
6493 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6498 g_assert (!vtable_arg);
6500 if (!cfg->compile_aot) {
6502 * emit_get_rgctx_method () calls mono_class_vtable () so check
6503 * for type load errors before.
6505 mono_class_setup_vtable (cmethod->klass);
6506 CHECK_TYPELOAD (cmethod->klass);
6509 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6511 /* !marshalbyref is needed to properly handle generic methods + remoting */
6512 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6513 MONO_METHOD_IS_FINAL (cmethod)) &&
6514 !cmethod->klass->marshalbyref) {
6521 if (pass_imt_from_rgctx) {
6522 g_assert (!pass_vtable);
6525 imt_arg = emit_get_rgctx_method (cfg, context_used,
6526 cmethod, MONO_RGCTX_INFO_METHOD);
6530 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6532 /* Calling virtual generic methods */
6533 if (cmethod && virtual &&
6534 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6535 !(MONO_METHOD_IS_FINAL (cmethod) &&
6536 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6537 mono_method_signature (cmethod)->generic_param_count) {
6538 MonoInst *this_temp, *this_arg_temp, *store;
6539 MonoInst *iargs [4];
6541 g_assert (mono_method_signature (cmethod)->is_inflated);
6543 /* Prevent inlining of methods that contain indirect calls */
6546 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6547 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6548 g_assert (!imt_arg);
6550 g_assert (cmethod->is_inflated);
6551 imt_arg = emit_get_rgctx_method (cfg, context_used,
6552 cmethod, MONO_RGCTX_INFO_METHOD);
6553 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6557 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6558 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6559 MONO_ADD_INS (bblock, store);
6561 /* FIXME: This should be a managed pointer */
6562 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6564 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6565 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6566 cmethod, MONO_RGCTX_INFO_METHOD);
6567 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6568 addr = mono_emit_jit_icall (cfg,
6569 mono_helper_compile_generic_method, iargs);
6571 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6573 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6576 if (!MONO_TYPE_IS_VOID (fsig->ret))
6577 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6579 CHECK_CFG_EXCEPTION;
6586 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6587 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6589 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6593 /* FIXME: runtime generic context pointer for jumps? */
6594 /* FIXME: handle this for generic sharing eventually */
6595 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6598 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6601 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6602 /* Handle tail calls similarly to calls */
6603 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6605 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6606 call->tail_call = TRUE;
6607 call->method = cmethod;
6608 call->signature = mono_method_signature (cmethod);
6611 * We implement tail calls by storing the actual arguments into the
6612 * argument variables, then emitting a CEE_JMP.
6614 for (i = 0; i < n; ++i) {
6615 /* Prevent argument from being register allocated */
6616 arg_array [i]->flags |= MONO_INST_VOLATILE;
6617 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6621 ins = (MonoInst*)call;
6622 ins->inst_p0 = cmethod;
6623 ins->inst_p1 = arg_array [0];
6624 MONO_ADD_INS (bblock, ins);
6625 link_bblock (cfg, bblock, end_bblock);
6626 start_new_bblock = 1;
6628 CHECK_CFG_EXCEPTION;
6630 /* skip CEE_RET as well */
6636 /* Conversion to a JIT intrinsic */
6637 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6639 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6640 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6645 CHECK_CFG_EXCEPTION;
6653 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6654 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6655 mono_method_check_inlining (cfg, cmethod) &&
6656 !g_list_find (dont_inline, cmethod)) {
6658 gboolean allways = FALSE;
6660 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6661 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6662 /* Prevent inlining of methods that call wrappers */
6664 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6668 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6670 cfg->real_offset += 5;
6673 if (!MONO_TYPE_IS_VOID (fsig->ret))
6674 /* *sp is already set by inline_method */
6677 inline_costs += costs;
6683 inline_costs += 10 * num_calls++;
6685 /* Tail recursion elimination */
6686 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6687 gboolean has_vtargs = FALSE;
6690 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6693 /* keep it simple */
6694 for (i = fsig->param_count - 1; i >= 0; i--) {
6695 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6700 for (i = 0; i < n; ++i)
6701 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6702 MONO_INST_NEW (cfg, ins, OP_BR);
6703 MONO_ADD_INS (bblock, ins);
6704 tblock = start_bblock->out_bb [0];
6705 link_bblock (cfg, bblock, tblock);
6706 ins->inst_target_bb = tblock;
6707 start_new_bblock = 1;
6709 /* skip the CEE_RET, too */
6710 if (ip_in_bb (cfg, bblock, ip + 5))
6720 /* Generic sharing */
6721 /* FIXME: only do this for generic methods if
6722 they are not shared! */
6723 if (context_used && !imt_arg && !array_rank &&
6724 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6725 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6726 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6727 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6730 g_assert (cfg->generic_sharing_context && cmethod);
6734 * We are compiling a call to a
6735 * generic method from shared code,
6736 * which means that we have to look up
6737 * the method in the rgctx and do an
6740 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6743 /* Indirect calls */
6745 g_assert (!imt_arg);
6747 if (*ip == CEE_CALL)
6748 g_assert (context_used);
6749 else if (*ip == CEE_CALLI)
6750 g_assert (!vtable_arg);
6752 /* FIXME: what the hell is this??? */
6753 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6754 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6756 /* Prevent inlining of methods with indirect calls */
6761 int rgctx_reg = mono_alloc_preg (cfg);
6763 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6764 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6765 call = (MonoCallInst*)ins;
6766 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6768 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6770 * Instead of emitting an indirect call, emit a direct call
6771 * with the contents of the aotconst as the patch info.
6773 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6775 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6776 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6779 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6782 if (!MONO_TYPE_IS_VOID (fsig->ret))
6783 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6785 CHECK_CFG_EXCEPTION;
6796 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6797 if (sp [fsig->param_count]->type == STACK_OBJ) {
6798 MonoInst *iargs [2];
6801 iargs [1] = sp [fsig->param_count];
6803 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6806 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6807 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6808 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6809 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6811 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6814 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6815 if (!cmethod->klass->element_class->valuetype && !readonly)
6816 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6817 CHECK_TYPELOAD (cmethod->klass);
6820 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6823 g_assert_not_reached ();
6826 CHECK_CFG_EXCEPTION;
6833 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6835 if (!MONO_TYPE_IS_VOID (fsig->ret))
6836 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6838 CHECK_CFG_EXCEPTION;
6848 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6850 } else if (imt_arg) {
6851 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6853 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6856 if (!MONO_TYPE_IS_VOID (fsig->ret))
6857 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6859 CHECK_CFG_EXCEPTION;
6866 if (cfg->method != method) {
6867 /* return from inlined method */
6869 * If in_count == 0, that means the ret is unreachable due to
6870			 * being preceded by a throw. In that case, inline_method () will
6871 * handle setting the return value
6872 * (test case: test_0_inline_throw ()).
6874 if (return_var && cfg->cbb->in_count) {
6878 //g_assert (returnvar != -1);
6879 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6880 cfg->ret_var_set = TRUE;
6884 MonoType *ret_type = mono_method_signature (method)->ret;
6888				 * Place a seq point here too even though the IL stack is not
6889 * empty, so a step over on
6892 * will work correctly.
6894 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6895 MONO_ADD_INS (cfg->cbb, ins);
6898 g_assert (!return_var);
6901 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6904 if (!cfg->vret_addr) {
6907 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6909 EMIT_NEW_RETLOADA (cfg, ret_addr);
6911 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6912 ins->klass = mono_class_from_mono_type (ret_type);
6915 #ifdef MONO_ARCH_SOFT_FLOAT
6916 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6917 MonoInst *iargs [1];
6921 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6922 mono_arch_emit_setret (cfg, method, conv);
6924 mono_arch_emit_setret (cfg, method, *sp);
6927 mono_arch_emit_setret (cfg, method, *sp);
6932 if (sp != stack_start)
6934 MONO_INST_NEW (cfg, ins, OP_BR);
6936 ins->inst_target_bb = end_bblock;
6937 MONO_ADD_INS (bblock, ins);
6938 link_bblock (cfg, bblock, end_bblock);
6939 start_new_bblock = 1;
6943 MONO_INST_NEW (cfg, ins, OP_BR);
6945 target = ip + 1 + (signed char)(*ip);
6947 GET_BBLOCK (cfg, tblock, target);
6948 link_bblock (cfg, bblock, tblock);
6949 ins->inst_target_bb = tblock;
6950 if (sp != stack_start) {
6951 handle_stack_args (cfg, stack_start, sp - stack_start);
6953 CHECK_UNVERIFIABLE (cfg);
6955 MONO_ADD_INS (bblock, ins);
6956 start_new_bblock = 1;
6957 inline_costs += BRANCH_COST;
6971 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6973 target = ip + 1 + *(signed char*)ip;
6979 inline_costs += BRANCH_COST;
6983 MONO_INST_NEW (cfg, ins, OP_BR);
6986 target = ip + 4 + (gint32)read32(ip);
6988 GET_BBLOCK (cfg, tblock, target);
6989 link_bblock (cfg, bblock, tblock);
6990 ins->inst_target_bb = tblock;
6991 if (sp != stack_start) {
6992 handle_stack_args (cfg, stack_start, sp - stack_start);
6994 CHECK_UNVERIFIABLE (cfg);
6997 MONO_ADD_INS (bblock, ins);
6999 start_new_bblock = 1;
7000 inline_costs += BRANCH_COST;
7007 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7008 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7009 guint32 opsize = is_short ? 1 : 4;
7011 CHECK_OPSIZE (opsize);
7013 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7016 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7021 GET_BBLOCK (cfg, tblock, target);
7022 link_bblock (cfg, bblock, tblock);
7023 GET_BBLOCK (cfg, tblock, ip);
7024 link_bblock (cfg, bblock, tblock);
7026 if (sp != stack_start) {
7027 handle_stack_args (cfg, stack_start, sp - stack_start);
7028 CHECK_UNVERIFIABLE (cfg);
7031 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7032 cmp->sreg1 = sp [0]->dreg;
7033 type_from_op (cmp, sp [0], NULL);
7036 #if SIZEOF_REGISTER == 4
7037 if (cmp->opcode == OP_LCOMPARE_IMM) {
7038 /* Convert it to OP_LCOMPARE */
7039 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7040 ins->type = STACK_I8;
7041 ins->dreg = alloc_dreg (cfg, STACK_I8);
7043 MONO_ADD_INS (bblock, ins);
7044 cmp->opcode = OP_LCOMPARE;
7045 cmp->sreg2 = ins->dreg;
7048 MONO_ADD_INS (bblock, cmp);
7050 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7051 type_from_op (ins, sp [0], NULL);
7052 MONO_ADD_INS (bblock, ins);
7053 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7054 GET_BBLOCK (cfg, tblock, target);
7055 ins->inst_true_bb = tblock;
7056 GET_BBLOCK (cfg, tblock, ip);
7057 ins->inst_false_bb = tblock;
7058 start_new_bblock = 2;
7061 inline_costs += BRANCH_COST;
7076 MONO_INST_NEW (cfg, ins, *ip);
7078 target = ip + 4 + (gint32)read32(ip);
7084 inline_costs += BRANCH_COST;
7088 MonoBasicBlock **targets;
7089 MonoBasicBlock *default_bblock;
7090 MonoJumpInfoBBTable *table;
7091 int offset_reg = alloc_preg (cfg);
7092 int target_reg = alloc_preg (cfg);
7093 int table_reg = alloc_preg (cfg);
7094 int sum_reg = alloc_preg (cfg);
7095 gboolean use_op_switch;
7099 n = read32 (ip + 1);
7102 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7106 CHECK_OPSIZE (n * sizeof (guint32));
7107 target = ip + n * sizeof (guint32);
7109 GET_BBLOCK (cfg, default_bblock, target);
7110 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7112 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7113 for (i = 0; i < n; ++i) {
7114 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7115 targets [i] = tblock;
7116 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7120 if (sp != stack_start) {
7122 * Link the current bb with the targets as well, so handle_stack_args
7123 * will set their in_stack correctly.
7125 link_bblock (cfg, bblock, default_bblock);
7126 for (i = 0; i < n; ++i)
7127 link_bblock (cfg, bblock, targets [i]);
7129 handle_stack_args (cfg, stack_start, sp - stack_start);
7131 CHECK_UNVERIFIABLE (cfg);
7134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7138 for (i = 0; i < n; ++i)
7139 link_bblock (cfg, bblock, targets [i]);
7141 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7142 table->table = targets;
7143 table->table_size = n;
7145 use_op_switch = FALSE;
7147 /* ARM implements SWITCH statements differently */
7148 /* FIXME: Make it use the generic implementation */
7149 if (!cfg->compile_aot)
7150 use_op_switch = TRUE;
7153 if (COMPILE_LLVM (cfg))
7154 use_op_switch = TRUE;
7156 cfg->cbb->has_jump_table = 1;
7158 if (use_op_switch) {
7159 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7160 ins->sreg1 = src1->dreg;
7161 ins->inst_p0 = table;
7162 ins->inst_many_bb = targets;
7163 ins->klass = GUINT_TO_POINTER (n);
7164 MONO_ADD_INS (cfg->cbb, ins);
7166 if (sizeof (gpointer) == 8)
7167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7171 #if SIZEOF_REGISTER == 8
7172 /* The upper word might not be zero, and we add it to a 64 bit address later */
7173 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7176 if (cfg->compile_aot) {
7177 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7179 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7180 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7181 ins->inst_p0 = table;
7182 ins->dreg = table_reg;
7183 MONO_ADD_INS (cfg->cbb, ins);
7186 /* FIXME: Use load_memindex */
7187 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7188 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7189 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7191 start_new_bblock = 1;
7192 inline_costs += (BRANCH_COST * 2);
7212 dreg = alloc_freg (cfg);
7215 dreg = alloc_lreg (cfg);
7218 dreg = alloc_preg (cfg);
7221 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7222 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7223 ins->flags |= ins_flag;
7225 MONO_ADD_INS (bblock, ins);
7240 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7241 ins->flags |= ins_flag;
7243 MONO_ADD_INS (bblock, ins);
7245 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7246 emit_write_barrier (cfg, sp [0], sp [1], -1);
7255 MONO_INST_NEW (cfg, ins, (*ip));
7257 ins->sreg1 = sp [0]->dreg;
7258 ins->sreg2 = sp [1]->dreg;
7259 type_from_op (ins, sp [0], sp [1]);
7261 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7263 /* Use the immediate opcodes if possible */
7264 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7265 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7266 if (imm_opcode != -1) {
7267 ins->opcode = imm_opcode;
7268 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7271 sp [1]->opcode = OP_NOP;
7275 MONO_ADD_INS ((cfg)->cbb, (ins));
7277 *sp++ = mono_decompose_opcode (cfg, ins);
7294 MONO_INST_NEW (cfg, ins, (*ip));
7296 ins->sreg1 = sp [0]->dreg;
7297 ins->sreg2 = sp [1]->dreg;
7298 type_from_op (ins, sp [0], sp [1]);
7300 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7301 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7303 /* FIXME: Pass opcode to is_inst_imm */
7305 /* Use the immediate opcodes if possible */
7306 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7309 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7310 if (imm_opcode != -1) {
7311 ins->opcode = imm_opcode;
7312 if (sp [1]->opcode == OP_I8CONST) {
7313 #if SIZEOF_REGISTER == 8
7314 ins->inst_imm = sp [1]->inst_l;
7316 ins->inst_ls_word = sp [1]->inst_ls_word;
7317 ins->inst_ms_word = sp [1]->inst_ms_word;
7321 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7324 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7325 if (sp [1]->next == NULL)
7326 sp [1]->opcode = OP_NOP;
7329 MONO_ADD_INS ((cfg)->cbb, (ins));
7331 *sp++ = mono_decompose_opcode (cfg, ins);
7344 case CEE_CONV_OVF_I8:
7345 case CEE_CONV_OVF_U8:
7349 /* Special case this earlier so we have long constants in the IR */
7350 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7351 int data = sp [-1]->inst_c0;
7352 sp [-1]->opcode = OP_I8CONST;
7353 sp [-1]->type = STACK_I8;
7354 #if SIZEOF_REGISTER == 8
7355 if ((*ip) == CEE_CONV_U8)
7356 sp [-1]->inst_c0 = (guint32)data;
7358 sp [-1]->inst_c0 = data;
7360 sp [-1]->inst_ls_word = data;
7361 if ((*ip) == CEE_CONV_U8)
7362 sp [-1]->inst_ms_word = 0;
7364 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7366 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7373 case CEE_CONV_OVF_I4:
7374 case CEE_CONV_OVF_I1:
7375 case CEE_CONV_OVF_I2:
7376 case CEE_CONV_OVF_I:
7377 case CEE_CONV_OVF_U:
7380 if (sp [-1]->type == STACK_R8) {
7381 ADD_UNOP (CEE_CONV_OVF_I8);
7388 case CEE_CONV_OVF_U1:
7389 case CEE_CONV_OVF_U2:
7390 case CEE_CONV_OVF_U4:
7393 if (sp [-1]->type == STACK_R8) {
7394 ADD_UNOP (CEE_CONV_OVF_U8);
7401 case CEE_CONV_OVF_I1_UN:
7402 case CEE_CONV_OVF_I2_UN:
7403 case CEE_CONV_OVF_I4_UN:
7404 case CEE_CONV_OVF_I8_UN:
7405 case CEE_CONV_OVF_U1_UN:
7406 case CEE_CONV_OVF_U2_UN:
7407 case CEE_CONV_OVF_U4_UN:
7408 case CEE_CONV_OVF_U8_UN:
7409 case CEE_CONV_OVF_I_UN:
7410 case CEE_CONV_OVF_U_UN:
7417 CHECK_CFG_EXCEPTION;
7421 case CEE_ADD_OVF_UN:
7423 case CEE_MUL_OVF_UN:
7425 case CEE_SUB_OVF_UN:
7433 token = read32 (ip + 1);
7434 klass = mini_get_class (method, token, generic_context);
7435 CHECK_TYPELOAD (klass);
7437 if (generic_class_is_reference_type (cfg, klass)) {
7438 MonoInst *store, *load;
7439 int dreg = alloc_preg (cfg);
7441 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7442 load->flags |= ins_flag;
7443 MONO_ADD_INS (cfg->cbb, load);
7445 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7446 store->flags |= ins_flag;
7447 MONO_ADD_INS (cfg->cbb, store);
7449 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7450 emit_write_barrier (cfg, sp [0], sp [1], -1);
7452 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7464 token = read32 (ip + 1);
7465 klass = mini_get_class (method, token, generic_context);
7466 CHECK_TYPELOAD (klass);
7468 /* Optimize the common ldobj+stloc combination */
7478 loc_index = ip [5] - CEE_STLOC_0;
7485 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7486 CHECK_LOCAL (loc_index);
7488 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7489 ins->dreg = cfg->locals [loc_index]->dreg;
7495 /* Optimize the ldobj+stobj combination */
7496 /* The reference case ends up being a load+store anyway */
7497 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7502 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7509 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7518 CHECK_STACK_OVF (1);
7520 n = read32 (ip + 1);
7522 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7523 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7524 ins->type = STACK_OBJ;
7527 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7528 MonoInst *iargs [1];
7530 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7531 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7533 if (cfg->opt & MONO_OPT_SHARED) {
7534 MonoInst *iargs [3];
7536 if (cfg->compile_aot) {
7537 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7539 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7540 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7541 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7542 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7543 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7545 if (bblock->out_of_line) {
7546 MonoInst *iargs [2];
7548 if (image == mono_defaults.corlib) {
7550 * Avoid relocations in AOT and save some space by using a
7551 * version of helper_ldstr specialized to mscorlib.
7553 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7554 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7556 /* Avoid creating the string object */
7557 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7558 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7559 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7563 if (cfg->compile_aot) {
7564 NEW_LDSTRCONST (cfg, ins, image, n);
7566 MONO_ADD_INS (bblock, ins);
7569 NEW_PCONST (cfg, ins, NULL);
7570 ins->type = STACK_OBJ;
7571 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7573 MONO_ADD_INS (bblock, ins);
7582 MonoInst *iargs [2];
7583 MonoMethodSignature *fsig;
7586 MonoInst *vtable_arg = NULL;
7589 token = read32 (ip + 1);
7590 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7591 if (!cmethod || mono_loader_get_last_error ())
7593 fsig = mono_method_get_signature (cmethod, image, token);
7597 mono_save_token_info (cfg, image, token, cmethod);
7599 if (!mono_class_init (cmethod->klass))
7602 if (cfg->generic_sharing_context)
7603 context_used = mono_method_check_context_used (cmethod);
7605 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7606 if (check_linkdemand (cfg, method, cmethod))
7608 CHECK_CFG_EXCEPTION;
7609 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7610 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7613 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7614 emit_generic_class_init (cfg, cmethod->klass);
7615 CHECK_TYPELOAD (cmethod->klass);
7618 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7619 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7620 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7621 mono_class_vtable (cfg->domain, cmethod->klass);
7622 CHECK_TYPELOAD (cmethod->klass);
7624 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7625 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7628 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7629 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7631 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7633 CHECK_TYPELOAD (cmethod->klass);
7634 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7639 n = fsig->param_count;
7643 * Generate smaller code for the common newobj <exception> instruction in
7644 * argument checking code.
7646 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7647 is_exception_class (cmethod->klass) && n <= 2 &&
7648 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7649 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7650 MonoInst *iargs [3];
7652 g_assert (!vtable_arg);
7656 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7659 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7663 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7668 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7671 g_assert_not_reached ();
7679 /* move the args to allow room for 'this' in the first position */
7685 /* check_call_signature () requires sp[0] to be set */
7686 this_ins.type = STACK_OBJ;
7688 if (check_call_signature (cfg, fsig, sp))
7693 if (mini_class_is_system_array (cmethod->klass)) {
7694 g_assert (!vtable_arg);
7696 *sp = emit_get_rgctx_method (cfg, context_used,
7697 cmethod, MONO_RGCTX_INFO_METHOD);
7699 /* Avoid varargs in the common case */
7700 if (fsig->param_count == 1)
7701 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7702 else if (fsig->param_count == 2)
7703 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7704 else if (fsig->param_count == 3)
7705 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7707 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7708 } else if (cmethod->string_ctor) {
7709 g_assert (!context_used);
7710 g_assert (!vtable_arg);
7711 /* we simply pass a null pointer */
7712 EMIT_NEW_PCONST (cfg, *sp, NULL);
7713 /* now call the string ctor */
7714 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7716 MonoInst* callvirt_this_arg = NULL;
7718 if (cmethod->klass->valuetype) {
7719 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7720 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7721 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7726 * The code generated by mini_emit_virtual_call () expects
7727 * iargs [0] to be a boxed instance, but luckily the vcall
7728 * will be transformed into a normal call there.
7730 } else if (context_used) {
7731 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7734 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7736 CHECK_TYPELOAD (cmethod->klass);
7739 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7740 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7741 * As a workaround, we call class cctors before allocating objects.
7743 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7744 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7745 if (cfg->verbose_level > 2)
7746 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7747 class_inits = g_slist_prepend (class_inits, vtable);
7750 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7753 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7756 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7758 /* Now call the actual ctor */
7759 /* Avoid virtual calls to ctors if possible */
7760 if (cmethod->klass->marshalbyref)
7761 callvirt_this_arg = sp [0];
7764 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7765 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7766 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7771 CHECK_CFG_EXCEPTION;
7776 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7777 mono_method_check_inlining (cfg, cmethod) &&
7778 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7779 !g_list_find (dont_inline, cmethod)) {
7782 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7783 cfg->real_offset += 5;
7786 inline_costs += costs - 5;
7789 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7791 } else if (context_used &&
7792 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7793 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7794 MonoInst *cmethod_addr;
7796 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7797 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7799 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7802 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7803 callvirt_this_arg, NULL, vtable_arg);
7807 if (alloc == NULL) {
7809 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7810 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7824 token = read32 (ip + 1);
7825 klass = mini_get_class (method, token, generic_context);
7826 CHECK_TYPELOAD (klass);
7827 if (sp [0]->type != STACK_OBJ)
7830 if (cfg->generic_sharing_context)
7831 context_used = mono_class_check_context_used (klass);
7833 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7840 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7842 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7846 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7847 MonoMethod *mono_castclass;
7848 MonoInst *iargs [1];
7851 mono_castclass = mono_marshal_get_castclass (klass);
7854 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7855 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7856 g_assert (costs > 0);
7859 cfg->real_offset += 5;
7864 inline_costs += costs;
7867 ins = handle_castclass (cfg, klass, *sp, context_used);
7868 CHECK_CFG_EXCEPTION;
7878 token = read32 (ip + 1);
7879 klass = mini_get_class (method, token, generic_context);
7880 CHECK_TYPELOAD (klass);
7881 if (sp [0]->type != STACK_OBJ)
7884 if (cfg->generic_sharing_context)
7885 context_used = mono_class_check_context_used (klass);
7887 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7894 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7896 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7900 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7901 MonoMethod *mono_isinst;
7902 MonoInst *iargs [1];
7905 mono_isinst = mono_marshal_get_isinst (klass);
7908 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7909 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7910 g_assert (costs > 0);
7913 cfg->real_offset += 5;
7918 inline_costs += costs;
7921 ins = handle_isinst (cfg, klass, *sp, context_used);
7922 CHECK_CFG_EXCEPTION;
7929 case CEE_UNBOX_ANY: {
7933 token = read32 (ip + 1);
7934 klass = mini_get_class (method, token, generic_context);
7935 CHECK_TYPELOAD (klass);
7937 mono_save_token_info (cfg, image, token, klass);
7939 if (cfg->generic_sharing_context)
7940 context_used = mono_class_check_context_used (klass);
7942 if (generic_class_is_reference_type (cfg, klass)) {
7943 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7944 if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7945 MonoMethod *mono_castclass;
7946 MonoInst *iargs [1];
7949 mono_castclass = mono_marshal_get_castclass (klass);
7952 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7953 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7955 g_assert (costs > 0);
7958 cfg->real_offset += 5;
7962 inline_costs += costs;
7964 ins = handle_castclass (cfg, klass, *sp, context_used);
7965 CHECK_CFG_EXCEPTION;
7973 if (mono_class_is_nullable (klass)) {
7974 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7981 ins = handle_unbox (cfg, klass, sp, context_used);
7987 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8000 token = read32 (ip + 1);
8001 klass = mini_get_class (method, token, generic_context);
8002 CHECK_TYPELOAD (klass);
8004 mono_save_token_info (cfg, image, token, klass);
8006 if (cfg->generic_sharing_context)
8007 context_used = mono_class_check_context_used (klass);
8009 if (generic_class_is_reference_type (cfg, klass)) {
8015 if (klass == mono_defaults.void_class)
8017 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8019 /* frequent check in generic code: box (struct), brtrue */
8021 // FIXME: LLVM can't handle the inconsistent bb linking
8022 if (!mono_class_is_nullable (klass) &&
8023 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8024 (ip [5] == CEE_BRTRUE ||
8025 ip [5] == CEE_BRTRUE_S ||
8026 ip [5] == CEE_BRFALSE ||
8027 ip [5] == CEE_BRFALSE_S)) {
8028 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8030 MonoBasicBlock *true_bb, *false_bb;
8034 if (cfg->verbose_level > 3) {
8035 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8036 printf ("<box+brtrue opt>\n");
8044 target = ip + 1 + (signed char)(*ip);
8051 target = ip + 4 + (gint)(read32 (ip));
8055 g_assert_not_reached ();
8059 * We need to link both bblocks, since it is needed for handling stack
8060 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8061 * Branching to only one of them would lead to inconsistencies, so
8062 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8064 GET_BBLOCK (cfg, true_bb, target);
8065 GET_BBLOCK (cfg, false_bb, ip);
8067 mono_link_bblock (cfg, cfg->cbb, true_bb);
8068 mono_link_bblock (cfg, cfg->cbb, false_bb);
8070 if (sp != stack_start) {
8071 handle_stack_args (cfg, stack_start, sp - stack_start);
8073 CHECK_UNVERIFIABLE (cfg);
8076 if (COMPILE_LLVM (cfg)) {
8077 dreg = alloc_ireg (cfg);
8078 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8081 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8083 /* The JIT can't eliminate the iconst+compare */
8084 MONO_INST_NEW (cfg, ins, OP_BR);
8085 ins->inst_target_bb = is_true ? true_bb : false_bb;
8086 MONO_ADD_INS (cfg->cbb, ins);
8089 start_new_bblock = 1;
8093 *sp++ = handle_box (cfg, val, klass, context_used);
8095 CHECK_CFG_EXCEPTION;
8104 token = read32 (ip + 1);
8105 klass = mini_get_class (method, token, generic_context);
8106 CHECK_TYPELOAD (klass);
8108 mono_save_token_info (cfg, image, token, klass);
8110 if (cfg->generic_sharing_context)
8111 context_used = mono_class_check_context_used (klass);
8113 if (mono_class_is_nullable (klass)) {
8116 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8117 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8121 ins = handle_unbox (cfg, klass, sp, context_used);
8131 MonoClassField *field;
8135 if (*ip == CEE_STFLD) {
8142 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8144 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8147 token = read32 (ip + 1);
8148 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8149 field = mono_method_get_wrapper_data (method, token);
8150 klass = field->parent;
8153 field = mono_field_from_token (image, token, &klass, generic_context);
8157 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8158 FIELD_ACCESS_FAILURE;
8159 mono_class_init (klass);
8161			/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8162 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8163 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8164 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8167 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8168 if (*ip == CEE_STFLD) {
8169 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8171 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8172 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8173 MonoInst *iargs [5];
8176 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8177 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8178 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8182 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8183 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8184 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8185 g_assert (costs > 0);
8187 cfg->real_offset += 5;
8190 inline_costs += costs;
8192 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8197 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8200 if (sp [0]->opcode != OP_LDADDR)
8201 store->flags |= MONO_INST_FAULT;
8203 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8204 /* insert call to write barrier */
8208 dreg = alloc_preg (cfg);
8209 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8210 emit_write_barrier (cfg, ptr, sp [1], -1);
8213 store->flags |= ins_flag;
8220 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8221 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8222 MonoInst *iargs [4];
8225 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8226 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8227 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8228 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8229 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8230 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8232 g_assert (costs > 0);
8234 cfg->real_offset += 5;
8238 inline_costs += costs;
8240 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8244 if (sp [0]->type == STACK_VTYPE) {
8247 /* Have to compute the address of the variable */
8249 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8251 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8253 g_assert (var->klass == klass);
8255 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8259 if (*ip == CEE_LDFLDA) {
8260 if (sp [0]->type == STACK_OBJ) {
8261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8262 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8265 dreg = alloc_preg (cfg);
8267 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8268 ins->klass = mono_class_from_mono_type (field->type);
8269 ins->type = STACK_MP;
8274 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8277 load->flags |= ins_flag;
8278 if (sp [0]->opcode != OP_LDADDR)
8279 load->flags |= MONO_INST_FAULT;
8290 MonoClassField *field;
8291 gpointer addr = NULL;
8292 gboolean is_special_static;
8295 token = read32 (ip + 1);
8297 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8298 field = mono_method_get_wrapper_data (method, token);
8299 klass = field->parent;
8302 field = mono_field_from_token (image, token, &klass, generic_context);
8305 mono_class_init (klass);
8306 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8307 FIELD_ACCESS_FAILURE;
8309 /* if the class is Critical then transparent code cannot access its fields */
8310 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8311 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8314 * We can only support shared generic static
8315 * field access on architectures where the
8316 * trampoline code has been extended to handle
8317 * the generic class init.
8319 #ifndef MONO_ARCH_VTABLE_REG
8320 GENERIC_SHARING_FAILURE (*ip);
8323 if (cfg->generic_sharing_context)
8324 context_used = mono_class_check_context_used (klass);
8326 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8328 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8329 * to be called here.
8331 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8332 mono_class_vtable (cfg->domain, klass);
8333 CHECK_TYPELOAD (klass);
8335 mono_domain_lock (cfg->domain);
8336 if (cfg->domain->special_static_fields)
8337 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8338 mono_domain_unlock (cfg->domain);
8340 is_special_static = mono_class_field_is_special_static (field);
8342 /* Generate IR to compute the field address */
8343 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8345 * Fast access to TLS data
8346 * Inline version of get_thread_static_data () in
8350 int idx, static_data_reg, array_reg, dreg;
8351 MonoInst *thread_ins;
8353 // offset &= 0x7fffffff;
8354 // idx = (offset >> 24) - 1;
8355 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8357 thread_ins = mono_get_thread_intrinsic (cfg);
8358 MONO_ADD_INS (cfg->cbb, thread_ins);
8359 static_data_reg = alloc_ireg (cfg);
8360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8362 if (cfg->compile_aot) {
8363 int offset_reg, offset2_reg, idx_reg;
8365 /* For TLS variables, this will return the TLS offset */
8366 EMIT_NEW_SFLDACONST (cfg, ins, field);
8367 offset_reg = ins->dreg;
8368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8369 idx_reg = alloc_ireg (cfg);
8370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8371 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8372 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8373 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8374 array_reg = alloc_ireg (cfg);
8375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8376 offset2_reg = alloc_ireg (cfg);
8377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8378 dreg = alloc_ireg (cfg);
8379 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8381 offset = (gsize)addr & 0x7fffffff;
8382 idx = (offset >> 24) - 1;
8384 array_reg = alloc_ireg (cfg);
8385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8386 dreg = alloc_ireg (cfg);
8387 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8389 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8390 (cfg->compile_aot && is_special_static) ||
8391 (context_used && is_special_static)) {
8392 MonoInst *iargs [2];
8394 g_assert (field->parent);
8395 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8397 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8398 field, MONO_RGCTX_INFO_CLASS_FIELD);
8400 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8402 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8403 } else if (context_used) {
8404 MonoInst *static_data;
8407 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8408 method->klass->name_space, method->klass->name, method->name,
8409 depth, field->offset);
8412 if (mono_class_needs_cctor_run (klass, method))
8413 emit_generic_class_init (cfg, klass);
8416 * The pointer we're computing here is
8418 * super_info.static_data + field->offset
8420 static_data = emit_get_rgctx_klass (cfg, context_used,
8421 klass, MONO_RGCTX_INFO_STATIC_DATA);
8423 if (field->offset == 0) {
8426 int addr_reg = mono_alloc_preg (cfg);
8427 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8429 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8430 MonoInst *iargs [2];
8432 g_assert (field->parent);
8433 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8434 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8435 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8437 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8439 CHECK_TYPELOAD (klass);
8441 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8442 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8443 if (cfg->verbose_level > 2)
8444 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8445 class_inits = g_slist_prepend (class_inits, vtable);
8447 if (cfg->run_cctors) {
8449 /* This makes it so that inlining cannot trigger */
8450 /* .cctors: too many apps depend on them */
8451 /* running with a specific order... */
8452 if (! vtable->initialized)
8454 ex = mono_runtime_class_init_full (vtable, FALSE);
8456 set_exception_object (cfg, ex);
8457 goto exception_exit;
8461 addr = (char*)vtable->data + field->offset;
8463 if (cfg->compile_aot)
8464 EMIT_NEW_SFLDACONST (cfg, ins, field);
8466 EMIT_NEW_PCONST (cfg, ins, addr);
8468 MonoInst *iargs [1];
8469 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8470 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8474 /* Generate IR to do the actual load/store operation */
8476 if (*ip == CEE_LDSFLDA) {
8477 ins->klass = mono_class_from_mono_type (field->type);
8478 ins->type = STACK_PTR;
8480 } else if (*ip == CEE_STSFLD) {
8485 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8486 store->flags |= ins_flag;
8488 gboolean is_const = FALSE;
8489 MonoVTable *vtable = NULL;
8491 if (!context_used) {
8492 vtable = mono_class_vtable (cfg->domain, klass);
8493 CHECK_TYPELOAD (klass);
8495 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8496 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8497 gpointer addr = (char*)vtable->data + field->offset;
8498 int ro_type = field->type->type;
8499 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8500 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8502 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8505 case MONO_TYPE_BOOLEAN:
8507 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8511 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8514 case MONO_TYPE_CHAR:
8516 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8520 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8525 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8529 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8535 case MONO_TYPE_FNPTR:
8536 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8537 type_to_eval_stack_type ((cfg), field->type, *sp);
8540 case MONO_TYPE_STRING:
8541 case MONO_TYPE_OBJECT:
8542 case MONO_TYPE_CLASS:
8543 case MONO_TYPE_SZARRAY:
8544 case MONO_TYPE_ARRAY:
8545 if (!mono_gc_is_moving ()) {
8546 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8547 type_to_eval_stack_type ((cfg), field->type, *sp);
8555 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8560 case MONO_TYPE_VALUETYPE:
8570 CHECK_STACK_OVF (1);
8572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8573 load->flags |= ins_flag;
8586 token = read32 (ip + 1);
8587 klass = mini_get_class (method, token, generic_context);
8588 CHECK_TYPELOAD (klass);
8589 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8590 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8591 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8592 generic_class_is_reference_type (cfg, klass)) {
8593 /* insert call to write barrier */
8594 emit_write_barrier (cfg, sp [0], sp [1], -1);
8606 const char *data_ptr;
8608 guint32 field_token;
8614 token = read32 (ip + 1);
8616 klass = mini_get_class (method, token, generic_context);
8617 CHECK_TYPELOAD (klass);
8619 if (cfg->generic_sharing_context)
8620 context_used = mono_class_check_context_used (klass);
8622 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8623 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8624 ins->sreg1 = sp [0]->dreg;
8625 ins->type = STACK_I4;
8626 ins->dreg = alloc_ireg (cfg);
8627 MONO_ADD_INS (cfg->cbb, ins);
8628 *sp = mono_decompose_opcode (cfg, ins);
8633 MonoClass *array_class = mono_array_class_get (klass, 1);
8634 /* FIXME: we cannot get a managed
8635 allocator because we can't get the
8636 open generic class's vtable. We
8637 have the same problem in
8638 handle_alloc(). This
8639 needs to be solved so that we can
8640 have managed allocs of shared
8643 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8644 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8646 MonoMethod *managed_alloc = NULL;
8648 /* FIXME: Decompose later to help abcrem */
8651 args [0] = emit_get_rgctx_klass (cfg, context_used,
8652 array_class, MONO_RGCTX_INFO_VTABLE);
8657 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8659 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8661 if (cfg->opt & MONO_OPT_SHARED) {
8662 /* Decompose now to avoid problems with references to the domainvar */
8663 MonoInst *iargs [3];
8665 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8666 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8669 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8671 /* Decompose later since it is needed by abcrem */
8672 MonoClass *array_type = mono_array_class_get (klass, 1);
8673 mono_class_vtable (cfg->domain, array_type);
8674 CHECK_TYPELOAD (array_type);
8676 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8677 ins->dreg = alloc_preg (cfg);
8678 ins->sreg1 = sp [0]->dreg;
8679 ins->inst_newa_class = klass;
8680 ins->type = STACK_OBJ;
8682 MONO_ADD_INS (cfg->cbb, ins);
8683 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8684 cfg->cbb->has_array_access = TRUE;
8686 /* Needed so mono_emit_load_get_addr () gets called */
8687 mono_get_got_var (cfg);
8697 * we inline/optimize the initialization sequence if possible.
8698 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8699 * for small sizes open code the memcpy
8700 * ensure the rva field is big enough
8702 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8703 MonoMethod *memcpy_method = get_memcpy_method ();
8704 MonoInst *iargs [3];
8705 int add_reg = alloc_preg (cfg);
8707 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8708 if (cfg->compile_aot) {
8709 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8711 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8713 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8714 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8723 if (sp [0]->type != STACK_OBJ)
8726 dreg = alloc_preg (cfg);
8727 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8728 ins->dreg = alloc_preg (cfg);
8729 ins->sreg1 = sp [0]->dreg;
8730 ins->type = STACK_I4;
8731 /* This flag will be inherited by the decomposition */
8732 ins->flags |= MONO_INST_FAULT;
8733 MONO_ADD_INS (cfg->cbb, ins);
8734 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8735 cfg->cbb->has_array_access = TRUE;
8743 if (sp [0]->type != STACK_OBJ)
8746 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8748 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8749 CHECK_TYPELOAD (klass);
8750 /* we need to make sure that this array is exactly the type it needs
8751 * to be for correctness. the wrappers are lax with their usage
8752 * so we need to ignore them here
8754 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8755 MonoClass *array_class = mono_array_class_get (klass, 1);
8756 mini_emit_check_array_type (cfg, sp [0], array_class);
8757 CHECK_TYPELOAD (array_class);
8761 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8776 case CEE_LDELEM_REF: {
8782 if (*ip == CEE_LDELEM) {
8784 token = read32 (ip + 1);
8785 klass = mini_get_class (method, token, generic_context);
8786 CHECK_TYPELOAD (klass);
8787 mono_class_init (klass);
8790 klass = array_access_to_klass (*ip);
8792 if (sp [0]->type != STACK_OBJ)
8795 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8797 if (sp [1]->opcode == OP_ICONST) {
8798 int array_reg = sp [0]->dreg;
8799 int index_reg = sp [1]->dreg;
8800 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8802 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8803 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8805 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8806 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8809 if (*ip == CEE_LDELEM)
8822 case CEE_STELEM_REF:
8829 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8831 if (*ip == CEE_STELEM) {
8833 token = read32 (ip + 1);
8834 klass = mini_get_class (method, token, generic_context);
8835 CHECK_TYPELOAD (klass);
8836 mono_class_init (klass);
8839 klass = array_access_to_klass (*ip);
8841 if (sp [0]->type != STACK_OBJ)
8844 /* storing a NULL doesn't need any of the complex checks in stelemref */
8845 if (generic_class_is_reference_type (cfg, klass) &&
8846 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8847 MonoMethod* helper = mono_marshal_get_stelemref ();
8848 MonoInst *iargs [3];
8850 if (sp [0]->type != STACK_OBJ)
8852 if (sp [2]->type != STACK_OBJ)
8859 mono_emit_method_call (cfg, helper, iargs, NULL);
8861 if (sp [1]->opcode == OP_ICONST) {
8862 int array_reg = sp [0]->dreg;
8863 int index_reg = sp [1]->dreg;
8864 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8866 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8867 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8869 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8870 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8874 if (*ip == CEE_STELEM)
8881 case CEE_CKFINITE: {
8885 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8886 ins->sreg1 = sp [0]->dreg;
8887 ins->dreg = alloc_freg (cfg);
8888 ins->type = STACK_R8;
8889 MONO_ADD_INS (bblock, ins);
8891 *sp++ = mono_decompose_opcode (cfg, ins);
8896 case CEE_REFANYVAL: {
8897 MonoInst *src_var, *src;
8899 int klass_reg = alloc_preg (cfg);
8900 int dreg = alloc_preg (cfg);
8903 MONO_INST_NEW (cfg, ins, *ip);
8906 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8907 CHECK_TYPELOAD (klass);
8908 mono_class_init (klass);
8910 if (cfg->generic_sharing_context)
8911 context_used = mono_class_check_context_used (klass);
8914 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8916 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8917 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8921 MonoInst *klass_ins;
8923 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8924 klass, MONO_RGCTX_INFO_KLASS);
8927 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8928 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8930 mini_emit_class_check (cfg, klass_reg, klass);
8932 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8933 ins->type = STACK_MP;
8938 case CEE_MKREFANY: {
8939 MonoInst *loc, *addr;
8942 MONO_INST_NEW (cfg, ins, *ip);
8945 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8946 CHECK_TYPELOAD (klass);
8947 mono_class_init (klass);
8949 if (cfg->generic_sharing_context)
8950 context_used = mono_class_check_context_used (klass);
8952 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8953 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8956 MonoInst *const_ins;
8957 int type_reg = alloc_preg (cfg);
8959 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8961 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8963 } else if (cfg->compile_aot) {
8964 int const_reg = alloc_preg (cfg);
8965 int type_reg = alloc_preg (cfg);
8967 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8968 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8970 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8972 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8973 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8977 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8978 ins->type = STACK_VTYPE;
8979 ins->klass = mono_defaults.typed_reference_class;
8986 MonoClass *handle_class;
8988 CHECK_STACK_OVF (1);
8991 n = read32 (ip + 1);
8993 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8994 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8995 handle = mono_method_get_wrapper_data (method, n);
8996 handle_class = mono_method_get_wrapper_data (method, n + 1);
8997 if (handle_class == mono_defaults.typehandle_class)
8998 handle = &((MonoClass*)handle)->byval_arg;
9001 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9005 mono_class_init (handle_class);
9006 if (cfg->generic_sharing_context) {
9007 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9008 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9009 /* This case handles ldtoken
9010 of an open type, like for
9013 } else if (handle_class == mono_defaults.typehandle_class) {
9014 /* If we get a MONO_TYPE_CLASS
9015 then we need to provide the
9017 instantiation of it. */
9018 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9021 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9022 } else if (handle_class == mono_defaults.fieldhandle_class)
9023 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9024 else if (handle_class == mono_defaults.methodhandle_class)
9025 context_used = mono_method_check_context_used (handle);
9027 g_assert_not_reached ();
9030 if ((cfg->opt & MONO_OPT_SHARED) &&
9031 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9032 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9033 MonoInst *addr, *vtvar, *iargs [3];
9034 int method_context_used;
9036 if (cfg->generic_sharing_context)
9037 method_context_used = mono_method_check_context_used (method);
9039 method_context_used = 0;
9041 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9043 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9044 EMIT_NEW_ICONST (cfg, iargs [1], n);
9045 if (method_context_used) {
9046 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9047 method, MONO_RGCTX_INFO_METHOD);
9048 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9050 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9051 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9053 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9055 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9057 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9059 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9060 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9061 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9062 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9063 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9064 MonoClass *tclass = mono_class_from_mono_type (handle);
9066 mono_class_init (tclass);
9068 ins = emit_get_rgctx_klass (cfg, context_used,
9069 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9070 } else if (cfg->compile_aot) {
9071 if (method->wrapper_type) {
9072 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9073 /* Special case for static synchronized wrappers */
9074 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9076 /* FIXME: n is not a normal token */
9077 cfg->disable_aot = TRUE;
9078 EMIT_NEW_PCONST (cfg, ins, NULL);
9081 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9084 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9086 ins->type = STACK_OBJ;
9087 ins->klass = cmethod->klass;
9090 MonoInst *addr, *vtvar;
9092 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9095 if (handle_class == mono_defaults.typehandle_class) {
9096 ins = emit_get_rgctx_klass (cfg, context_used,
9097 mono_class_from_mono_type (handle),
9098 MONO_RGCTX_INFO_TYPE);
9099 } else if (handle_class == mono_defaults.methodhandle_class) {
9100 ins = emit_get_rgctx_method (cfg, context_used,
9101 handle, MONO_RGCTX_INFO_METHOD);
9102 } else if (handle_class == mono_defaults.fieldhandle_class) {
9103 ins = emit_get_rgctx_field (cfg, context_used,
9104 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9106 g_assert_not_reached ();
9108 } else if (cfg->compile_aot) {
9109 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9111 EMIT_NEW_PCONST (cfg, ins, handle);
9113 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9115 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9125 MONO_INST_NEW (cfg, ins, OP_THROW);
9127 ins->sreg1 = sp [0]->dreg;
9129 bblock->out_of_line = TRUE;
9130 MONO_ADD_INS (bblock, ins);
9131 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9132 MONO_ADD_INS (bblock, ins);
9135 link_bblock (cfg, bblock, end_bblock);
9136 start_new_bblock = 1;
9138 case CEE_ENDFINALLY:
9139 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9140 MONO_ADD_INS (bblock, ins);
9142 start_new_bblock = 1;
9145 * Control will leave the method so empty the stack, otherwise
9146 * the next basic block will start with a nonempty stack.
9148 while (sp != stack_start) {
9156 if (*ip == CEE_LEAVE) {
9158 target = ip + 5 + (gint32)read32(ip + 1);
9161 target = ip + 2 + (signed char)(ip [1]);
9164 /* empty the stack */
9165 while (sp != stack_start) {
9170 * If this leave statement is in a catch block, check for a
9171 * pending exception, and rethrow it if necessary.
9172 * We avoid doing this in runtime invoke wrappers, since those are called
9173 * by native code which expects the wrapper to catch all exceptions.
9175 for (i = 0; i < header->num_clauses; ++i) {
9176 MonoExceptionClause *clause = &header->clauses [i];
9179 * Use <= in the final comparison to handle clauses with multiple
9180 * leave statements, like in bug #78024.
9181 * The ordering of the exception clauses guarantees that we find the
9184 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9186 MonoBasicBlock *dont_throw;
9191 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9194 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9196 NEW_BBLOCK (cfg, dont_throw);
9199 * Currently, we always rethrow the abort exception, despite the
9200 * fact that this is not correct. See thread6.cs for an example.
9201 * But propagating the abort exception is more important than
9202 * getting the semantics right.
9204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9205 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9206 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9208 MONO_START_BB (cfg, dont_throw);
9213 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9215 MonoExceptionClause *clause;
9217 for (tmp = handlers; tmp; tmp = tmp->next) {
9219 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9221 link_bblock (cfg, bblock, tblock);
9222 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9223 ins->inst_target_bb = tblock;
9224 ins->inst_eh_block = clause;
9225 MONO_ADD_INS (bblock, ins);
9226 bblock->has_call_handler = 1;
9227 if (COMPILE_LLVM (cfg)) {
9228 MonoBasicBlock *target_bb;
9231 * Link the finally bblock with the target, since it will
9232 * conceptually branch there.
9233 * FIXME: Have to link the bblock containing the endfinally.
9235 GET_BBLOCK (cfg, target_bb, target);
9236 link_bblock (cfg, tblock, target_bb);
9239 g_list_free (handlers);
9242 MONO_INST_NEW (cfg, ins, OP_BR);
9243 MONO_ADD_INS (bblock, ins);
9244 GET_BBLOCK (cfg, tblock, target);
9245 link_bblock (cfg, bblock, tblock);
9246 ins->inst_target_bb = tblock;
9247 start_new_bblock = 1;
9249 if (*ip == CEE_LEAVE)
9258 * Mono specific opcodes
9260 case MONO_CUSTOM_PREFIX: {
9262 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9266 case CEE_MONO_ICALL: {
9268 MonoJitICallInfo *info;
9270 token = read32 (ip + 2);
9271 func = mono_method_get_wrapper_data (method, token);
9272 info = mono_find_jit_icall_by_addr (func);
9275 CHECK_STACK (info->sig->param_count);
9276 sp -= info->sig->param_count;
9278 ins = mono_emit_jit_icall (cfg, info->func, sp);
9279 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9283 inline_costs += 10 * num_calls++;
9287 case CEE_MONO_LDPTR: {
9290 CHECK_STACK_OVF (1);
9292 token = read32 (ip + 2);
9294 ptr = mono_method_get_wrapper_data (method, token);
9295 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9296 MonoJitICallInfo *callinfo;
9297 const char *icall_name;
9299 icall_name = method->name + strlen ("__icall_wrapper_");
9300 g_assert (icall_name);
9301 callinfo = mono_find_jit_icall_by_name (icall_name);
9302 g_assert (callinfo);
9304 if (ptr == callinfo->func) {
9305 /* Will be transformed into an AOTCONST later */
9306 EMIT_NEW_PCONST (cfg, ins, ptr);
9312 /* FIXME: Generalize this */
9313 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9314 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9319 EMIT_NEW_PCONST (cfg, ins, ptr);
9322 inline_costs += 10 * num_calls++;
9323 /* Can't embed random pointers into AOT code */
9324 cfg->disable_aot = 1;
9327 case CEE_MONO_ICALL_ADDR: {
9328 MonoMethod *cmethod;
9331 CHECK_STACK_OVF (1);
9333 token = read32 (ip + 2);
9335 cmethod = mono_method_get_wrapper_data (method, token);
9337 if (cfg->compile_aot) {
9338 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9340 ptr = mono_lookup_internal_call (cmethod);
9342 EMIT_NEW_PCONST (cfg, ins, ptr);
9348 case CEE_MONO_VTADDR: {
9349 MonoInst *src_var, *src;
9355 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9356 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9361 case CEE_MONO_NEWOBJ: {
9362 MonoInst *iargs [2];
9364 CHECK_STACK_OVF (1);
9366 token = read32 (ip + 2);
9367 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9368 mono_class_init (klass);
9369 NEW_DOMAINCONST (cfg, iargs [0]);
9370 MONO_ADD_INS (cfg->cbb, iargs [0]);
9371 NEW_CLASSCONST (cfg, iargs [1], klass);
9372 MONO_ADD_INS (cfg->cbb, iargs [1]);
9373 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9375 inline_costs += 10 * num_calls++;
9378 case CEE_MONO_OBJADDR:
9381 MONO_INST_NEW (cfg, ins, OP_MOVE);
9382 ins->dreg = alloc_preg (cfg);
9383 ins->sreg1 = sp [0]->dreg;
9384 ins->type = STACK_MP;
9385 MONO_ADD_INS (cfg->cbb, ins);
9389 case CEE_MONO_LDNATIVEOBJ:
9391 * Similar to LDOBJ, but instead load the unmanaged
9392 * representation of the vtype to the stack.
9397 token = read32 (ip + 2);
9398 klass = mono_method_get_wrapper_data (method, token);
9399 g_assert (klass->valuetype);
9400 mono_class_init (klass);
9403 MonoInst *src, *dest, *temp;
9406 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9407 temp->backend.is_pinvoke = 1;
9408 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9409 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9411 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9412 dest->type = STACK_VTYPE;
9413 dest->klass = klass;
9419 case CEE_MONO_RETOBJ: {
9421 * Same as RET, but return the native representation of a vtype
9424 g_assert (cfg->ret);
9425 g_assert (mono_method_signature (method)->pinvoke);
9430 token = read32 (ip + 2);
9431 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9433 if (!cfg->vret_addr) {
9434 g_assert (cfg->ret_var_is_local);
9436 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9438 EMIT_NEW_RETLOADA (cfg, ins);
9440 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9442 if (sp != stack_start)
9445 MONO_INST_NEW (cfg, ins, OP_BR);
9446 ins->inst_target_bb = end_bblock;
9447 MONO_ADD_INS (bblock, ins);
9448 link_bblock (cfg, bblock, end_bblock);
9449 start_new_bblock = 1;
9453 case CEE_MONO_CISINST:
9454 case CEE_MONO_CCASTCLASS: {
9459 token = read32 (ip + 2);
9460 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9461 if (ip [1] == CEE_MONO_CISINST)
9462 ins = handle_cisinst (cfg, klass, sp [0]);
9464 ins = handle_ccastclass (cfg, klass, sp [0]);
9470 case CEE_MONO_SAVE_LMF:
9471 case CEE_MONO_RESTORE_LMF:
9472 #ifdef MONO_ARCH_HAVE_LMF_OPS
9473 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9474 MONO_ADD_INS (bblock, ins);
9475 cfg->need_lmf_area = TRUE;
9479 case CEE_MONO_CLASSCONST:
9480 CHECK_STACK_OVF (1);
9482 token = read32 (ip + 2);
9483 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9486 inline_costs += 10 * num_calls++;
9488 case CEE_MONO_NOT_TAKEN:
9489 bblock->out_of_line = TRUE;
9493 CHECK_STACK_OVF (1);
9495 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9496 ins->dreg = alloc_preg (cfg);
9497 ins->inst_offset = (gint32)read32 (ip + 2);
9498 ins->type = STACK_PTR;
9499 MONO_ADD_INS (bblock, ins);
9503 case CEE_MONO_DYN_CALL: {
9506 /* It would be easier to call a trampoline, but that would put an
9507 * extra frame on the stack, confusing exception handling. So
9508 * implement it inline using an opcode for now.
9511 if (!cfg->dyn_call_var) {
9512 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9513 /* prevent it from being register allocated */
9514 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9517 /* Has to use a call inst since it local regalloc expects it */
9518 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9519 ins = (MonoInst*)call;
9521 ins->sreg1 = sp [0]->dreg;
9522 ins->sreg2 = sp [1]->dreg;
9523 MONO_ADD_INS (bblock, ins);
9525 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9526 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9530 inline_costs += 10 * num_calls++;
9535 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9545 /* somewhat similar to LDTOKEN */
9546 MonoInst *addr, *vtvar;
9547 CHECK_STACK_OVF (1);
9548 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9550 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9551 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9553 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9554 ins->type = STACK_VTYPE;
9555 ins->klass = mono_defaults.argumenthandle_class;
9568 * The following transforms:
9569 * CEE_CEQ into OP_CEQ
9570 * CEE_CGT into OP_CGT
9571 * CEE_CGT_UN into OP_CGT_UN
9572 * CEE_CLT into OP_CLT
9573 * CEE_CLT_UN into OP_CLT_UN
9575 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9577 MONO_INST_NEW (cfg, ins, cmp->opcode);
9579 cmp->sreg1 = sp [0]->dreg;
9580 cmp->sreg2 = sp [1]->dreg;
9581 type_from_op (cmp, sp [0], sp [1]);
9583 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9584 cmp->opcode = OP_LCOMPARE;
9585 else if (sp [0]->type == STACK_R8)
9586 cmp->opcode = OP_FCOMPARE;
9588 cmp->opcode = OP_ICOMPARE;
9589 MONO_ADD_INS (bblock, cmp);
9590 ins->type = STACK_I4;
9591 ins->dreg = alloc_dreg (cfg, ins->type);
9592 type_from_op (ins, sp [0], sp [1]);
9594 if (cmp->opcode == OP_FCOMPARE) {
9596 * The backends expect the fceq opcodes to do the
9599 cmp->opcode = OP_NOP;
9600 ins->sreg1 = cmp->sreg1;
9601 ins->sreg2 = cmp->sreg2;
9603 MONO_ADD_INS (bblock, ins);
9610 MonoMethod *cil_method;
9611 gboolean needs_static_rgctx_invoke;
9613 CHECK_STACK_OVF (1);
9615 n = read32 (ip + 2);
9616 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9617 if (!cmethod || mono_loader_get_last_error ())
9619 mono_class_init (cmethod->klass);
9621 mono_save_token_info (cfg, image, n, cmethod);
9623 if (cfg->generic_sharing_context)
9624 context_used = mono_method_check_context_used (cmethod);
9626 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9628 cil_method = cmethod;
9629 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9630 METHOD_ACCESS_FAILURE;
9632 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9633 if (check_linkdemand (cfg, method, cmethod))
9635 CHECK_CFG_EXCEPTION;
9636 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9637 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9641 * Optimize the common case of ldftn+delegate creation
9643 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9644 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9645 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9647 int invoke_context_used = 0;
9649 invoke = mono_get_delegate_invoke (ctor_method->klass);
9650 if (!invoke || !mono_method_signature (invoke))
9653 if (cfg->generic_sharing_context)
9654 invoke_context_used = mono_method_check_context_used (invoke);
9656 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9657 /* FIXME: SGEN support */
9658 if (!cfg->gen_write_barriers && invoke_context_used == 0) {
9659 MonoInst *target_ins;
9662 if (cfg->verbose_level > 3)
9663 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9664 target_ins = sp [-1];
9666 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9667 CHECK_CFG_EXCEPTION;
9676 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9677 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9681 inline_costs += 10 * num_calls++;
9684 case CEE_LDVIRTFTN: {
9689 n = read32 (ip + 2);
9690 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9691 if (!cmethod || mono_loader_get_last_error ())
9693 mono_class_init (cmethod->klass);
9695 if (cfg->generic_sharing_context)
9696 context_used = mono_method_check_context_used (cmethod);
9698 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9699 if (check_linkdemand (cfg, method, cmethod))
9701 CHECK_CFG_EXCEPTION;
9702 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9703 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9709 args [1] = emit_get_rgctx_method (cfg, context_used,
9710 cmethod, MONO_RGCTX_INFO_METHOD);
9713 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9715 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9718 inline_costs += 10 * num_calls++;
9722 CHECK_STACK_OVF (1);
9724 n = read16 (ip + 2);
9726 EMIT_NEW_ARGLOAD (cfg, ins, n);
9731 CHECK_STACK_OVF (1);
9733 n = read16 (ip + 2);
9735 NEW_ARGLOADA (cfg, ins, n);
9736 MONO_ADD_INS (cfg->cbb, ins);
9744 n = read16 (ip + 2);
9746 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9748 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9752 CHECK_STACK_OVF (1);
9754 n = read16 (ip + 2);
9756 EMIT_NEW_LOCLOAD (cfg, ins, n);
9761 unsigned char *tmp_ip;
9762 CHECK_STACK_OVF (1);
9764 n = read16 (ip + 2);
9767 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9773 EMIT_NEW_LOCLOADA (cfg, ins, n);
9782 n = read16 (ip + 2);
9784 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9786 emit_stloc_ir (cfg, sp, header, n);
9793 if (sp != stack_start)
9795 if (cfg->method != method)
9797 * Inlining this into a loop in a parent could lead to
9798 * stack overflows which is different behavior than the
9799 * non-inlined case, thus disable inlining in this case.
9801 goto inline_failure;
9803 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9804 ins->dreg = alloc_preg (cfg);
9805 ins->sreg1 = sp [0]->dreg;
9806 ins->type = STACK_PTR;
9807 MONO_ADD_INS (cfg->cbb, ins);
9809 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9811 ins->flags |= MONO_INST_INIT;
9816 case CEE_ENDFILTER: {
9817 MonoExceptionClause *clause, *nearest;
9818 int cc, nearest_num;
9822 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9824 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9825 ins->sreg1 = (*sp)->dreg;
9826 MONO_ADD_INS (bblock, ins);
9827 start_new_bblock = 1;
9832 for (cc = 0; cc < header->num_clauses; ++cc) {
9833 clause = &header->clauses [cc];
9834 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9835 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9836 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9842 if ((ip - header->code) != nearest->handler_offset)
9847 case CEE_UNALIGNED_:
9848 ins_flag |= MONO_INST_UNALIGNED;
9849 /* FIXME: record alignment? we can assume 1 for now */
9854 ins_flag |= MONO_INST_VOLATILE;
9858 ins_flag |= MONO_INST_TAILCALL;
9859 cfg->flags |= MONO_CFG_HAS_TAIL;
9860 /* Can't inline tail calls at this time */
9861 inline_costs += 100000;
9868 token = read32 (ip + 2);
9869 klass = mini_get_class (method, token, generic_context);
9870 CHECK_TYPELOAD (klass);
9871 if (generic_class_is_reference_type (cfg, klass))
9872 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9874 mini_emit_initobj (cfg, *sp, NULL, klass);
9878 case CEE_CONSTRAINED_:
9880 token = read32 (ip + 2);
9881 if (method->wrapper_type != MONO_WRAPPER_NONE)
9882 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9884 constrained_call = mono_class_get_full (image, token, generic_context);
9885 CHECK_TYPELOAD (constrained_call);
9890 MonoInst *iargs [3];
9894 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9895 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9896 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9897 /* emit_memset only works when val == 0 */
9898 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9903 if (ip [1] == CEE_CPBLK) {
9904 MonoMethod *memcpy_method = get_memcpy_method ();
9905 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9907 MonoMethod *memset_method = get_memset_method ();
9908 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9918 ins_flag |= MONO_INST_NOTYPECHECK;
9920 ins_flag |= MONO_INST_NORANGECHECK;
9921 /* we ignore the no-nullcheck for now since we
9922 * really do it explicitly only when doing callvirt->call
9928 int handler_offset = -1;
9930 for (i = 0; i < header->num_clauses; ++i) {
9931 MonoExceptionClause *clause = &header->clauses [i];
9932 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9933 handler_offset = clause->handler_offset;
9938 bblock->flags |= BB_EXCEPTION_UNSAFE;
9940 g_assert (handler_offset != -1);
9942 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9943 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9944 ins->sreg1 = load->dreg;
9945 MONO_ADD_INS (bblock, ins);
9947 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9948 MONO_ADD_INS (bblock, ins);
9951 link_bblock (cfg, bblock, end_bblock);
9952 start_new_bblock = 1;
9960 CHECK_STACK_OVF (1);
9962 token = read32 (ip + 2);
9963 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9964 MonoType *type = mono_type_create_from_typespec (image, token);
9965 token = mono_type_size (type, &ialign);
9967 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9968 CHECK_TYPELOAD (klass);
9969 mono_class_init (klass);
9970 token = mono_class_value_size (klass, &align);
9972 EMIT_NEW_ICONST (cfg, ins, token);
9977 case CEE_REFANYTYPE: {
9978 MonoInst *src_var, *src;
9984 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9986 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9987 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9988 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10006 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10016 g_warning ("opcode 0x%02x not handled", *ip);
10020 if (start_new_bblock != 1)
10023 bblock->cil_length = ip - bblock->cil_code;
10024 bblock->next_bb = end_bblock;
10026 if (cfg->method == method && cfg->domainvar) {
10028 MonoInst *get_domain;
10030 cfg->cbb = init_localsbb;
10032 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10033 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10036 get_domain->dreg = alloc_preg (cfg);
10037 MONO_ADD_INS (cfg->cbb, get_domain);
10039 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10040 MONO_ADD_INS (cfg->cbb, store);
10043 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10044 if (cfg->compile_aot)
10045 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10046 mono_get_got_var (cfg);
10049 if (cfg->method == method && cfg->got_var)
10050 mono_emit_load_got_addr (cfg);
10055 cfg->cbb = init_localsbb;
10057 for (i = 0; i < header->num_locals; ++i) {
10058 MonoType *ptype = header->locals [i];
10059 int t = ptype->type;
10060 dreg = cfg->locals [i]->dreg;
10062 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10063 t = mono_class_enum_basetype (ptype->data.klass)->type;
10064 if (ptype->byref) {
10065 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10066 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10067 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10068 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10069 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10070 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10071 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10072 ins->type = STACK_R8;
10073 ins->inst_p0 = (void*)&r8_0;
10074 ins->dreg = alloc_dreg (cfg, STACK_R8);
10075 MONO_ADD_INS (init_localsbb, ins);
10076 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10077 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10078 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10079 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10081 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10086 if (cfg->init_ref_vars && cfg->method == method) {
10087 /* Emit initialization for ref vars */
10088 // FIXME: Avoid duplication initialization for IL locals.
10089 for (i = 0; i < cfg->num_varinfo; ++i) {
10090 MonoInst *ins = cfg->varinfo [i];
10092 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10093 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10097 /* Add a sequence point for method entry/exit events */
10099 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10100 MONO_ADD_INS (init_localsbb, ins);
10101 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10102 MONO_ADD_INS (cfg->bb_exit, ins);
10107 if (cfg->method == method) {
10108 MonoBasicBlock *bb;
10109 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10110 bb->region = mono_find_block_region (cfg, bb->real_offset);
10112 mono_create_spvar_for_region (cfg, bb->region);
10113 if (cfg->verbose_level > 2)
10114 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10118 g_slist_free (class_inits);
10119 dont_inline = g_list_remove (dont_inline, method);
10121 if (inline_costs < 0) {
10124 /* Method is too large */
10125 mname = mono_method_full_name (method, TRUE);
10126 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10127 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10129 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10130 mono_basic_block_free (original_bb);
10134 if ((cfg->verbose_level > 2) && (cfg->method == method))
10135 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10137 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10138 mono_basic_block_free (original_bb);
10139 return inline_costs;
10142 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10149 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10153 set_exception_type_from_invalid_il (cfg, method, ip);
10157 g_slist_free (class_inits);
10158 mono_basic_block_free (original_bb);
10159 dont_inline = g_list_remove (dont_inline, method);
10160 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10165 store_membase_reg_to_store_membase_imm (int opcode)
10168 case OP_STORE_MEMBASE_REG:
10169 return OP_STORE_MEMBASE_IMM;
10170 case OP_STOREI1_MEMBASE_REG:
10171 return OP_STOREI1_MEMBASE_IMM;
10172 case OP_STOREI2_MEMBASE_REG:
10173 return OP_STOREI2_MEMBASE_IMM;
10174 case OP_STOREI4_MEMBASE_REG:
10175 return OP_STOREI4_MEMBASE_IMM;
10176 case OP_STOREI8_MEMBASE_REG:
10177 return OP_STOREI8_MEMBASE_IMM;
10179 g_assert_not_reached ();
10185 #endif /* DISABLE_JIT */
10188 mono_op_to_op_imm (int opcode)
10192 return OP_IADD_IMM;
10194 return OP_ISUB_IMM;
10196 return OP_IDIV_IMM;
10198 return OP_IDIV_UN_IMM;
10200 return OP_IREM_IMM;
10202 return OP_IREM_UN_IMM;
10204 return OP_IMUL_IMM;
10206 return OP_IAND_IMM;
10210 return OP_IXOR_IMM;
10212 return OP_ISHL_IMM;
10214 return OP_ISHR_IMM;
10216 return OP_ISHR_UN_IMM;
10219 return OP_LADD_IMM;
10221 return OP_LSUB_IMM;
10223 return OP_LAND_IMM;
10227 return OP_LXOR_IMM;
10229 return OP_LSHL_IMM;
10231 return OP_LSHR_IMM;
10233 return OP_LSHR_UN_IMM;
10236 return OP_COMPARE_IMM;
10238 return OP_ICOMPARE_IMM;
10240 return OP_LCOMPARE_IMM;
10242 case OP_STORE_MEMBASE_REG:
10243 return OP_STORE_MEMBASE_IMM;
10244 case OP_STOREI1_MEMBASE_REG:
10245 return OP_STOREI1_MEMBASE_IMM;
10246 case OP_STOREI2_MEMBASE_REG:
10247 return OP_STOREI2_MEMBASE_IMM;
10248 case OP_STOREI4_MEMBASE_REG:
10249 return OP_STOREI4_MEMBASE_IMM;
10251 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10253 return OP_X86_PUSH_IMM;
10254 case OP_X86_COMPARE_MEMBASE_REG:
10255 return OP_X86_COMPARE_MEMBASE_IMM;
10257 #if defined(TARGET_AMD64)
10258 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10259 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10261 case OP_VOIDCALL_REG:
10262 return OP_VOIDCALL;
10270 return OP_LOCALLOC_IMM;
10277 ldind_to_load_membase (int opcode)
10281 return OP_LOADI1_MEMBASE;
10283 return OP_LOADU1_MEMBASE;
10285 return OP_LOADI2_MEMBASE;
10287 return OP_LOADU2_MEMBASE;
10289 return OP_LOADI4_MEMBASE;
10291 return OP_LOADU4_MEMBASE;
10293 return OP_LOAD_MEMBASE;
10294 case CEE_LDIND_REF:
10295 return OP_LOAD_MEMBASE;
10297 return OP_LOADI8_MEMBASE;
10299 return OP_LOADR4_MEMBASE;
10301 return OP_LOADR8_MEMBASE;
10303 g_assert_not_reached ();
10310 stind_to_store_membase (int opcode)
10314 return OP_STOREI1_MEMBASE_REG;
10316 return OP_STOREI2_MEMBASE_REG;
10318 return OP_STOREI4_MEMBASE_REG;
10320 case CEE_STIND_REF:
10321 return OP_STORE_MEMBASE_REG;
10323 return OP_STOREI8_MEMBASE_REG;
10325 return OP_STORER4_MEMBASE_REG;
10327 return OP_STORER8_MEMBASE_REG;
10329 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_LOAD*_MEM variant of a OP_LOAD*_MEMBASE opcode, used when
 * the base register + offset folds to a constant address. Returns -1 when
 * the target architecture has no such instruction form.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the read-modify-write x86/amd64 opcode which folds OPCODE with a
 * following store STORE_OPCODE into a single instruction operating directly
 * on memory, or -1 if no such folding is possible.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* a move folded into the store is a no-op */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* a move folded into the store is a no-op */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode OPCODE feeding a byte store STORE_OPCODE
 * into a single x86 SETcc-to-memory instruction, or return -1.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load LOAD_OPCODE feeding the first source operand of OPCODE into
 * a single instruction reading its operand directly from memory, or return -1.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load LOAD_OPCODE feeding the second source operand of OPCODE into
 * a single reg-memory instruction, or return -1.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
		/* 32 bit operations; the 32 bit reg-membase forms are shared with x86 */
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
		/* 64 bit operations */
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are emulated
 * by runtime helper calls on the current architecture, since those cannot
 * take an immediate operand.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
10620 #ifndef DISABLE_JIT
10623 * mono_handle_global_vregs:
10625 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10629 mono_handle_global_vregs (MonoCompile *cfg)
10631 gint32 *vreg_to_bb;
10632 MonoBasicBlock *bb;
10635 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10637 #ifdef MONO_ARCH_SIMD_INTRINSICS
10638 if (cfg->uses_simd_intrinsics)
10639 mono_simd_simplify_indirection (cfg);
10642 /* Find local vregs used in more than one bb */
10643 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10644 MonoInst *ins = bb->code;
10645 int block_num = bb->block_num;
10647 if (cfg->verbose_level > 2)
10648 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10651 for (; ins; ins = ins->next) {
10652 const char *spec = INS_INFO (ins->opcode);
10653 int regtype = 0, regindex;
10656 if (G_UNLIKELY (cfg->verbose_level > 2))
10657 mono_print_ins (ins);
10659 g_assert (ins->opcode >= MONO_CEE_LAST);
10661 for (regindex = 0; regindex < 4; regindex ++) {
10664 if (regindex == 0) {
10665 regtype = spec [MONO_INST_DEST];
10666 if (regtype == ' ')
10669 } else if (regindex == 1) {
10670 regtype = spec [MONO_INST_SRC1];
10671 if (regtype == ' ')
10674 } else if (regindex == 2) {
10675 regtype = spec [MONO_INST_SRC2];
10676 if (regtype == ' ')
10679 } else if (regindex == 3) {
10680 regtype = spec [MONO_INST_SRC3];
10681 if (regtype == ' ')
10686 #if SIZEOF_REGISTER == 4
10687 /* In the LLVM case, the long opcodes are not decomposed */
10688 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10690 * Since some instructions reference the original long vreg,
10691 * and some reference the two component vregs, it is quite hard
10692 * to determine when it needs to be global. So be conservative.
10694 if (!get_vreg_to_inst (cfg, vreg)) {
10695 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10697 if (cfg->verbose_level > 2)
10698 printf ("LONG VREG R%d made global.\n", vreg);
10702 * Make the component vregs volatile since the optimizations can
10703 * get confused otherwise.
10705 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10706 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10710 g_assert (vreg != -1);
10712 prev_bb = vreg_to_bb [vreg];
10713 if (prev_bb == 0) {
10714 /* 0 is a valid block num */
10715 vreg_to_bb [vreg] = block_num + 1;
10716 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10717 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10720 if (!get_vreg_to_inst (cfg, vreg)) {
10721 if (G_UNLIKELY (cfg->verbose_level > 2))
10722 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10726 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10729 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10732 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10735 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10738 g_assert_not_reached ();
10742 /* Flag as having been used in more than one bb */
10743 vreg_to_bb [vreg] = -1;
10749 /* If a variable is used in only one bblock, convert it into a local vreg */
10750 for (i = 0; i < cfg->num_varinfo; i++) {
10751 MonoInst *var = cfg->varinfo [i];
10752 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10754 switch (var->type) {
10760 #if SIZEOF_REGISTER == 8
10763 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10764 /* Enabling this screws up the fp stack on x86 */
10767 /* Arguments are implicitly global */
10768 /* Putting R4 vars into registers doesn't work currently */
10769 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10771 * Make that the variable's liveness interval doesn't contain a call, since
10772 * that would cause the lvreg to be spilled, making the whole optimization
10775 /* This is too slow for JIT compilation */
10777 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10779 int def_index, call_index, ins_index;
10780 gboolean spilled = FALSE;
10785 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10786 const char *spec = INS_INFO (ins->opcode);
10788 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10789 def_index = ins_index;
10791 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10792 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10793 if (call_index > def_index) {
10799 if (MONO_IS_CALL (ins))
10800 call_index = ins_index;
10810 if (G_UNLIKELY (cfg->verbose_level > 2))
10811 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10812 var->flags |= MONO_INST_IS_DEAD;
10813 cfg->vreg_to_inst [var->dreg] = NULL;
10820 * Compress the varinfo and vars tables so the liveness computation is faster and
10821 * takes up less space.
10824 for (i = 0; i < cfg->num_varinfo; ++i) {
10825 MonoInst *var = cfg->varinfo [i];
10826 if (pos < i && cfg->locals_start == i)
10827 cfg->locals_start = pos;
10828 if (!(var->flags & MONO_INST_IS_DEAD)) {
10830 cfg->varinfo [pos] = cfg->varinfo [i];
10831 cfg->varinfo [pos]->inst_c0 = pos;
10832 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10833 cfg->vars [pos].idx = pos;
10834 #if SIZEOF_REGISTER == 4
10835 if (cfg->varinfo [pos]->type == STACK_I8) {
10836 /* Modify the two component vars too */
10839 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10840 var1->inst_c0 = pos;
10841 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10842 var1->inst_c0 = pos;
10849 cfg->num_varinfo = pos;
10850 if (cfg->locals_start > cfg->num_varinfo)
10851 cfg->locals_start = cfg->num_varinfo;
10855 * mono_spill_global_vars:
10857 * Generate spill code for variables which are not allocated to registers,
10858 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10859 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 * Lower accesses to global vregs (variables backed by a MonoInst returned by
 * get_vreg_to_inst ()) into explicit spill loads/stores around each
 * instruction, or fuse them into _membase opcodes where possible.
 * Sets *need_local_opts to TRUE when the rewriting (e.g. OP_LDADDR ->
 * OP_ADD_IMM) creates opportunities for another local-optimization pass.
 *
 * NOTE(review): this dump is missing interleaved lines (the embedded original
 * line numbers skip), so several declarations used below (spec2, lvregs, tree,
 * store_opcode, lvreg, ...) and some closing braces are in elided lines. Do
 * not assume adjacent lines here are adjacent in the real file.
 */
10862 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10864 MonoBasicBlock *bb;
10866 int orig_next_vreg;
10867 guint32 *vreg_to_lvreg;
10869 guint32 i, lvregs_len;
10870 gboolean dest_has_lvreg = FALSE;
10871 guint32 stacktypes [128];
10872 MonoInst **live_range_start, **live_range_end;
10873 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10875 *need_local_opts = FALSE;
10877 memset (spec2, 0, sizeof (spec2));
10879 /* FIXME: Move this function to mini.c */
/* Map the regtype characters used in instruction specs to stack type
 * constants, so alloc_dreg () below can allocate a matching lvreg. */
10880 stacktypes ['i'] = STACK_PTR;
10881 stacktypes ['l'] = STACK_I8;
10882 stacktypes ['f'] = STACK_R8;
10883 #ifdef MONO_ARCH_SIMD_INTRINSICS
10884 stacktypes ['x'] = STACK_VTYPE;
10887 #if SIZEOF_REGISTER == 4
10888 /* Create MonoInsts for longs */
/* On 32 bit targets a 64 bit variable is accessed through two word-sized
 * vregs (dreg + 1 / dreg + 2); give each half its own OP_REGOFFSET inst
 * at the LS/MS word offset of the parent's stack slot. */
10889 for (i = 0; i < cfg->num_varinfo; i++) {
10890 MonoInst *ins = cfg->varinfo [i];
10892 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10893 switch (ins->type) {
10898 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10901 g_assert (ins->opcode == OP_REGOFFSET);
10903 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10905 tree->opcode = OP_REGOFFSET;
10906 tree->inst_basereg = ins->inst_basereg;
10907 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10909 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10911 tree->opcode = OP_REGOFFSET;
10912 tree->inst_basereg = ins->inst_basereg;
10913 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10923 /* FIXME: widening and truncation */
10926 * As an optimization, when a variable allocated to the stack is first loaded into
10927 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10928 * the variable again.
/* vreg_to_lvreg [vreg] caches the lvreg currently holding vreg's value;
 * lvregs/lvregs_len (declared in an elided line) track which cache entries
 * must be cleared at bblock boundaries and calls. */
10930 orig_next_vreg = cfg->next_vreg;
10931 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10932 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10936 * These arrays contain the first and last instructions accessing a given
10938 * Since we emit bblocks in the same order we process them here, and we
10939 * don't split live ranges, these will precisely describe the live range of
10940 * the variable, i.e. the instruction range where a valid value can be found
10941 * in the variable's location.
10942 * The live range is computed using the liveness info computed by the liveness pass.
10943 * We can't use vmv->range, since that is an abstract live range, and we need
10944 * one which is instruction precise.
10945 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10947 /* FIXME: Only do this if debugging info is requested */
10948 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10949 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10950 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10951 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10953 /* Add spill loads/stores */
10954 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10957 if (cfg->verbose_level > 2)
10958 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10960 /* Clear vreg_to_lvreg array */
/* lvreg caching is only valid within one bblock. */
10961 for (i = 0; i < lvregs_len; i++)
10962 vreg_to_lvreg [lvregs [i]] = 0;
10966 MONO_BB_FOR_EACH_INS (bb, ins) {
10967 const char *spec = INS_INFO (ins->opcode);
10968 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10969 gboolean store, no_lvreg;
10970 int sregs [MONO_MAX_SRC_REGS];
10972 if (G_UNLIKELY (cfg->verbose_level > 2))
10973 mono_print_ins (ins);
10975 if (ins->opcode == OP_NOP)
10979 * We handle LDADDR here as well, since it can only be decomposed
10980 * when variable addresses are known.
10982 if (ins->opcode == OP_LDADDR) {
10983 MonoInst *var = ins->inst_p0;
10985 if (var->opcode == OP_VTARG_ADDR) {
10986 /* Happens on SPARC/S390 where vtypes are passed by reference */
10987 MonoInst *vtaddr = var->inst_left;
10988 if (vtaddr->opcode == OP_REGVAR) {
/* The address itself lives in a register: a plain move suffices. */
10989 ins->opcode = OP_MOVE;
10990 ins->sreg1 = vtaddr->dreg;
10992 else if (var->inst_left->opcode == OP_REGOFFSET) {
10993 ins->opcode = OP_LOAD_MEMBASE;
10994 ins->inst_basereg = vtaddr->inst_basereg;
10995 ins->inst_offset = vtaddr->inst_offset;
10999 g_assert (var->opcode == OP_REGOFFSET);
/* Compute the variable's stack address: basereg + offset. */
11001 ins->opcode = OP_ADD_IMM;
11002 ins->sreg1 = var->inst_basereg;
11003 ins->inst_imm = var->inst_offset;
/* The new ADD_IMM/MOVE forms enable further local optimizations. */
11006 *need_local_opts = TRUE;
11007 spec = INS_INFO (ins->opcode);
/* By this point only low-level (post-CEE) opcodes should remain. */
11010 if (ins->opcode < MONO_CEE_LAST) {
11011 mono_print_ins (ins);
11012 g_assert_not_reached ();
11016 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Swap dreg/sreg2 so the store's base register is processed as a source
 * below; spec2 is a rewritten spec reflecting the swap. Swapped back
 * after register processing (see the matching swap further down). */
11020 if (MONO_IS_STORE_MEMBASE (ins)) {
11021 tmp_reg = ins->dreg;
11022 ins->dreg = ins->sreg2;
11023 ins->sreg2 = tmp_reg;
11026 spec2 [MONO_INST_DEST] = ' ';
11027 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11028 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11029 spec2 [MONO_INST_SRC3] = ' ';
11031 } else if (MONO_IS_STORE_MEMINDEX (ins))
11032 g_assert_not_reached ();
11037 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11038 printf ("\t %.3s %d", spec, ins->dreg);
11039 num_sregs = mono_inst_get_src_registers (ins, sregs);
11040 for (srcindex = 0; srcindex < 3; ++srcindex)
11041 printf (" %d", sregs [srcindex]);
/*
 * DESTINATION REGISTER: if dreg names a global variable, either rename it
 * (variable in a hard register), fuse the store into a _membase form of
 * the instruction, or allocate an lvreg and emit a spill store after ins.
 */
11048 regtype = spec [MONO_INST_DEST];
11049 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11052 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11053 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11054 MonoInst *store_ins;
11056 MonoInst *def_ins = ins;
11057 int dreg = ins->dreg; /* The original vreg */
11059 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11061 if (var->opcode == OP_REGVAR) {
11062 ins->dreg = var->dreg;
11063 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11065 * Instead of emitting a load+store, use a _membase opcode.
11067 g_assert (var->opcode == OP_REGOFFSET);
11068 if (ins->opcode == OP_MOVE) {
11072 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11073 ins->inst_basereg = var->inst_basereg;
11074 ins->inst_offset = var->inst_offset;
11077 spec = INS_INFO (ins->opcode);
11081 g_assert (var->opcode == OP_REGOFFSET);
11083 prev_dreg = ins->dreg;
11085 /* Invalidate any previous lvreg for this vreg */
11086 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float an R8 value is kept in integer registers, so store
 * it with the I8 store opcode. */
11090 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11092 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg; the variable's stack
 * slot is updated by the store(s) inserted below. */
11095 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11097 if (regtype == 'l') {
/* 64 bit value on a 32 bit target: store the two word halves
 * separately at their LS/MS offsets. */
11098 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11099 mono_bblock_insert_after_ins (bb, ins, store_ins);
11100 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11101 mono_bblock_insert_after_ins (bb, ins, store_ins);
11102 def_ins = store_ins;
11105 g_assert (store_opcode != OP_STOREV_MEMBASE);
11107 /* Try to fuse the store into the instruction itself */
11108 /* FIXME: Add more instructions */
11109 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11110 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11111 ins->inst_imm = ins->inst_c0;
11112 ins->inst_destbasereg = var->inst_basereg;
11113 ins->inst_offset = var->inst_offset;
11114 spec = INS_INFO (ins->opcode);
11115 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into the variable becomes a direct store of the source. */
11116 ins->opcode = store_opcode;
11117 ins->inst_destbasereg = var->inst_basereg;
11118 ins->inst_offset = var->inst_offset;
/* The instruction became a store: swap dreg/sreg2 and switch to
 * the rewritten spec, mirroring the MONO_IS_STORE_MEMBASE case. */
11122 tmp_reg = ins->dreg;
11123 ins->dreg = ins->sreg2;
11124 ins->sreg2 = tmp_reg;
11127 spec2 [MONO_INST_DEST] = ' ';
11128 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11129 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11130 spec2 [MONO_INST_SRC3] = ' ';
11132 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11133 // FIXME: The backends expect the base reg to be in inst_basereg
11134 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11136 ins->inst_basereg = var->inst_basereg;
11137 ins->inst_offset = var->inst_offset;
11138 spec = INS_INFO (ins->opcode);
11140 /* printf ("INS: "); mono_print_ins (ins); */
11141 /* Create a store instruction */
11142 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11144 /* Insert it after the instruction */
11145 mono_bblock_insert_after_ins (bb, ins, store_ins);
11147 def_ins = store_ins;
11150 * We can't assign ins->dreg to var->dreg here, since the
11151 * sregs could use it. So set a flag, and do it after
11154 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11155 dest_has_lvreg = TRUE;
/* Record the first definition as the start of the live range. */
11160 if (def_ins && !live_range_start [dreg]) {
11161 live_range_start [dreg] = def_ins;
11162 live_range_start_bb [dreg] = bb;
/*
 * SOURCE REGISTERS: for each sreg naming a global variable, rename it
 * (hard register), reuse a cached lvreg, fuse the load into a _membase
 * form, or emit a spill load before ins.
 */
11169 num_sregs = mono_inst_get_src_registers (ins, sregs);
11170 for (srcindex = 0; srcindex < 3; ++srcindex) {
11171 regtype = spec [MONO_INST_SRC1 + srcindex];
11172 sreg = sregs [srcindex];
11174 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11175 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11176 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11177 MonoInst *use_ins = ins;
11178 MonoInst *load_ins;
11179 guint32 load_opcode;
11181 if (var->opcode == OP_REGVAR) {
11182 sregs [srcindex] = var->dreg;
11183 //mono_inst_set_src_registers (ins, sregs);
11184 live_range_end [sreg] = use_ins;
11185 live_range_end_bb [sreg] = bb;
11189 g_assert (var->opcode == OP_REGOFFSET);
11191 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11193 g_assert (load_opcode != OP_LOADV_MEMBASE);
11195 if (vreg_to_lvreg [sreg]) {
11196 g_assert (vreg_to_lvreg [sreg] != -1);
11198 /* The variable is already loaded to an lvreg */
11199 if (G_UNLIKELY (cfg->verbose_level > 2))
11200 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11201 sregs [srcindex] = vreg_to_lvreg [sreg];
11202 //mono_inst_set_src_registers (ins, sregs);
11206 /* Try to fuse the load into the instruction */
11207 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11208 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11209 sregs [0] = var->inst_basereg;
11210 //mono_inst_set_src_registers (ins, sregs);
11211 ins->inst_offset = var->inst_offset;
11212 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11213 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11214 sregs [1] = var->inst_basereg;
11215 //mono_inst_set_src_registers (ins, sregs);
11216 ins->inst_offset = var->inst_offset;
11218 if (MONO_IS_REAL_MOVE (ins)) {
/* The move collapses into the load emitted below. */
11219 ins->opcode = OP_NOP;
11222 //printf ("%d ", srcindex); mono_print_ins (ins);
11224 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded lvreg for later uses of this variable,
 * unless the variable is volatile/indirect or lives on the fp stack. */
11226 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11227 if (var->dreg == prev_dreg) {
11229 * sreg refers to the value loaded by the load
11230 * emitted below, but we need to use ins->dreg
11231 * since it refers to the store emitted earlier.
11235 g_assert (sreg != -1);
11236 vreg_to_lvreg [var->dreg] = sreg;
11237 g_assert (lvregs_len < 1024);
11238 lvregs [lvregs_len ++] = var->dreg;
11242 sregs [srcindex] = sreg;
11243 //mono_inst_set_src_registers (ins, sregs);
11245 if (regtype == 'l') {
/* 64 bit value on a 32 bit target: load the MS then LS word halves. */
11246 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11247 mono_bblock_insert_before_ins (bb, ins, load_ins);
11248 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11249 mono_bblock_insert_before_ins (bb, ins, load_ins);
11250 use_ins = load_ins;
11253 #if SIZEOF_REGISTER == 4
11254 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11256 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11257 mono_bblock_insert_before_ins (bb, ins, load_ins);
11258 use_ins = load_ins;
/* Only original (pre-pass) vregs have live range slots. */
11262 if (var->dreg < orig_next_vreg) {
11263 live_range_end [var->dreg] = use_ins;
11264 live_range_end_bb [var->dreg] = bb;
11268 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg processing above: now that the sregs are
 * done, it is safe to publish ins->dreg as the variable's lvreg. */
11270 if (dest_has_lvreg) {
11271 g_assert (ins->dreg != -1);
11272 vreg_to_lvreg [prev_dreg] = ins->dreg;
11273 g_assert (lvregs_len < 1024);
11274 lvregs [lvregs_len ++] = prev_dreg;
11275 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap performed for store opcodes above. */
11279 tmp_reg = ins->dreg;
11280 ins->dreg = ins->sreg2;
11281 ins->sreg2 = tmp_reg;
11284 if (MONO_IS_CALL (ins)) {
11285 /* Clear vreg_to_lvreg array */
/* A call can clobber every lvreg, so the cache is invalid after it. */
11286 for (i = 0; i < lvregs_len; i++)
11287 vreg_to_lvreg [lvregs [i]] = 0;
11289 } else if (ins->opcode == OP_NOP) {
11291 MONO_INST_NULLIFY_SREGS (ins);
11294 if (cfg->verbose_level > 2)
11295 mono_print_ins_index (1, ins);
11298 /* Extend the live range based on the liveness info */
11299 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11300 for (i = 0; i < cfg->num_varinfo; i ++) {
11301 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11303 if (vreg_is_volatile (cfg, vi->vreg))
11304 /* The liveness info is incomplete */
11307 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11308 /* Live from at least the first ins of this bb */
11309 live_range_start [vi->vreg] = bb->code;
11310 live_range_start_bb [vi->vreg] = bb;
11313 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11314 /* Live at least until the last ins of this bb */
11315 live_range_end [vi->vreg] = bb->last_ins;
11316 live_range_end_bb [vi->vreg] = bb;
11322 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11324 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11325 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11327 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11328 for (i = 0; i < cfg->num_varinfo; ++i) {
11329 int vreg = MONO_VARINFO (cfg, i)->vreg;
11332 if (live_range_start [vreg]) {
11333 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11335 ins->inst_c1 = vreg;
11336 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11338 if (live_range_end [vreg]) {
11339 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11341 ins->inst_c1 = vreg;
11342 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11343 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11345 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live range arrays are g_new'd (not mempool) and must be freed here. */
11351 g_free (live_range_start);
11352 g_free (live_range_end);
11353 g_free (live_range_start_bb);
11354 g_free (live_range_end_bb);
11359 * - use 'iadd' instead of 'int_add'
11360 * - handling ovf opcodes: decompose in method_to_ir.
11361 * - unify iregs/fregs
11362 * -> partly done, the missing parts are:
11363 * - a more complete unification would involve unifying the hregs as well, so
11364 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11365 * would no longer map to the machine hregs, so the code generators would need to
11366 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11367 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11368 * fp/non-fp branches speeds it up by about 15%.
11369 * - use sext/zext opcodes instead of shifts
11371 * - get rid of TEMPLOADs if possible and use vregs instead
11372 * - clean up usage of OP_P/OP_ opcodes
11373 * - cleanup usage of DUMMY_USE
11374 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11376 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11377 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11378 * - make sure handle_stack_args () is called before the branch is emitted
11379 * - when the new IR is done, get rid of all unused stuff
11380 * - COMPARE/BEQ as separate instructions or unify them ?
11381 * - keeping them separate allows specialized compare instructions like
11382 * compare_imm, compare_membase
11383 * - most back ends unify fp compare+branch, fp compare+ceq
11384 * - integrate mono_save_args into inline_method
11385 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11386 * - handle long shift opts on 32 bit platforms somehow: they require
11387 * 3 sregs (2 for arg1 and 1 for arg2)
11388 * - make byref a 'normal' type.
11389 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11390 * variable if needed.
11391 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11392 * like inline_method.
11393 * - remove inlining restrictions
11394 * - fix LNEG and enable cfold of INEG
11395 * - generalize x86 optimizations like ldelema as a peephole optimization
11396 * - add store_mem_imm for amd64
11397 * - optimize the loading of the interruption flag in the managed->native wrappers
11398 * - avoid special handling of OP_NOP in passes
11399 * - move code inserting instructions into one function/macro.
11400 * - try a coalescing phase after liveness analysis
11401 * - add float -> vreg conversion + local optimizations on !x86
11402 * - figure out how to handle decomposed branches during optimizations, ie.
11403 * compare+branch, op_jump_table+op_br etc.
11404 * - promote RuntimeXHandles to vregs
11405 * - vtype cleanups:
11406 * - add a NEW_VARLOADA_VREG macro
11407 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11408 * accessing vtype fields.
11409 * - get rid of I8CONST on 64 bit platforms
11410 * - dealing with the increase in code size due to branches created during opcode
11412 * - use extended basic blocks
11413 * - all parts of the JIT
11414 * - handle_global_vregs () && local regalloc
11415 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11416 * - sources of increase in code size:
11419 * - isinst and castclass
11420 * - lvregs not allocated to global registers even if used multiple times
11421 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11423 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11424 * - add all micro optimizations from the old JIT
11425 * - put tree optimizations into the deadce pass
11426 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11427 * specific function.
11428 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11429 * fcompare + branchCC.
11430 * - create a helper function for allocating a stack slot, taking into account
11431 * MONO_CFG_HAS_SPILLUP.
11433 * - merge the ia64 switch changes.
11434 * - optimize mono_regstate2_alloc_int/float.
11435 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11436 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11437 * parts of the tree could be separated by other instructions, killing the tree
11438 * arguments, or stores killing loads etc. Also, should we fold loads into other
11439 * instructions if the result of the load is used multiple times ?
11440 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11441 * - LAST MERGE: 108395.
11442 * - when returning vtypes in registers, generate IR and append it to the end of the
11443 * last bb instead of doing it in the epilog.
11444 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11452 - When to decompose opcodes:
11453 - earlier: this makes some optimizations hard to implement, since the low level IR
11454 no longer contains the necessary information. But it is easier to do.
11455 - later: harder to implement, enables more optimizations.
11456 - Branches inside bblocks:
11457 - created when decomposing complex opcodes.
11458 - branches to another bblock: harmless, but not tracked by the branch
11459 optimizations, so need to branch to a label at the start of the bblock.
11460 - branches to inside the same bblock: very problematic, trips up the local
11461 reg allocator. Can be fixed by splitting the current bblock, but that is a
11462 complex operation, since some local vregs can become global vregs etc.
11463 - Local/global vregs:
11464 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11465 local register allocator.
11466 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11467 structure, created by mono_create_var (). Assigned to hregs or the stack by
11468 the global register allocator.
11469 - When to do optimizations like alu->alu_imm:
11470 - earlier -> saves work later on since the IR will be smaller/simpler
11471 - later -> can work on more instructions
11472 - Handling of valuetypes:
11473 - When a vtype is pushed on the stack, a new temporary is created, an
11474 instruction computing its address (LDADDR) is emitted and pushed on
11475 the stack. Need to optimize cases when the vtype is used immediately as in
11476 argument passing, stloc etc.
11477 - Instead of the to_end stuff in the old JIT, simply call the function handling
11478 the values on the stack before emitting the last instruction of the bb.
11481 #endif /* DISABLE_JIT */