2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whether 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
206 switch (type->type) {
209 case MONO_TYPE_BOOLEAN:
221 case MONO_TYPE_FNPTR:
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
231 #if SIZEOF_REGISTER == 8
240 case MONO_TYPE_VALUETYPE:
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 case MONO_TYPE_TYPEDBYREF:
250 case MONO_TYPE_GENERICINST:
251 type = &type->data.generic_class->container_class->byval_arg;
255 g_assert (cfg->generic_sharing_context);
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
281 * Can't put this at the beginning, since other files reference stuff from this
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_REGISTER == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow reaches after leaving 'from'.
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
394 if (from->cil_code) {
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 printf ("edge from entry to exit\n");
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 switch (mono_type_get_underlying_type (t)->type) {
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single join point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the join point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpose, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
1312 locals = bb->out_stack;
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1363 if (cfg->compile_aot) {
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1378 * stored in "klass_reg" implements the interface "klass".
1381 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 int ibitmap_byte_reg = alloc_preg (cfg);
1386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1388 if (cfg->compile_aot) {
1389 int iid_reg = alloc_preg (cfg);
1390 int shifted_iid_reg = alloc_preg (cfg);
1391 int ibitmap_byte_address_reg = alloc_preg (cfg);
1392 int masked_iid_reg = alloc_preg (cfg);
1393 int iid_one_bit_reg = alloc_preg (cfg);
1394 int iid_bit_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1400 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1410 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1411 * stored in "vtable_reg" implements the interface "klass".
1414 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1416 int ibitmap_reg = alloc_preg (cfg);
1417 int ibitmap_byte_reg = alloc_preg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1421 if (cfg->compile_aot) {
1422 int iid_reg = alloc_preg (cfg);
1423 int shifted_iid_reg = alloc_preg (cfg);
1424 int ibitmap_byte_address_reg = alloc_preg (cfg);
1425 int masked_iid_reg = alloc_preg (cfg);
1426 int iid_one_bit_reg = alloc_preg (cfg);
1427 int iid_bit_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1433 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 * Emit code which checks whenever the interface id of @klass is smaller than
1444 * than the value given by max_iid_reg.
1447 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 if (cfg->compile_aot) {
1451 int iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1463 /* Same as above, but obtains max_iid from a vtable */
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1486 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1488 int idepth_reg = alloc_preg (cfg);
1489 int stypes_reg = alloc_preg (cfg);
1490 int stype = alloc_preg (cfg);
1492 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1499 if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1510 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int intf_reg = alloc_preg (cfg);
1514 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1515 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 * Variant of the above that takes a register to the class, not the vtable.
1527 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1529 int intf_bit_reg = alloc_preg (cfg);
1531 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1532 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1541 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1543 if (cfg->compile_aot) {
1544 int const_reg = alloc_preg (cfg);
1545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1556 if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1567 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 int rank_reg = alloc_preg (cfg);
1571 int eclass_reg = alloc_preg (cfg);
1573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1575 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1576 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1578 if (klass->cast_class == mono_defaults.object_class) {
1579 int parent_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1581 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1582 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class) {
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1589 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1592 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1595 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1596 /* Check that the object is a vector too */
1597 int bounds_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1600 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1603 int idepth_reg = alloc_preg (cfg);
1604 int stypes_reg = alloc_preg (cfg);
1605 int stype = alloc_preg (cfg);
1607 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1608 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1610 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1614 mini_emit_class_check (cfg, stype, klass);
1619 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1623 g_assert (val == 0);
1628 if ((size <= 4) && (size <= align)) {
1631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 #if SIZEOF_REGISTER == 8
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1647 val_reg = alloc_preg (cfg);
1649 if (SIZEOF_REGISTER == 8)
1650 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1655 /* This could be optimized further if neccesary */
1657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1664 #if !NO_UNALIGNED_ACCESS
1665 if (SIZEOF_REGISTER == 8) {
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1696 #endif /* DISABLE_JIT */
1699 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1707 /* This could be optimized further if neccesary */
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1718 #if !NO_UNALIGNED_ACCESS
1719 if (SIZEOF_REGISTER == 8) {
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1740 cur_reg = alloc_preg (cfg);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1748 cur_reg = alloc_preg (cfg);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1760 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1763 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1766 type = mini_get_basic_type_from_generic (gsctx, type);
1767 switch (type->type) {
1768 case MONO_TYPE_VOID:
1769 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1772 case MONO_TYPE_BOOLEAN:
1775 case MONO_TYPE_CHAR:
1778 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 case MONO_TYPE_FNPTR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_CLASS:
1785 case MONO_TYPE_STRING:
1786 case MONO_TYPE_OBJECT:
1787 case MONO_TYPE_SZARRAY:
1788 case MONO_TYPE_ARRAY:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1795 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1796 case MONO_TYPE_VALUETYPE:
1797 if (type->data.klass->enumtype) {
1798 type = mono_class_enum_basetype (type->data.klass);
1801 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 case MONO_TYPE_TYPEDBYREF:
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_GENERICINST:
1805 type = &type->data.generic_class->container_class->byval_arg;
1808 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1814 * target_type_is_incompatible:
1815 * @cfg: MonoCompile context
1817 * Check that the item @arg on the evaluation stack can be stored
1818 * in the target type (can be a local, or field, etc).
1819 * The cfg arg can be used to check if we need verification or just
1822 * Returns: non-0 value if arg can't be stored on a target.
1825 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1827 MonoType *simple_type;
1830 if (target->byref) {
1831 /* FIXME: check that the pointed to types match */
1832 if (arg->type == STACK_MP)
1833 return arg->klass != mono_class_from_mono_type (target);
1834 if (arg->type == STACK_PTR)
1839 simple_type = mono_type_get_underlying_type (target);
1840 switch (simple_type->type) {
1841 case MONO_TYPE_VOID:
1845 case MONO_TYPE_BOOLEAN:
1848 case MONO_TYPE_CHAR:
1851 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 /* STACK_MP is needed when setting pinned locals */
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1861 case MONO_TYPE_FNPTR:
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1872 /* FIXME: check type compatibility */
1876 if (arg->type != STACK_I8)
1881 if (arg->type != STACK_R8)
1884 case MONO_TYPE_VALUETYPE:
1885 if (arg->type != STACK_VTYPE)
1887 klass = mono_class_from_mono_type (simple_type);
1888 if (klass != arg->klass)
1891 case MONO_TYPE_TYPEDBYREF:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_GENERICINST:
1899 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1900 if (arg->type != STACK_VTYPE)
1902 klass = mono_class_from_mono_type (simple_type);
1903 if (klass != arg->klass)
1907 if (arg->type != STACK_OBJ)
1909 /* FIXME: check type compatibility */
1913 case MONO_TYPE_MVAR:
1914 /* FIXME: all the arguments must be references for now,
1915 * later look inside cfg and see if the arg num is
1916 * really a reference
1918 g_assert (cfg->generic_sharing_context);
1919 if (arg->type != STACK_OBJ)
1923 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1954 simple_type = sig->params [i];
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1963 case MONO_TYPE_BOOLEAN:
1966 case MONO_TYPE_CHAR:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1989 if (args [i]->type != STACK_I8)
1994 if (args [i]->type != STACK_R8)
1997 case MONO_TYPE_VALUETYPE:
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_GENERICINST:
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2014 g_error ("unknown type 0x%02x in check_call_signature",
2022 callvirt_to_call (int opcode)
2027 case OP_VOIDCALLVIRT:
2036 g_assert_not_reached ();
2043 callvirt_to_call_membase (int opcode)
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2051 return OP_FCALL_MEMBASE;
2053 return OP_LCALL_MEMBASE;
2055 return OP_VCALL_MEMBASE;
2057 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT method identifier to CALL. On architectures with a
 * dedicated IMT register the method (or IMT_ARG when given) is moved into
 * that register; otherwise the arch back end decides how to pass it.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2089 static MonoJumpInfo *
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2096 ji->data.target = target;
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2104 inline static MonoCallInst *
2105 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2106 MonoInst **args, int calli, int virtual, int tail)
2109 #ifdef MONO_ARCH_SOFT_FLOAT
2114 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2116 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2119 call->signature = sig;
2121 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2124 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2125 call->vret_var = cfg->vret_addr;
2126 //g_assert_not_reached ();
2128 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2129 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2132 temp->backend.is_pinvoke = sig->pinvoke;
2135 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2136 * address of return value to increase optimization opportunities.
2137 * Before vtype decomposition, the dreg of the call ins itself represents the
2138 * fact the call modifies the return value. After decomposition, the call will
2139 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2140 * will be transformed into an LDADDR.
2142 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2143 loada->dreg = alloc_preg (cfg);
2144 loada->inst_p0 = temp;
2145 /* We reference the call too since call->dreg could change during optimization */
2146 loada->inst_p1 = call;
2147 MONO_ADD_INS (cfg->cbb, loada);
2149 call->inst.dreg = temp->dreg;
2151 call->vret_var = loada;
2152 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2153 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2157 * If the call has a float argument, we would need to do an r8->r4 conversion using
2158 * an icall, but that cannot be done during the call sequence since it would clobber
2159 * the call registers + the stack. So we do it before emitting the call.
2161 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2163 MonoInst *in = call->args [i];
2165 if (i >= sig->hasthis)
2166 t = sig->params [i - sig->hasthis];
2168 t = &mono_defaults.int_class->byval_arg;
2169 t = mono_type_get_underlying_type (t);
2171 if (!t->byref && t->type == MONO_TYPE_R4) {
2172 MonoInst *iargs [1];
2176 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2178 /* The result will be in an int vreg */
2179 call->args [i] = conv;
2185 if (COMPILE_LLVM (cfg))
2186 mono_llvm_emit_call (cfg, call);
2188 mono_arch_emit_call (cfg, call);
2190 mono_arch_emit_call (cfg, call);
2193 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2194 cfg->flags |= MONO_CFG_HAS_CALLS;
2199 inline static MonoInst*
2200 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2202 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2204 call->inst.sreg1 = addr->dreg;
2206 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2208 return (MonoInst*)call;
2211 inline static MonoInst*
2212 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2214 #ifdef MONO_ARCH_RGCTX_REG
2219 rgctx_reg = mono_alloc_preg (cfg);
2220 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2222 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2224 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2225 cfg->uses_rgctx_reg = TRUE;
2226 call->rgctx_reg = TRUE;
2228 return (MonoInst*)call;
2230 g_assert_not_reached ();
2236 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2239 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2240 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2242 gboolean might_be_remote;
2243 gboolean virtual = this != NULL;
2244 gboolean enable_for_aot = TRUE;
2248 if (method->string_ctor) {
2249 /* Create the real signature */
2250 /* FIXME: Cache these */
2251 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2252 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2257 might_be_remote = this && sig->hasthis &&
2258 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2259 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2261 context_used = mono_method_check_context_used (method);
2262 if (might_be_remote && context_used) {
2265 g_assert (cfg->generic_sharing_context);
2267 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2269 return mono_emit_calli (cfg, sig, args, addr);
2272 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2274 if (might_be_remote)
2275 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2277 call->method = method;
2278 call->inst.flags |= MONO_INST_HAS_METHOD;
2279 call->inst.inst_left = this;
2282 int vtable_reg, slot_reg, this_reg;
2284 this_reg = this->dreg;
2286 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2287 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2288 /* Make a call to delegate->invoke_impl */
2289 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2290 call->inst.inst_basereg = this_reg;
2291 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2292 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2294 return (MonoInst*)call;
2298 if ((!cfg->compile_aot || enable_for_aot) &&
2299 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2300 (MONO_METHOD_IS_FINAL (method) &&
2301 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2303 * the method is not virtual, we just need to ensure this is not null
2304 * and then we can call the method directly.
2306 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2307 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2310 if (!method->string_ctor) {
2311 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2312 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2313 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2316 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2318 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2320 return (MonoInst*)call;
2323 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2325 * the method is virtual, but we can statically dispatch since either
2326 * it's class or the method itself are sealed.
2327 * But first we need to ensure it's not a null reference.
2329 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2330 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2331 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2333 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2334 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2336 return (MonoInst*)call;
2339 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2341 vtable_reg = alloc_preg (cfg);
2342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2343 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2345 #ifdef MONO_ARCH_HAVE_IMT
2347 guint32 imt_slot = mono_method_get_imt_slot (method);
2348 emit_imt_argument (cfg, call, imt_arg);
2349 slot_reg = vtable_reg;
2350 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2353 if (slot_reg == -1) {
2354 slot_reg = alloc_preg (cfg);
2355 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2356 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2359 slot_reg = vtable_reg;
2360 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2361 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2362 #ifdef MONO_ARCH_HAVE_IMT
2364 g_assert (mono_method_signature (method)->generic_param_count);
2365 emit_imt_argument (cfg, call, imt_arg);
2370 call->inst.sreg1 = slot_reg;
2371 call->virtual = TRUE;
2374 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2376 return (MonoInst*)call;
2380 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2381 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2388 #ifdef MONO_ARCH_RGCTX_REG
2389 rgctx_reg = mono_alloc_preg (cfg);
2390 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2395 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2397 call = (MonoCallInst*)ins;
2399 #ifdef MONO_ARCH_RGCTX_REG
2400 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2401 cfg->uses_rgctx_reg = TRUE;
2402 call->rgctx_reg = TRUE;
2411 static inline MonoInst*
2412 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2414 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2418 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2425 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2428 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2430 return (MonoInst*)call;
2433 inline static MonoInst*
2434 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2436 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2440 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2444 * mono_emit_abs_call:
2446 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2448 inline static MonoInst*
2449 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2450 MonoMethodSignature *sig, MonoInst **args)
2452 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2456 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2459 if (cfg->abs_patches == NULL)
2460 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2461 g_hash_table_insert (cfg->abs_patches, ji, ji);
2462 ins = mono_emit_native_call (cfg, ji, sig, args);
2463 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2468 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2470 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2471 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2475 * Native code might return non register sized integers
2476 * without initializing the upper bits.
2478 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2479 case OP_LOADI1_MEMBASE:
2480 widen_op = OP_ICONV_TO_I1;
2482 case OP_LOADU1_MEMBASE:
2483 widen_op = OP_ICONV_TO_U1;
2485 case OP_LOADI2_MEMBASE:
2486 widen_op = OP_ICONV_TO_I2;
2488 case OP_LOADU2_MEMBASE:
2489 widen_op = OP_ICONV_TO_U2;
2495 if (widen_op != -1) {
2496 int dreg = alloc_preg (cfg);
2499 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2500 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the corlib-internal String.memcpy helper
 * with 3 arguments.  Aborts if the running corlib does not provide it.
 */
2510 get_memcpy_method (void)
2512 static MonoMethod *memcpy_method = NULL;
2513 if (!memcpy_method) {
2514 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2516 g_error ("Old corlib found. Install a new one");
2518 return memcpy_method;
2522 * Emit code to copy a valuetype of type @klass whose address is stored in
2523 * @src->dreg to memory whose address is stored at @dest->dreg.
/* If @native, the native (marshalled) size is used and the struct is assumed
 * to contain no managed references. */
2526 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2528 MonoInst *iargs [3];
2531 MonoMethod *memcpy_method;
2535 * This check breaks with spilled vars... need to handle it during verification anyway.
2536 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment depend on whether the native or managed layout is wanted. */
2540 n = mono_class_native_size (klass, &align);
2542 n = mono_class_value_size (klass, &align);
2544 #if HAVE_WRITE_BARRIERS
2545 /* if native is true there should be no references in the struct */
/* With a write-barrier GC, copies of reference-carrying structs must go
 * through mono_value_copy so barriers are executed — except when the
 * destination is known to be on the stack. */
2546 if (klass->has_references && !native) {
2547 /* Avoid barriers when storing to the stack */
2548 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2549 (dest->opcode == OP_LDADDR))) {
2552 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2554 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined; larger ones call the corlib memcpy helper. */
2559 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2560 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2561 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2565 EMIT_NEW_ICONST (cfg, iargs [2], n);
2567 memcpy_method = get_memcpy_method ();
2568 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the corlib-internal String.memset helper
 * with 3 arguments.  Aborts if the running corlib does not provide it.
 */
2573 get_memset_method (void)
2575 static MonoMethod *memset_method = NULL;
2576 if (!memset_method) {
2577 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2579 g_error ("Old corlib found. Install a new one");
2581 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype of type @klass whose address
 * is in @dest->dreg (the CIL 'initobj' semantics).  Small sizes are inlined
 * as stores; larger ones call the corlib memset helper with value 0.
 */
2585 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2587 MonoInst *iargs [3];
2590 MonoMethod *memset_method;
2592 /* FIXME: Optimize this for the case when dest is an LDADDR */
2594 mono_class_init (klass);
2595 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words. */
2597 if (n <= sizeof (gpointer) * 5) {
2598 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2601 memset_method = get_memset_method ();
2603 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2604 EMIT_NEW_ICONST (cfg, iargs [2], n);
2605 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR which loads the runtime generic context for METHOD.  Only valid
 * under generic sharing.  The source of the context depends on how METHOD is
 * shared: the method runtime generic context (mrgctx) variable, the vtable
 * variable, or the 'this' argument's vtable.
 */
2610 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2612 MonoInst *this = NULL;
2614 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods that don't use a method context get the
 * context from 'this' (argument 0). */
2616 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2617 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2618 !method->klass->valuetype)
2619 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Case 1: method-level generic context => load the mrgctx variable. */
2621 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2622 MonoInst *mrgctx_loc, *mrgctx_var;
2625 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2627 mrgctx_loc = mono_get_vtable_var (cfg);
2628 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Case 2: static or valuetype method => load the vtable variable (possibly
 * indirectly through the mrgctx's class_vtable field). */
2631 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2632 MonoInst *vtable_loc, *vtable_var;
2636 vtable_loc = mono_get_vtable_var (cfg);
2637 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2639 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2640 MonoInst *mrgctx_var = vtable_var;
2643 vtable_reg = alloc_preg (cfg);
2644 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2645 vtable_var->type = STACK_PTR;
/* Case 3: instance method => load the vtable from 'this'. */
2651 int vtable_reg, res_reg;
2653 vtable_reg = alloc_preg (cfg);
2654 res_reg = alloc_preg (cfg);
2655 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) and fill in a MonoJumpInfoRgctxEntry
 * describing an RGCTX slot fetch: the method it belongs to, whether the
 * lookup goes through an mrgctx, the patch describing the looked-up entity
 * (PATCH_TYPE/PATCH_DATA) and the kind of info requested (INFO_TYPE).
 */
2660 static MonoJumpInfoRgctxEntry *
2661 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2663 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2664 res->method = method;
2665 res->in_mrgctx = in_mrgctx;
2666 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2667 res->data->type = patch_type;
2668 res->data->data.target = patch_data;
2669 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX fetch trampoline which resolves ENTRY,
 * passing the runtime generic context RGCTX as the single argument.
 */
2674 static inline MonoInst*
2675 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2677 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR which fetches the RGCTX information of kind RGCTX_TYPE for KLASS
 * (e.g. its vtable) through the runtime generic context of the current method.
 */
2681 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2682 MonoClass *klass, int rgctx_type)
2684 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2685 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2687 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR which fetches the RGCTX information of kind RGCTX_TYPE for the
 * method CMETHOD through the runtime generic context of the current method.
 */
2691 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2692 MonoMethod *cmethod, int rgctx_type)
2694 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2695 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2697 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which fetches the RGCTX information of kind RGCTX_TYPE for FIELD
 * through the runtime generic context of the current method.
 */
2701 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2702 MonoClassField *field, int rgctx_type)
2704 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2705 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2707 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS.  The vtable
 * argument comes from the RGCTX when KLASS depends on the shared generic
 * context, otherwise from a vtable constant.  On architectures with a
 * dedicated vtable register the argument is passed in that register.
 */
2711 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2713 MonoInst *vtable_arg;
2715 int context_used = 0;
2717 if (cfg->generic_sharing_context)
2718 context_used = mono_class_check_context_used (klass);
2721 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2722 klass, MONO_RGCTX_INFO_VTABLE);
2724 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2728 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2731 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2732 #ifdef MONO_ARCH_VTABLE_REG
2733 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2734 cfg->uses_vtable_reg = TRUE;
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise.  The comparison strategy
 * depends on compilation mode: class pointers under MONO_OPT_SHARED, an
 * RGCTX-fetched vtable under generic sharing, and vtable constants otherwise
 * (AOT needs an extra register for the vtable constant).
 */
2741 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2743 int vtable_reg = alloc_preg (cfg);
2744 int context_used = 0;
2746 if (cfg->generic_sharing_context)
2747 context_used = mono_class_check_context_used (array_class);
2749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2751 if (cfg->opt & MONO_OPT_SHARED) {
2752 int class_reg = alloc_preg (cfg);
2753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2754 if (cfg->compile_aot) {
2755 int klass_reg = alloc_preg (cfg);
2756 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2757 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2761 } else if (context_used) {
2762 MonoInst *vtable_ins;
2764 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2765 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2767 if (cfg->compile_aot) {
2768 int vt_reg = alloc_preg (cfg);
2769 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2770 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2776 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When running with --debug=casts, emit code which stores the source class
 * (read from OBJ_REG's vtable) and the destination class KLASS into
 * thread-local JIT data, so a failing cast can produce a detailed message.
 * No-op unless better_cast_details is enabled.
 */
2780 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2782 if (mini_get_debug_options ()->better_cast_details) {
2783 int to_klass_reg = alloc_preg (cfg);
2784 int vtable_reg = alloc_preg (cfg);
2785 int klass_reg = alloc_preg (cfg);
2786 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without a TLS intrinsic cannot support this option. */
2789 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2793 MONO_ADD_INS (cfg->cbb, tls_get);
2794 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2797 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2798 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): after a successful cast, clear the
 * thread-local cast-details state so stale data is not reported later.
 */
2804 reset_cast_details (MonoCompile *cfg)
2806 /* Reset the variables holding the cast details */
2807 if (mini_get_debug_options ()->better_cast_details) {
2808 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2810 MONO_ADD_INS (cfg->cbb, tls_get);
2811 /* It is enough to reset the from field */
2812 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2817 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2818 * generic code is generated.
/* Unboxing a Nullable<T> is implemented by calling the class's own
 * Nullable.Unbox helper; under generic sharing the helper's address is
 * fetched from the RGCTX and called indirectly. */
2821 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2823 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2826 MonoInst *rgctx, *addr;
2828 /* FIXME: What if the class is shared? We might not
2829 have to get the address of the method from the
2831 addr = emit_get_rgctx_method (cfg, context_used, method,
2832 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2834 rgctx = emit_get_rgctx (cfg, method, context_used);
2836 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2838 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the type checks for CIL 'unbox' on the object in SP [0] and return
 * an instruction computing the address of the boxed value (object address +
 * sizeof (MonoObject)).  Throws InvalidCastException when the object's
 * element class does not match KLASS; under generic sharing the expected
 * element class is fetched from the RGCTX.
 */
2843 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2847 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2848 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2849 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2850 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2852 obj_reg = sp [0]->dreg;
2853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2854 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2856 /* FIXME: generics */
2857 g_assert (klass->rank == 0);
/* unbox only applies to non-array objects: rank must be 0. */
2860 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2861 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2863 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic path: compare against the RGCTX-provided element class. */
2867 MonoInst *element_class;
2869 /* This assertion is from the unboxcast insn */
2870 g_assert (klass->rank == 0);
2872 element_class = emit_get_rgctx_klass (cfg, context_used,
2873 klass->element_class, MONO_RGCTX_INFO_KLASS);
2875 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2876 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2878 save_cast_details (cfg, klass->element_class, obj_reg);
2879 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2880 reset_cast_details (cfg);
/* Result: address of the value area, just past the MonoObject header. */
2883 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2884 MONO_ADD_INS (cfg->cbb, add);
2885 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code allocating an object of class KLASS (FOR_BOX is TRUE when the
 * allocation implements a box operation).  Chooses between mono_object_new
 * (shared/appdomain-aware), a corlib-token helper for out-of-line AOT code,
 * a GC managed allocator method, and the per-vtable allocation function.
 */
2892 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2894 MonoInst *iargs [2];
2897 if (cfg->opt & MONO_OPT_SHARED) {
2898 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2899 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2901 alloc_ftn = mono_object_new;
2902 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2903 /* This happens often in argument checking code, eg. throw new FooException... */
2904 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2905 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2906 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2908 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
/* No managed allocators when cross-compiling. */
2909 #ifdef MONO_CROSS_COMPILE
2910 MonoMethod *managed_alloc = NULL;
2912 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2916 if (managed_alloc) {
2917 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2918 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2920 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in pointer-sized words
 * as an extra first argument (pass_lw). */
2922 guint32 lw = vtable->klass->instance_size;
2923 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2924 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2925 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2928 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2932 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the class is only known at runtime through
 * DATA_INST (a vtable/class obtained from the RGCTX), as used by shared
 * generic code.  Managed allocators are currently unavailable here because
 * the open class's vtable cannot be computed at compile time.
 */
2936 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2939 MonoInst *iargs [2];
2940 MonoMethod *managed_alloc = NULL;
2944 FIXME: we cannot get managed_alloc here because we can't get
2945 the class's vtable (because it's not a closed class)
2947 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2948 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2951 if (cfg->opt & MONO_OPT_SHARED) {
2952 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2953 iargs [1] = data_inst;
2954 alloc_ftn = mono_object_new;
2956 if (managed_alloc) {
2957 iargs [0] = data_inst;
2958 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2961 iargs [0] = data_inst;
2962 alloc_ftn = mono_object_new_specific;
2965 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit code boxing VAL as an instance of KLASS: allocate the object and
 * store the value just past the MonoObject header.  Nullable<T> is boxed by
 * calling the class's own Box helper instead.
 */
2969 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2971 MonoInst *alloc, *ins;
2973 if (mono_class_is_nullable (klass)) {
2974 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2975 return mono_emit_method_call (cfg, method, &val, NULL);
2978 alloc = handle_alloc (cfg, klass, TRUE);
2980 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic variant of handle_box (): the runtime class comes from
 * DATA_INST (RGCTX data).  Nullable<T> boxing calls the class's Box helper
 * indirectly through an RGCTX-fetched code address.
 */
2986 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2988 MonoInst *alloc, *ins;
2990 if (mono_class_is_nullable (klass)) {
2991 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2992 /* FIXME: What if the class is shared? We might not
2993 have to get the method address from the RGCTX. */
2994 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2995 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2996 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2998 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3000 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3002 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit the CIL 'castclass' checks for SRC against KLASS: null passes
 * through, interfaces use the interface-cast helper, sealed non-array
 * classes can be checked with a single vtable comparison, everything else
 * goes through the generic castclass helper.  Throws InvalidCastException on
 * failure; cast details are saved/reset for --debug=casts.
 */
3009 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3011 MonoBasicBlock *is_null_bb;
3012 int obj_reg = src->dreg;
3013 int vtable_reg = alloc_preg (cfg);
3015 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully. */
3017 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3020 save_cast_details (cfg, klass, obj_reg);
3022 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3024 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3026 int klass_reg = alloc_preg (cfg);
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class fast path: a single pointer comparison suffices. */
3030 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3031 /* the remoting code is broken, access the class for now */
3033 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3034 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3039 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3042 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3046 MONO_START_BB (cfg, is_null_bb);
3048 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit the CIL 'isinst' test of SRC against KLASS.  The result register
 * starts as a copy of the object reference; the false path overwrites it
 * with NULL, so the value is either the original object (match, or null
 * input) or NULL (no match).  Arrays, enums, Nullable<T>, interfaces and
 * sealed classes each have dedicated check sequences.
 */
3054 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3057 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3058 int obj_reg = src->dreg;
3059 int vtable_reg = alloc_preg (cfg);
3060 int res_reg = alloc_preg (cfg);
3062 NEW_BBLOCK (cfg, is_null_bb);
3063 NEW_BBLOCK (cfg, false_bb);
3064 NEW_BBLOCK (cfg, end_bb);
3066 /* Do the assignment at the beginning, so the other assignment can be if converted */
3067 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3068 ins->type = STACK_OBJ;
/* Null input: result stays null (the copy above already holds it). */
3071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3074 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3076 /* the is_null_bb target simply copies the input register to the output */
3077 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3079 int klass_reg = alloc_preg (cfg);
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: ranks must match, then the element (cast) classes are
 * compared, with special handling for object/enum element types. */
3084 int rank_reg = alloc_preg (cfg);
3085 int eclass_reg = alloc_preg (cfg);
3087 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3092 if (klass->cast_class == mono_defaults.object_class) {
3093 int parent_reg = alloc_preg (cfg);
3094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3095 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3096 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3098 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3099 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3100 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3102 } else if (klass->cast_class == mono_defaults.enum_class) {
3103 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3105 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3106 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3108 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3109 /* Check that the object is a vector too */
3110 int bounds_reg = alloc_preg (cfg);
3111 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3116 /* the is_null_bb target simply copies the input register to the output */
3117 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3119 } else if (mono_class_is_nullable (klass)) {
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3121 /* the is_null_bb target simply copies the input register to the output */
3122 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: a single pointer comparison suffices. */
3124 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3125 /* the remoting code is broken, access the class for now */
3127 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3133 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3137 /* the is_null_bb target simply copies the input register to the output */
3138 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* False path: result becomes NULL. */
3143 MONO_START_BB (cfg, false_bb);
3145 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3148 MONO_START_BB (cfg, is_null_bb);
3150 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware variant of isinst used by transparent proxies.
 * See the result encoding below; proxy objects whose exact type cannot be
 * determined statically yield 2 so the caller can fall back to a runtime
 * check.
 */
3156 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3158 /* This opcode takes as input an object reference and a class, and returns:
3159 0) if the object is an instance of the class,
3160 1) if the object is not instance of the class,
3161 2) if the object is a proxy whose type cannot be determined */
3164 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3165 int obj_reg = src->dreg;
3166 int dreg = alloc_ireg (cfg);
3168 int klass_reg = alloc_preg (cfg);
3170 NEW_BBLOCK (cfg, true_bb);
3171 NEW_BBLOCK (cfg, false_bb);
3172 NEW_BBLOCK (cfg, false2_bb);
3173 NEW_BBLOCK (cfg, end_bb);
3174 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
3176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3177 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3179 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3180 NEW_BBLOCK (cfg, interface_fail_bb);
3182 tmp_reg = alloc_preg (cfg);
3183 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3184 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3185 MONO_START_BB (cfg, interface_fail_bb);
3186 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a custom-typed transparent proxy can
 * still yield "can't determine" (result 2). */
3188 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3190 tmp_reg = alloc_preg (cfg);
3191 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3192 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3193 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect transparent proxies and test against the
 * remote class's proxy_class when applicable. */
3195 tmp_reg = alloc_preg (cfg);
3196 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3197 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3199 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3200 tmp_reg = alloc_preg (cfg);
3201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3202 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3204 tmp_reg = alloc_preg (cfg);
3205 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3209 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3212 MONO_START_BB (cfg, no_proxy_bb);
3214 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results (1, 2, 0) and converge. */
3217 MONO_START_BB (cfg, false_bb);
3219 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3220 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3222 MONO_START_BB (cfg, false2_bb);
3224 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3227 MONO_START_BB (cfg, true_bb);
3229 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3231 MONO_START_BB (cfg, end_bb);
3234 MONO_INST_NEW (cfg, ins, OP_ICONST);
3236 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware variant of castclass used by transparent
 * proxies.  See the result encoding below; unlike handle_cisinst, a
 * definite mismatch throws InvalidCastException instead of returning a
 * failure code.
 */
3242 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3244 /* This opcode takes as input an object reference and a class, and returns:
3245 0) if the object is an instance of the class,
3246 1) if the object is a proxy whose type cannot be determined
3247 an InvalidCastException exception is thrown otherwhise*/
3250 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3251 int obj_reg = src->dreg;
3252 int dreg = alloc_ireg (cfg);
3253 int tmp_reg = alloc_preg (cfg);
3254 int klass_reg = alloc_preg (cfg);
3256 NEW_BBLOCK (cfg, end_bb);
3257 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
3259 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3260 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3262 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3263 NEW_BBLOCK (cfg, interface_fail_bb);
3265 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3266 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3267 MONO_START_BB (cfg, interface_fail_bb);
3268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a custom-typed transparent proxy avoids
 * the exception; such a proxy yields result 1. */
3270 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3272 tmp_reg = alloc_preg (cfg);
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3275 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3277 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: detect transparent proxies; test against the remote
 * class's proxy_class, falling back to a normal castclass otherwise. */
3281 NEW_BBLOCK (cfg, no_proxy_bb);
3283 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3284 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3285 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3287 tmp_reg = alloc_preg (cfg);
3288 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3291 tmp_reg = alloc_preg (cfg);
3292 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3294 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3296 NEW_BBLOCK (cfg, fail_1_bb);
3298 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3300 MONO_START_BB (cfg, fail_1_bb);
3302 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3303 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3305 MONO_START_BB (cfg, no_proxy_bb);
3307 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3310 MONO_START_BB (cfg, ok_result_bb);
3312 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3314 MONO_START_BB (cfg, end_bb);
3317 MONO_INST_NEW (cfg, ins, OP_ICONST);
3319 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of class
 * KLASS and fill in its target, method, method_code and invoke_impl fields.
 * invoke_impl is set to the delegate trampoline (an AOT constant when
 * compiling AOT), which performs the remaining runtime checks.
 */
3324 static G_GNUC_UNUSED MonoInst*
3325 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3327 gpointer *trampoline;
3328 MonoInst *obj, *method_ins, *tramp_ins;
3332 obj = handle_alloc (cfg, klass, FALSE);
3334 /* Inline the contents of mono_delegate_ctor */
3336 /* Set target field */
3337 /* Optimize away setting of NULL target */
3338 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3341 /* Set method field */
3342 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3343 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3346 * To avoid looking up the compiled code belonging to the target method
3347 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3348 * store it, and we fill it after the method has been compiled.
/* The slot lives in a per-domain hash keyed by method; create it under the
 * domain lock on first use. */
3350 if (!cfg->compile_aot && !method->dynamic) {
3351 MonoInst *code_slot_ins;
3353 domain = mono_domain_get ();
3354 mono_domain_lock (domain);
3355 if (!domain_jit_info (domain)->method_code_hash)
3356 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3357 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3359 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3360 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3362 mono_domain_unlock (domain);
3364 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3368 /* Set invoke_impl field */
3369 if (cfg->compile_aot) {
3370 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3372 trampoline = mono_create_delegate_trampoline (klass);
3373 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3375 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3377 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call allocating a multi-dimensional array of rank RANK with the
 * dimension arguments in SP, through the registered mono_array_new_va icall
 * wrapper.  Marks the method as using varargs (which also disables LLVM).
 */
3383 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3385 MonoJitICallInfo *info;
3387 /* Need to register the icall so it gets an icall wrapper */
3388 info = mono_get_array_new_va_icall (rank);
3390 cfg->flags |= MONO_CFG_HAS_VARARGS;
3392 /* mono_array_new_va () needs a vararg calling convention */
3393 cfg->disable_llvm = TRUE;
3395 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3396 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable, emit an OP_LOAD_GOTADDR at the very
 * start of the entry basic block to initialize it, and a dummy use in the
 * exit block so liveness analysis keeps the variable alive for the whole
 * method.  Idempotent: does nothing once the GOT var is allocated.
 */
3400 mono_emit_load_got_addr (MonoCompile *cfg)
3402 MonoInst *getaddr, *dummy_use;
3404 if (!cfg->got_var || cfg->got_var_allocated)
3407 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3408 getaddr->dreg = cfg->got_var->dreg;
3410 /* Add it to the start of the first bblock */
3411 if (cfg->bb_entry->code) {
3412 getaddr->next = cfg->bb_entry->code;
3413 cfg->bb_entry->code = getaddr;
3416 MONO_ADD_INS (cfg->bb_entry, getaddr);
3418 cfg->got_var_allocated = TRUE;
3421 * Add a dummy use to keep the got_var alive, since real uses might
3422 * only be generated by the back ends.
3423 * Add it to end_bblock, so the variable's lifetime covers the whole
3425 * It would be better to make the usage of the got var explicit in all
3426 * cases when the backend needs it (i.e. calls, throw etc.), so this
3427 * wouldn't be needed.
3429 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3430 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline code-size limit, initialized on first use from the
 * MONO_INLINELIMIT environment variable (default: INLINE_LENGTH_LIMIT). */
3433 static int inline_limit;
3434 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects runtime/icall/noinline/synchronized/pinvoke
 * methods, MarshalByRef classes, methods with exception clauses, bodies at
 * or over the inline_limit, methods whose class would need a cctor run
 * inside the inlined code, methods with declarative security, and (under
 * MONO_ARCH_SOFT_FLOAT) methods taking or returning R4 values.
 */
3437 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3439 MonoMethodHeader *header;
3441 #ifdef MONO_ARCH_SOFT_FLOAT
3442 MonoMethodSignature *sig = mono_method_signature (method);
3446 if (cfg->generic_sharing_context)
3449 #ifdef MONO_ARCH_HAVE_LMF_OPS
3450 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3451 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3452 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3456 if (method->is_inflated)
3457 /* Avoid inflating the header */
3458 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3460 header = mono_method_get_header (method);
3462 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3463 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3464 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3465 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3466 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3467 (method->klass->marshalbyref) ||
3468 !header || header->num_clauses)
3471 /* also consider num_locals? */
3472 /* Do the size check early to avoid creating vtables */
3473 if (!inline_limit_inited) {
3474 if (getenv ("MONO_INLINELIMIT"))
3475 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3477 inline_limit = INLINE_LENGTH_LIMIT;
3478 inline_limit_inited = TRUE;
3480 if (header->code_size >= inline_limit)
3484 * if we can initialize the class of the method right away, we do,
3485 * otherwise we don't allow inlining if the class needs initialization,
3486 * since it would mean inserting a call to mono_runtime_class_init()
3487 * inside the inlined code
3489 if (!(cfg->opt & MONO_OPT_SHARED)) {
3490 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3491 if (cfg->run_cctors && method->klass->has_cctor) {
3492 if (!method->klass->runtime_info)
3493 /* No vtable created yet */
3495 vtable = mono_class_vtable (cfg->domain, method->klass);
3498 /* This makes so that inline cannot trigger */
3499 /* .cctors: too many apps depend on them */
3500 /* running with a specific order... */
3501 if (! vtable->initialized)
3503 mono_runtime_class_init (vtable);
3505 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3506 if (!method->klass->runtime_info)
3507 /* No vtable created yet */
3509 vtable = mono_class_vtable (cfg->domain, method->klass);
3512 if (!vtable->initialized)
3517 * If we're compiling for shared code
3518 * the cctor will need to be run at aot method load time, for example,
3519 * or at the end of the compilation of the inlining method.
3521 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3526 * CAS - do not inline methods with declarative security
3527 * Note: this has to be before any possible return TRUE;
3529 if (mono_method_has_declsec (method))
3532 #ifdef MONO_ARCH_SOFT_FLOAT
3534 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
/* Soft-float targets cannot inline methods taking R4 parameters either */
3536 for (i = 0; i < sig->param_count; ++i)
3537 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD against the class of
 * VTABLE requires emitting a class-initialization (cctor) call. Already
 * initialized vtables (in non-AOT mode), BeforeFieldInit classes, classes
 * that need no cctor, and instance methods of the class itself (the cctor
 * already ran before the method could be called) do not need one.
 */
3545 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3547 if (vtable->initialized && !cfg->compile_aot)
3550 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3553 if (!mono_class_needs_cctor_run (vtable->klass, method))
3556 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3557 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS: bounds-check against max_length, then
 * arr + index * element_size + offsetof (MonoArray, vector). On x86/amd64 a
 * single LEA is used for power-of-two element sizes.
 */
3564 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3568 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3570 mono_class_init (klass);
3571 size = mono_class_array_element_size (klass);
3573 mult_reg = alloc_preg (cfg);
3574 array_reg = arr->dreg;
3575 index_reg = index->dreg;
3577 #if SIZEOF_REGISTER == 8
3578 /* The array reg is 64 bits but the index reg is only 32 */
3579 index2_reg = alloc_preg (cfg);
3580 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3582 if (index->type == STACK_I8) {
3583 index2_reg = alloc_preg (cfg);
3584 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3586 index2_reg = index_reg;
3590 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3592 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3593 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 maps element size -> LEA shift amount; -1 entries are unreachable */
3594 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3596 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3597 ins->type = STACK_PTR;
/* Generic path: explicit multiply + add for arbitrary element sizes */
3603 add_reg = alloc_preg (cfg);
3605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3606 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3607 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3608 ins->type = STACK_PTR;
3609 MONO_ADD_INS (cfg->cbb, ins);
3614 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS. Loads the bounds
 * array, range-checks both (index - lower_bound) values against the
 * per-dimension lengths (throwing IndexOutOfRangeException on failure), then
 * computes (realidx1 * len2 + realidx2) * element_size + vector offset.
 * Only compiled on targets with native mul/div (uses OP_PMUL).
 */
3616 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3618 int bounds_reg = alloc_preg (cfg);
3619 int add_reg = alloc_preg (cfg);
3620 int mult_reg = alloc_preg (cfg);
3621 int mult2_reg = alloc_preg (cfg);
3622 int low1_reg = alloc_preg (cfg);
3623 int low2_reg = alloc_preg (cfg);
3624 int high1_reg = alloc_preg (cfg);
3625 int high2_reg = alloc_preg (cfg);
3626 int realidx1_reg = alloc_preg (cfg);
3627 int realidx2_reg = alloc_preg (cfg);
3628 int sum_reg = alloc_preg (cfg);
3633 mono_class_init (klass);
3634 size = mono_class_array_element_size (klass);
3636 index1 = index_ins1->dreg;
3637 index2 = index_ins2->dreg;
3639 /* range checking */
3640 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3641 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3643 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3644 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3645 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3646 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3647 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
/* unsigned compare catches negative realidx as well as too-large values */
3648 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3649 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* second dimension: bounds entry at bounds_reg + sizeof (MonoArrayBounds) */
3651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3652 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3653 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3654 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3655 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3656 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3657 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3659 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3660 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3661 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3662 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3663 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3665 ins->type = STACK_MP;
3667 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Get/Set/Address
 * helper call CMETHOD with the array + indexes in SP. Rank 1 (and rank 2
 * with intrinsics enabled on capable targets) use inline address code;
 * other ranks fall back to a call to the marshal-generated Address wrapper.
 * IS_SET indicates a setter, whose trailing value argument is not an index.
 */
3674 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3678 MonoMethod *addr_method;
3681 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3684 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3686 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3687 /* emit_ldelema_2 depends on OP_LMUL */
3688 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3689 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3693 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3694 addr_method = mono_marshal_get_array_address (rank, element_size);
3695 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsic dispatcher: if CMETHOD (with signature FSIG and argument
 * instructions ARGS) has a known fast IR expansion, emit it into cfg->cbb
 * and return the resulting value instruction; otherwise defer to
 * mono_arch_emit_inst_for_method (). Recognized intrinsics include
 * String.get_Chars/get_Length/InternalSetChar, Object.GetType/
 * InternalGetHashCode/.ctor, Array get_Rank/get_Length,
 * RuntimeHelpers.get_OffsetToStringData, Thread/Monitor helpers,
 * Array.GetGenericValueImpl, the Interlocked family (atomic add/exchange/
 * CAS where the arch supports them), Debugger.Break,
 * Environment.get_IsRunningOnWindows, Math intrinsics and SIMD.
 */
3701 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3703 MonoInst *ins = NULL;
/* lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
3705 static MonoClass *runtime_helpers_class = NULL;
3706 if (! runtime_helpers_class)
3707 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3708 "System.Runtime.CompilerServices", "RuntimeHelpers");
3710 if (cmethod->klass == mono_defaults.string_class) {
3711 if (strcmp (cmethod->name, "get_Chars") == 0) {
3712 int dreg = alloc_ireg (cfg);
3713 int index_reg = alloc_preg (cfg);
3714 int mult_reg = alloc_preg (cfg);
3715 int add_reg = alloc_preg (cfg);
3717 #if SIZEOF_REGISTER == 8
3718 /* The array reg is 64 bits but the index reg is only 32 */
3719 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3721 index_reg = args [1]->dreg;
3723 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3725 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3726 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3727 add_reg = ins->dreg;
3728 /* Avoid a warning */
3730 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* non-LEA path: index * 2 (UTF-16 chars) + string base */
3733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3734 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3735 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3736 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3738 type_from_op (ins, NULL, NULL);
3740 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3741 int dreg = alloc_ireg (cfg);
3742 /* Decompose later to allow more optimizations */
3743 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3744 ins->type = STACK_I4;
3745 cfg->cbb->has_array_access = TRUE;
3746 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3749 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3750 int mult_reg = alloc_preg (cfg);
3751 int add_reg = alloc_preg (cfg);
3753 /* The corlib functions check for oob already. */
3754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3755 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3759 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType (): load vtable, then the MonoType from the vtable */
3761 if (strcmp (cmethod->name, "GetType") == 0) {
3762 int dreg = alloc_preg (cfg);
3763 int vt_reg = alloc_preg (cfg);
3764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3765 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3766 type_from_op (ins, NULL, NULL);
3769 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* hash the object address (valid only with a non-moving GC) */
3770 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3771 int dreg = alloc_ireg (cfg);
3772 int t1 = alloc_ireg (cfg);
3774 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3775 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3776 ins->type = STACK_I4;
3780 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor () does nothing: emit a NOP */
3781 MONO_INST_NEW (cfg, ins, OP_NOP);
3782 MONO_ADD_INS (cfg->cbb, ins);
3786 } else if (cmethod->klass == mono_defaults.array_class) {
/* only get_Rank / get_Length are handled; quick first-char filter */
3787 if (cmethod->name [0] != 'g')
3790 if (strcmp (cmethod->name, "get_Rank") == 0) {
3791 int dreg = alloc_ireg (cfg);
3792 int vtable_reg = alloc_preg (cfg);
3793 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3794 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3795 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3796 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3797 type_from_op (ins, NULL, NULL);
3800 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3801 int dreg = alloc_ireg (cfg);
3803 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3804 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3805 type_from_op (ins, NULL, NULL);
3810 } else if (cmethod->klass == runtime_helpers_class) {
3812 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3813 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3817 } else if (cmethod->klass == mono_defaults.thread_class) {
3818 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3819 ins->dreg = alloc_preg (cfg);
3820 ins->type = STACK_OBJ;
3821 MONO_ADD_INS (cfg->cbb, ins);
3823 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3824 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3825 MONO_ADD_INS (cfg->cbb, ins);
3827 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3828 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3829 MONO_ADD_INS (cfg->cbb, ins);
3832 } else if (cmethod->klass == mono_defaults.monitor_class) {
3833 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* fast path: call the monitor enter/exit trampoline with the object in a fixed reg */
3834 if (strcmp (cmethod->name, "Enter") == 0) {
3837 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3838 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3839 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3840 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3842 return (MonoInst*)call;
3843 } else if (strcmp (cmethod->name, "Exit") == 0) {
3846 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3847 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3848 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3849 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3851 return (MonoInst*)call;
3853 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3854 MonoMethod *fast_method = NULL;
3856 /* Avoid infinite recursion */
3857 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3858 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3859 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3862 if (strcmp (cmethod->name, "Enter") == 0 ||
3863 strcmp (cmethod->name, "Exit") == 0)
3864 fast_method = mono_monitor_get_fast_path (cmethod);
3868 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3870 } else if (mini_class_is_system_array (cmethod->klass) &&
3871 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
/* copy arr[idx] into the byref destination via load + store of the element type */
3872 MonoInst *addr, *store, *load;
3873 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3875 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3876 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3877 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3879 } else if (cmethod->klass->image == mono_defaults.corlib &&
3880 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3881 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3884 #if SIZEOF_REGISTER == 8
3885 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3886 /* 64 bit reads are already atomic */
3887 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3888 ins->dreg = mono_alloc_preg (cfg);
3889 ins->inst_basereg = args [0]->dreg;
3890 ins->inst_offset = 0;
3891 MONO_ADD_INS (cfg->cbb, ins);
3895 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are expanded as atomic add of +1/-1 */
3896 if (strcmp (cmethod->name, "Increment") == 0) {
3897 MonoInst *ins_iconst;
3900 if (fsig->params [0]->type == MONO_TYPE_I4)
3901 opcode = OP_ATOMIC_ADD_NEW_I4;
3902 #if SIZEOF_REGISTER == 8
3903 else if (fsig->params [0]->type == MONO_TYPE_I8)
3904 opcode = OP_ATOMIC_ADD_NEW_I8;
3907 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3908 ins_iconst->inst_c0 = 1;
3909 ins_iconst->dreg = mono_alloc_ireg (cfg);
3910 MONO_ADD_INS (cfg->cbb, ins_iconst);
3912 MONO_INST_NEW (cfg, ins, opcode);
3913 ins->dreg = mono_alloc_ireg (cfg);
3914 ins->inst_basereg = args [0]->dreg;
3915 ins->inst_offset = 0;
3916 ins->sreg2 = ins_iconst->dreg;
3917 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3918 MONO_ADD_INS (cfg->cbb, ins);
3920 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3921 MonoInst *ins_iconst;
3924 if (fsig->params [0]->type == MONO_TYPE_I4)
3925 opcode = OP_ATOMIC_ADD_NEW_I4;
3926 #if SIZEOF_REGISTER == 8
3927 else if (fsig->params [0]->type == MONO_TYPE_I8)
3928 opcode = OP_ATOMIC_ADD_NEW_I8;
3931 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3932 ins_iconst->inst_c0 = -1;
3933 ins_iconst->dreg = mono_alloc_ireg (cfg);
3934 MONO_ADD_INS (cfg->cbb, ins_iconst);
3936 MONO_INST_NEW (cfg, ins, opcode);
3937 ins->dreg = mono_alloc_ireg (cfg);
3938 ins->inst_basereg = args [0]->dreg;
3939 ins->inst_offset = 0;
3940 ins->sreg2 = ins_iconst->dreg;
3941 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3942 MONO_ADD_INS (cfg->cbb, ins);
3944 } else if (strcmp (cmethod->name, "Add") == 0) {
3947 if (fsig->params [0]->type == MONO_TYPE_I4)
3948 opcode = OP_ATOMIC_ADD_NEW_I4;
3949 #if SIZEOF_REGISTER == 8
3950 else if (fsig->params [0]->type == MONO_TYPE_I8)
3951 opcode = OP_ATOMIC_ADD_NEW_I8;
3955 MONO_INST_NEW (cfg, ins, opcode);
3956 ins->dreg = mono_alloc_ireg (cfg);
3957 ins->inst_basereg = args [0]->dreg;
3958 ins->inst_offset = 0;
3959 ins->sreg2 = args [1]->dreg;
3960 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3961 MONO_ADD_INS (cfg->cbb, ins);
3964 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3966 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3967 if (strcmp (cmethod->name, "Exchange") == 0) {
3969 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
3971 if (fsig->params [0]->type == MONO_TYPE_I4)
3972 opcode = OP_ATOMIC_EXCHANGE_I4;
3973 #if SIZEOF_REGISTER == 8
3974 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
3975 (fsig->params [0]->type == MONO_TYPE_I))
3976 opcode = OP_ATOMIC_EXCHANGE_I8;
3978 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
3979 opcode = OP_ATOMIC_EXCHANGE_I4;
3984 MONO_INST_NEW (cfg, ins, opcode);
3985 ins->dreg = mono_alloc_ireg (cfg);
3986 ins->inst_basereg = args [0]->dreg;
3987 ins->inst_offset = 0;
3988 ins->sreg2 = args [1]->dreg;
3989 MONO_ADD_INS (cfg->cbb, ins);
3991 switch (fsig->params [0]->type) {
3993 ins->type = STACK_I4;
3997 ins->type = STACK_I8;
3999 case MONO_TYPE_OBJECT:
4000 ins->type = STACK_OBJ;
4003 g_assert_not_reached ();
4006 #if HAVE_WRITE_BARRIERS
/* exchanging a reference into the heap needs a GC write barrier */
4008 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4009 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4013 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4015 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4016 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4018 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4019 if (fsig->params [1]->type == MONO_TYPE_I4)
4021 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4022 size = sizeof (gpointer);
4023 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4026 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4027 ins->dreg = alloc_ireg (cfg);
4028 ins->sreg1 = args [0]->dreg;
4029 ins->sreg2 = args [1]->dreg;
4030 ins->sreg3 = args [2]->dreg;
4031 ins->type = STACK_I4;
4032 MONO_ADD_INS (cfg->cbb, ins);
4033 } else if (size == 8) {
4034 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4035 ins->dreg = alloc_ireg (cfg);
4036 ins->sreg1 = args [0]->dreg;
4037 ins->sreg2 = args [1]->dreg;
4038 ins->sreg3 = args [2]->dreg;
4039 ins->type = STACK_I8;
4040 MONO_ADD_INS (cfg->cbb, ins);
4042 /* g_assert_not_reached (); */
4044 #if HAVE_WRITE_BARRIERS
4046 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4047 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4051 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4055 } else if (cmethod->klass->image == mono_defaults.corlib) {
4056 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4057 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4058 MONO_INST_NEW (cfg, ins, OP_BREAK);
4059 MONO_ADD_INS (cfg->cbb, ins);
4062 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4063 && strcmp (cmethod->klass->name, "Environment") == 0) {
4064 #ifdef PLATFORM_WIN32
4065 EMIT_NEW_ICONST (cfg, ins, 1);
4067 EMIT_NEW_ICONST (cfg, ins, 0);
4071 } else if (cmethod->klass == mono_defaults.math_class) {
4073 * There is general branches code for Min/Max, but it does not work for
4075 * http://everything2.com/?node_id=1051618
4079 #ifdef MONO_ARCH_SIMD_INTRINSICS
4080 if (cfg->opt & MONO_OPT_SIMD) {
4081 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* finally let the backend try an arch-specific expansion */
4087 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4091 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to faster equivalents.
 * Currently only String.InternalAllocateStr is handled, which is redirected
 * to the GC's managed string allocator (when available) called with the
 * string vtable and the length. Returns the call instruction, or NULL when
 * no redirection applies.
 */
4094 inline static MonoInst*
4095 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4096 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4098 if (method->klass == mono_defaults.string_class) {
4099 /* managed string allocation support */
4100 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4101 MonoInst *iargs [2];
4102 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4103 #ifdef MONO_CROSS_COMPILE
/* cross compilation: managed allocator is target-specific, skip it */
4104 MonoMethod *managed_alloc = NULL;
4106 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4110 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4111 iargs [1] = args [0];
4112 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   During inlining, store each call argument in SP into a newly created
 * local variable and install those variables as cfg->args, so the inlined
 * body's ldarg/starg opcodes resolve to the caller-supplied values.
 */
4119 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4121 MonoInst *store, *temp;
4124 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* the 'this' argument has no entry in sig->params; type it from the stack */
4125 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4128 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4129 * would be different than the MonoInst's used to represent arguments, and
4130 * the ldelema implementation can't deal with that.
4131 * Solution: When ldelema is used on an inline argument, create a var for
4132 * it, emit ldelema on that var, and emit the saving code below in
4133 * inline_method () if needed.
4135 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4136 cfg->args [i] = temp;
4137 /* This uses cfg->args [i] which is set by the preceding line */
4138 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4139 store->cil_code = sp [0]->cil_code;
/* Debug switches: when set, inlining is restricted to callee/caller methods
 * whose full name matches a prefix taken from the environment (see below). */
4144 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4145 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4147 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD may be inlined, based on the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable: when set, only
 * methods whose full name starts with that prefix pass. The limit string is
 * cached on first call; an empty/unset limit allows everything.
 */
4149 check_inline_called_method_name_limit (MonoMethod *called_method)
4152 static char *limit = NULL;
4154 if (limit == NULL) {
4155 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4157 if (limit_string != NULL)
4158 limit = limit_string;
4160 limit = (char *) "";
4163 if (limit [0] != '\0') {
4164 char *called_method_name = mono_method_full_name (called_method, TRUE);
4166 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4167 g_free (called_method_name);
4169 //return (strncmp_result <= 0);
4170 return (strncmp_result == 0);
4177 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Caller-side counterpart of check_inline_called_method_name_limit ():
 * allow inlining only when CALLER_METHOD's full name starts with the prefix
 * in MONO_INLINE_CALLER_METHOD_NAME_LIMIT (cached on first call; empty or
 * unset means no restriction).
 */
4179 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4182 static char *limit = NULL;
4184 if (limit == NULL) {
4185 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4186 if (limit_string != NULL) {
4187 limit = limit_string;
4189 limit = (char *) "";
4193 if (limit [0] != '\0') {
4194 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4196 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4197 g_free (caller_method_name);
4199 //return (strncmp_result <= 0);
4200 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD (signature FSIG, arguments SP) into the current compile
 * at IL offset REAL_OFFSET. Saves the per-method state of CFG (locals,
 * args, cbb, cil mapping, generic context, ...), recursively runs
 * mono_method_to_ir () on the callee between fresh start/end bblocks, then
 * restores the state. On success (cost below threshold or INLINE_ALLWAYS)
 * links/merges the new bblocks into the caller's flow graph and returns
 * the cost; on failure restores cfg->cbb and clears the exception so the
 * caller can emit a normal call instead.
 */
4208 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4209 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4211 MonoInst *ins, *rvar = NULL;
4212 MonoMethodHeader *cheader;
4213 MonoBasicBlock *ebblock, *sbblock;
4215 MonoMethod *prev_inlined_method;
4216 MonoInst **prev_locals, **prev_args;
4217 MonoType **prev_arg_types;
4218 guint prev_real_offset;
4219 GHashTable *prev_cbb_hash;
4220 MonoBasicBlock **prev_cil_offset_to_bb;
4221 MonoBasicBlock *prev_cbb;
4222 unsigned char* prev_cil_start;
4223 guint32 prev_cil_offset_to_bb_len;
4224 MonoMethod *prev_current_method;
4225 MonoGenericContext *prev_generic_context;
4226 gboolean ret_var_set, prev_ret_var_set;
4228 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4230 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4231 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4234 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4235 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4239 if (cfg->verbose_level > 2)
4240 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4242 if (!cmethod->inline_info) {
4243 mono_jit_stats.inlineable_methods++;
4244 cmethod->inline_info = 1;
4246 /* allocate space to store the return value */
4247 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4248 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4251 /* allocate local variables */
4252 cheader = mono_method_get_header (cmethod);
4253 prev_locals = cfg->locals;
4254 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4255 for (i = 0; i < cheader->num_locals; ++i)
4256 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4258 /* allocate start and end blocks */
4259 /* This is needed so if the inline is aborted, we can clean up */
4260 NEW_BBLOCK (cfg, sbblock);
4261 sbblock->real_offset = real_offset;
4263 NEW_BBLOCK (cfg, ebblock);
4264 ebblock->block_num = cfg->num_bblocks++;
4265 ebblock->real_offset = real_offset;
/* save the caller's per-method compile state before recursing */
4267 prev_args = cfg->args;
4268 prev_arg_types = cfg->arg_types;
4269 prev_inlined_method = cfg->inlined_method;
4270 cfg->inlined_method = cmethod;
4271 cfg->ret_var_set = FALSE;
4272 prev_real_offset = cfg->real_offset;
4273 prev_cbb_hash = cfg->cbb_hash;
4274 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4275 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4276 prev_cil_start = cfg->cil_start;
4277 prev_cbb = cfg->cbb;
4278 prev_current_method = cfg->current_method;
4279 prev_generic_context = cfg->generic_context;
4280 prev_ret_var_set = cfg->ret_var_set;
4282 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4284 ret_var_set = cfg->ret_var_set;
/* restore the caller's state regardless of success */
4286 cfg->inlined_method = prev_inlined_method;
4287 cfg->real_offset = prev_real_offset;
4288 cfg->cbb_hash = prev_cbb_hash;
4289 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4290 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4291 cfg->cil_start = prev_cil_start;
4292 cfg->locals = prev_locals;
4293 cfg->args = prev_args;
4294 cfg->arg_types = prev_arg_types;
4295 cfg->current_method = prev_current_method;
4296 cfg->generic_context = prev_generic_context;
4297 cfg->ret_var_set = prev_ret_var_set;
4299 if ((costs >= 0 && costs < 60) || inline_allways) {
4300 if (cfg->verbose_level > 2)
4301 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4303 mono_jit_stats.inlined_methods++;
4305 /* always add some code to avoid block split failures */
4306 MONO_INST_NEW (cfg, ins, OP_NOP);
4307 MONO_ADD_INS (prev_cbb, ins);
4309 prev_cbb->next_bb = sbblock;
4310 link_bblock (cfg, prev_cbb, sbblock);
4313 * Get rid of the begin and end bblocks if possible to aid local
4316 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4318 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4319 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4321 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4322 MonoBasicBlock *prev = ebblock->in_bb [0];
4323 mono_merge_basic_blocks (cfg, prev, ebblock);
4325 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4326 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4327 cfg->cbb = prev_cbb;
4335 * If the inlined method contains only a throw, then the ret var is not
4336 * set, so set it to a dummy value.
4339 static double r8_0 = 0.0;
4341 switch (rvar->type) {
4343 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4346 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4351 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4354 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4355 ins->type = STACK_R8;
4356 ins->inst_p0 = (void*)&r8_0;
4357 ins->dreg = rvar->dreg;
4358 MONO_ADD_INS (cfg->cbb, ins);
4361 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4364 g_assert_not_reached ();
4368 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* inline failed: discard what was emitted and let the caller emit a call */
4373 if (cfg->verbose_level > 2)
4374 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4375 cfg->exception_type = MONO_EXCEPTION_NONE;
4376 mono_loader_clear_error ();
4378 /* This gets rid of the newly added bblocks */
4379 cfg->cbb = prev_cbb;
4385 * Some of these comments may well be out-of-date.
4386 * Design decisions: we do a single pass over the IL code (and we do bblock
4387 * splitting/merging in the few cases when it's required: a back jump to an IL
4388 * address that was not already seen as bblock starting point).
4389 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4390 * Complex operations are decomposed in simpler ones right away. We need to let the
4391 * arch-specific code peek and poke inside this process somehow (except when the
4392 * optimizations can take advantage of the full semantic info of coarse opcodes).
4393 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4394 * MonoInst->opcode initially is the IL opcode or some simplification of that
4395 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4396 * opcode with value bigger than OP_LAST.
4397 * At this point the IR can be handed over to an interpreter, a dumb code generator
4398 * or to the optimizing code generator that will translate it to SSA form.
4400 * Profiling directed optimizations.
4401 * We may compile by default with few or no optimizations and instrument the code
4402 * or the user may indicate what methods to optimize the most either in a config file
4403 * or through repeated runs where the compiler applies offline the optimizations to
4404 * each method and then decides if it was worth it.
/* Inline verification helpers used throughout mono_method_to_ir (): each
 * bails out via UNVERIFIED (or load_error for CHECK_TYPELOAD) when the IL
 * being decoded violates stack depth, argument/local indexes, opcode size
 * or type-load constraints. */
4407 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4408 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4409 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4410 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4411 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4412 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4413 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4414 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4416 /* offset from br.s -> br like opcodes */
4417 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
4420 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4422 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4424 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode, and create
 * basic blocks (via GET_BBLOCK) at every branch target and fall-through
 * point, including all entries of a switch table. Bblocks whose last reached
 * opcode is a throw are marked out_of_line so they can be moved to cold
 * code. *POS is presumably advanced to the decode position — TODO confirm
 * (the updating lines are outside this excerpt).
 */
4428 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4430 unsigned char *ip = start;
4431 unsigned char *target;
4434 MonoBasicBlock *bblock;
4435 const MonoOpcode *opcode;
4438 cli_addr = ip - start;
4439 i = mono_opcode_value ((const guint8 **)&ip, end);
4442 opcode = &mono_opcodes [i];
4443 switch (opcode->argument) {
4444 case MonoInlineNone:
4447 case MonoInlineString:
4448 case MonoInlineType:
4449 case MonoInlineField:
4450 case MonoInlineMethod:
4453 case MonoShortInlineR:
4460 case MonoShortInlineVar:
4461 case MonoShortInlineI:
4464 case MonoShortInlineBrTarget:
/* 1-byte signed displacement measured from the end of the 2-byte opcode */
4465 target = start + cli_addr + 2 + (signed char)ip [1];
4466 GET_BBLOCK (cfg, bblock, target);
4469 GET_BBLOCK (cfg, bblock, ip);
4471 case MonoInlineBrTarget:
/* 4-byte signed displacement measured from the end of the 5-byte opcode */
4472 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4473 GET_BBLOCK (cfg, bblock, target);
4476 GET_BBLOCK (cfg, bblock, ip);
4478 case MonoInlineSwitch: {
4479 guint32 n = read32 (ip + 1);
4482 cli_addr += 5 + 4 * n;
4483 target = start + cli_addr;
4484 GET_BBLOCK (cfg, bblock, target);
4486 for (j = 0; j < n; ++j) {
4487 target = start + cli_addr + (gint32)read32 (ip);
4488 GET_BBLOCK (cfg, bblock, target);
4498 g_assert_not_reached ();
4501 if (i == CEE_THROW) {
4502 unsigned char *bb_start = ip - 1;
4504 /* Find the start of the bblock containing the throw */
4506 while ((bb_start >= start) && !bblock) {
4507 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4511 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the token indexes
 * the per-wrapper data array instead of the metadata tables.  "allow open"
 * means the result may still reference open generic parameters; callers
 * that cannot handle that use mini_get_method () instead.
 */
4520 static inline MonoMethod *
4521 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4525 if (m->wrapper_type != MONO_WRAPPER_NONE)
4526 return mono_method_get_wrapper_data (m, token);
4528 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when we are not compiling with
 * a generic sharing context it additionally detects methods whose declaring
 * class is still an open constructed type (the handling of that case is
 * elided in this listing — presumably it fails the lookup; confirm against
 * the full source).
 */
4533 static inline MonoMethod *
4534 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4536 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4538 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the given generic CONTEXT.  Wrapper
 * methods store the class directly in their wrapper data.  The resolved
 * class is eagerly initialized with mono_class_init ().
 */
4544 static inline MonoClass*
4545 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4549 if (method->wrapper_type != MONO_WRAPPER_NONE)
4550 klass = mono_method_get_wrapper_data (method, token);
4552 klass = mono_class_get_full (method->klass->image, token, context);
4554 mono_class_init (klass);
4559 * Returns TRUE if the JIT should abort inlining because "callee"
4560 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand declarative security when CALLER invokes
 * CALLEE.  Only checked when CALLER is an inlined method (cfg->method !=
 * caller) and CALLEE actually carries declarative security.  An ECMA
 * link-demand failure emits a call that raises a SecurityException at the
 * call site; other failures are recorded on the cfg (without clobbering a
 * previously recorded exception).
 */
4563 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4567 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4571 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4572 if (result == MONO_JIT_SECURITY_OK)
4575 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4576 /* Generate code to throw a SecurityException before the actual call/link */
4577 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* arg 0: constant 4 — presumably an enum value consumed by the managed thrower; confirm */
4580 NEW_ICONST (cfg, args [0], 4);
4581 NEW_METHODCONST (cfg, args [1], caller);
4582 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4583 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4584 /* don't hide previous results */
4585 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4586 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (lazily resolving and caching in a function-static) the managed
 * SecurityManager.MethodAccessException (2-argument) helper used to raise
 * method-access violations from JITted code.
 */
4594 method_access_exception (void)
4596 static MonoMethod *method = NULL;
4599 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4600 method = mono_class_get_method_from_name (secman->securitymanager,
4601 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call to the managed MethodAccessException thrower with
 * (caller, callee) as arguments, so the exception is raised at runtime
 * when this code path executes.
 */
4608 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4609 MonoBasicBlock *bblock, unsigned char *ip)
4611 MonoMethod *thrower = method_access_exception ();
4614 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4615 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4616 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * field_access_exception:
 *
 *   Return (lazily resolving and caching in a function-static) the managed
 * SecurityManager.FieldAccessException (2-argument) helper used to raise
 * field-access violations from JITted code.
 */
4620 field_access_exception (void)
4622 static MonoMethod *method = NULL;
4625 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4626 method = mono_class_get_method_from_name (secman->securitymanager,
4627 "FieldAccessException", 2);
/*
 * emit_throw_field_access_exception:
 *
 *   Emit a call to the managed FieldAccessException thrower with
 * (caller, field) as arguments.
 */
4634 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4635 MonoBasicBlock *bblock, unsigned char *ip)
4637 MonoMethod *thrower = field_access_exception ();
4640 EMIT_NEW_METHODCONST (cfg, args [0], caller);
/* NOTE(review): EMIT_NEW_METHODCONST is used with a MonoClassField* here —
 * presumably fine since both are emitted as raw pointer constants, but
 * confirm against the macro definition. */
4641 EMIT_NEW_METHODCONST (cfg, args [1], field);
4642 mono_emit_method_call (cfg, thrower, args, NULL);
4646 * Return the original method if a wrapper is specified. We can only access
4647 * the custom attributes from the original method.
4650 get_original_method (MonoMethod *method)
4652 if (method->wrapper_type == MONO_WRAPPER_NONE)
4655 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4656 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4659 /* in other cases we need to find the original method */
4660 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: a Transparent caller may not touch a field
 * whose declaring class is Critical.  When violated, emit code that
 * raises a FieldAccessException at the access site.
 */
4664 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4665 MonoBasicBlock *bblock, unsigned char *ip)
4667 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4668 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4671 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4672 caller = get_original_method (caller);
4676 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4677 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4678 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, method flavor: a Transparent caller may not
 * call a Critical callee.  When violated, emit code that raises a
 * MethodAccessException at the call site.
 */
4682 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4683 MonoBasicBlock *bblock, unsigned char *ip)
4685 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4686 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4689 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4690 caller = get_original_method (caller);
4694 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4695 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4696 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4700 * Check that the IL instructions at ip are the array initialization
4701 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence emitted by compilers for array
 * initializers.  On match, return a pointer to the raw initializer bytes
 * (or, for AOT, the RVA encoded as a pointer so the lookup happens at
 * load time) and report size/field token through the out parameters.
 * Returns NULL when the pattern or the element type is unsupported.
 */
4704 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4707 * newarr[System.Int32]
4709 * ldtoken field valuetype ...
4710 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 is the high byte of the ldtoken field token (0x04 = Field table) */
4712 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4713 guint32 token = read32 (ip + 7);
4714 guint32 field_token = read32 (ip + 2);
4715 guint32 field_index = field_token & 0xffffff;
4717 const char *data_ptr;
4719 MonoMethod *cmethod;
4720 MonoClass *dummy_class;
4721 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4727 *out_field_token = field_token;
4729 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* verify the callee really is corlib's RuntimeHelpers.InitializeArray */
4732 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4734 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4735 case MONO_TYPE_BOOLEAN:
4739 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4740 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4741 case MONO_TYPE_CHAR:
4751 return NULL; /* stupid ARM FP swapped format */
/* initializer blob must be at least as large as the requested array data */
4761 if (size > mono_type_size (field->type, &dummy_align))
4764 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4765 if (!method->klass->image->dynamic) {
4766 field_index = read32 (ip + 2) & 0xffffff;
4767 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4768 data_ptr = mono_image_rva_map (method->klass->image, rva);
4769 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4770 /* for aot code we do the lookup on load */
4771 if (aot && data_ptr)
4772 return GUINT_TO_POINTER (rva);
4774 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) images: fetch the data directly from the field */
4776 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg, with a message naming the
 * method and disassembling the offending IL instruction at IP (or noting an
 * empty method body).
 */
4784 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4786 char *method_fname = mono_method_full_name (method, TRUE);
4789 if (mono_method_get_header (method)->code_size == 0)
4790 method_code = g_strdup ("method body is empty.");
4792 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4793 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4794 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4795 g_free (method_fname);
4796 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the cfg.  The pointer
 * slot is registered as a GC root first so the object survives until the
 * compilation failure is reported.
 */
4800 set_exception_object (MonoCompile *cfg, MonoException *exception)
4802 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4803 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4804 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving through the
 * generic sharing context first so shared type variables are mapped to
 * their basic representation before the check.
 */
4808 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4812 if (cfg->generic_sharing_context)
4813 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4815 type = &klass->byval_arg;
4816 return MONO_TYPE_IS_REFERENCE (type);
4820 * mono_decompose_array_access_opts:
4822 * Decompose array access opcodes.
4823 * This should be in decompose.c, but it emits calls so it has to stay here until
4824 * the old JIT is gone.
4827 mono_decompose_array_access_opts (MonoCompile *cfg)
4829 MonoBasicBlock *bb, *first_bb;
4832 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4833 * can be executed anytime. It should be run before decompose_long
4837 * Create a dummy bblock and emit code into it so we can use the normal
4838 * code generation macros.
4840 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4841 first_bb = cfg->cbb;
4843 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4845 MonoInst *prev = NULL;
4847 MonoInst *iargs [3];
/* skip bblocks with no array-access opcodes (flag set during IR generation) */
4850 if (!bb->has_array_access)
4853 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4855 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4861 for (ins = bb->code; ins; ins = ins->next) {
4862 switch (ins->opcode) {
/* array length: plain i4 load of MonoArray::max_length */
4864 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4865 G_STRUCT_OFFSET (MonoArray, max_length));
4866 MONO_ADD_INS (cfg->cbb, dest);
4868 case OP_BOUNDS_CHECK:
4869 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* new array: shared code goes through mono_array_new (domain, class, len),
 * otherwise use mono_array_new_specific with a pre-resolved vtable */
4872 if (cfg->opt & MONO_OPT_SHARED) {
4873 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4874 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4875 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4876 iargs [2]->dreg = ins->sreg1;
4878 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4879 dest->dreg = ins->dreg;
4881 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4884 NEW_VTABLECONST (cfg, iargs [0], vtable);
4885 MONO_ADD_INS (cfg->cbb, iargs [0]);
4886 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4887 iargs [1]->dreg = ins->sreg1;
4889 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4890 dest->dreg = ins->dreg;
/* string length: i4 load of MonoString::length */
4894 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4895 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4896 MONO_ADD_INS (cfg->cbb, dest);
4902 g_assert (cfg->cbb == first_bb);
4904 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4905 /* Replace the original instruction with the new code sequence */
4907 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock so it can be reused for the next instruction */
4908 first_bb->code = first_bb->last_ins = NULL;
4909 first_bb->in_count = first_bb->out_count = 0;
4910 cfg->cbb = first_bb;
4917 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4927 #ifdef MONO_ARCH_SOFT_FLOAT
4930 * mono_decompose_soft_float:
4932 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4933 * similar to long support on 32 bit platforms. 32 bit float values require special
4934 * handling when used as locals, arguments, and in calls.
4935 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4938 mono_decompose_soft_float (MonoCompile *cfg)
4940 MonoBasicBlock *bb, *first_bb;
4943 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4947 * Create a dummy bblock and emit code into it so we can use the normal
4948 * code generation macros.
4950 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4951 first_bb = cfg->cbb;
4953 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4955 MonoInst *prev = NULL;
4958 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4960 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4966 for (ins = bb->code; ins; ins = ins->next) {
4967 const char *spec = INS_INFO (ins->opcode);
4969 /* Most fp operations are handled automatically by opcode emulation */
4971 switch (ins->opcode) {
/* fp constants are re-emitted as 64 bit integer constants (bit pattern) */
4974 d.vald = *(double*)ins->inst_p0;
4975 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4980 /* We load the r8 value */
4981 d.vald = *(float*)ins->inst_p0;
4982 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long/int moves on the backing integer vreg pair */
4986 ins->opcode = OP_LMOVE;
4989 ins->opcode = OP_MOVE;
4990 ins->sreg1 = ins->sreg1 + 1;
4993 ins->opcode = OP_MOVE;
4994 ins->sreg1 = ins->sreg1 + 2;
/* fp return value is handed back in the two halves of the vreg pair */
4997 int reg = ins->sreg1;
4999 ins->opcode = OP_SETLRET;
5001 ins->sreg1 = reg + 1;
5002 ins->sreg2 = reg + 2;
/* r8 loads/stores map directly to 64 bit integer loads/stores */
5005 case OP_LOADR8_MEMBASE:
5006 ins->opcode = OP_LOADI8_MEMBASE;
5008 case OP_STORER8_MEMBASE_REG:
5009 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need a narrowing conversion, done by the mono_fstore_r4 icall */
5011 case OP_STORER4_MEMBASE_REG: {
5012 MonoInst *iargs [2];
5015 /* Arg 1 is the double value */
5016 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5017 iargs [0]->dreg = ins->sreg1;
5019 /* Arg 2 is the address to store to */
5020 addr_reg = mono_alloc_preg (cfg);
5021 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5022 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads widen to r8 through the mono_fload_r4 icall */
5026 case OP_LOADR4_MEMBASE: {
5027 MonoInst *iargs [1];
5031 addr_reg = mono_alloc_preg (cfg);
5032 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5033 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5034 conv->dreg = ins->dreg;
5039 case OP_FCALL_MEMBASE: {
5040 MonoCallInst *call = (MonoCallInst*)ins;
/* calls returning r4 get the value as an int and convert it to r8 */
5041 if (call->signature->ret->type == MONO_TYPE_R4) {
5042 MonoCallInst *call2;
5043 MonoInst *iargs [1];
5046 /* Convert the call into a call returning an int */
5047 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5048 memcpy (call2, call, sizeof (MonoCallInst));
5049 switch (ins->opcode) {
5051 call2->inst.opcode = OP_CALL;
5054 call2->inst.opcode = OP_CALL_REG;
5056 case OP_FCALL_MEMBASE:
5057 call2->inst.opcode = OP_CALL_MEMBASE;
5060 g_assert_not_reached ();
5062 call2->inst.dreg = mono_alloc_ireg (cfg);
5063 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5065 /* FIXME: Optimize this */
5067 /* Emit an r4->r8 conversion */
5068 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5069 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5070 conv->dreg = ins->dreg;
/* calls returning r8 become long calls (value in an int vreg pair) */
5072 switch (ins->opcode) {
5074 ins->opcode = OP_LCALL;
5077 ins->opcode = OP_LCALL_REG;
5079 case OP_FCALL_MEMBASE:
5080 ins->opcode = OP_LCALL_MEMBASE;
5083 g_assert_not_reached ();
5089 MonoJitICallInfo *info;
5090 MonoInst *iargs [2];
5091 MonoInst *call, *cmp, *br;
5093 /* Convert fcompare+fbcc to icall+icompare+beq */
5095 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5098 /* Create dummy MonoInst's for the arguments */
5099 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5100 iargs [0]->dreg = ins->sreg1;
5101 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5102 iargs [1]->dreg = ins->sreg2;
5104 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5106 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5107 cmp->sreg1 = call->dreg;
5109 MONO_ADD_INS (cfg->cbb, cmp);
5111 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5112 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5113 br->inst_true_bb = ins->next->inst_true_bb;
5114 br->inst_false_bb = ins->next->inst_false_bb;
5115 MONO_ADD_INS (cfg->cbb, br);
5117 /* The call sequence might include fp ins */
5120 /* Skip fbcc or fccc */
5121 NULLIFY_INS (ins->next);
5129 MonoJitICallInfo *info;
5130 MonoInst *iargs [2];
5133 /* Convert fccc to icall+icompare+iceq */
5135 info = mono_find_jit_opcode_emulation (ins->opcode);
5138 /* Create dummy MonoInst's for the arguments */
5139 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5140 iargs [0]->dreg = ins->sreg1;
5141 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5142 iargs [1]->dreg = ins->sreg2;
5144 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5147 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5149 /* The call sequence might include fp ins */
/* ckfinite: call mono_isfinite and raise ArithmeticException when it says no */
5154 MonoInst *iargs [2];
5155 MonoInst *call, *cmp;
5157 /* Convert to icall+icompare+cond_exc+move */
5159 /* Create dummy MonoInst's for the arguments */
5160 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5161 iargs [0]->dreg = ins->sreg1;
5163 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5165 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5166 cmp->sreg1 = call->dreg;
5168 MONO_ADD_INS (cfg->cbb, cmp);
5170 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5172 /* Do the assignment if the value is finite */
5173 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* after decomposition no fp vregs may remain in this instruction */
5179 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5180 mono_print_ins (ins);
5181 g_assert_not_reached ();
5186 g_assert (cfg->cbb == first_bb);
5188 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5189 /* Replace the original instruction with the new code sequence */
5191 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5192 first_bb->code = first_bb->last_ins = NULL;
5193 first_bb->in_count = first_bb->out_count = 0;
5194 cfg->cbb = first_bb;
5201 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* the newly created long opcodes still need the regular long decomposition */
5204 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N.  When the store would be a plain
 * reg-reg move and the value on the stack top is the constant just emitted
 * (OP_ICONST/OP_I8CONST as the bblock's last ins), redirect that constant's
 * dreg to the local instead, eliminating the move.
 */
5210 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5213 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5214 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5215 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5216 /* Optimize reg-reg moves away */
5218 * Can't optimize other opcodes, since sp[0] might point to
5219 * the last ins of a decomposed opcode.
5221 sp [0]->dreg = (cfg)->locals [n]->dreg;
5223 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5228 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize the "ldloca <n>; initobj <T>" pattern and replace it with a
 * direct initialization of the local (NULL pconst for reference types,
 * vzero for value types), avoiding the address-taken local.  Returns the
 * new IL position on success (return path elided in this listing).
 */
5231 static inline unsigned char *
5232 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5241 local = read16 (ip + 2);
/* only fire when the following initobj is in the same bblock */
5245 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5246 gboolean skip = FALSE;
5248 /* From the INITOBJ case */
5249 token = read32 (ip + 2);
5250 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5251 CHECK_TYPELOAD (klass);
5252 if (generic_class_is_reference_type (cfg, klass)) {
5253 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5254 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5255 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5256 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5257 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain and return TRUE if CLASS derives from (or is)
 * System.Exception.
 */
5270 is_exception_class (MonoClass *class)
5273 if (class == mono_defaults.exception_class)
5275 class = class->parent;
5281 * mono_method_to_ir:
5283 * Translate the .net IL into linear IR.
5286 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5287 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5288 guint inline_offset, gboolean is_virtual_call)
5290 MonoInst *ins, **sp, **stack_start;
5291 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5292 MonoMethod *cmethod, *method_definition;
5293 MonoInst **arg_array;
5294 MonoMethodHeader *header;
5296 guint32 token, ins_flag;
5298 MonoClass *constrained_call = NULL;
5299 unsigned char *ip, *end, *target, *err_pos;
5300 static double r8_0 = 0.0;
5301 MonoMethodSignature *sig;
5302 MonoGenericContext *generic_context = NULL;
5303 MonoGenericContainer *generic_container = NULL;
5304 MonoType **param_types;
5305 int i, n, start_new_bblock, dreg;
5306 int num_calls = 0, inline_costs = 0;
5307 int breakpoint_id = 0;
5309 MonoBoolean security, pinvoke;
5310 MonoSecurityManager* secman = NULL;
5311 MonoDeclSecurityActions actions;
5312 GSList *class_inits = NULL;
5313 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5315 gboolean init_locals;
5317 /* serialization and xdomain stuff may need access to private fields and methods */
5318 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5319 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5320 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5321 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5322 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5323 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5325 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5327 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5328 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5329 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5330 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5332 image = method->klass->image;
5333 header = mono_method_get_header (method);
5334 generic_container = mono_method_get_generic_container (method);
5335 sig = mono_method_signature (method);
5336 num_args = sig->hasthis + sig->param_count;
5337 ip = (unsigned char*)header->code;
5338 cfg->cil_start = ip;
5339 end = ip + header->code_size;
5340 mono_jit_stats.cil_code_size += header->code_size;
5341 init_locals = header->init_locals;
5344 * Methods without init_locals set could cause asserts in various passes
5349 method_definition = method;
5350 while (method_definition->is_inflated) {
5351 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5352 method_definition = imethod->declaring;
5355 /* SkipVerification is not allowed if core-clr is enabled */
5356 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5358 dont_verify_stloc = TRUE;
5361 if (!dont_verify && mini_method_verify (cfg, method_definition))
5362 goto exception_exit;
5364 if (mono_debug_using_mono_debugger ())
5365 cfg->keep_cil_nops = TRUE;
5367 if (sig->is_inflated)
5368 generic_context = mono_method_get_context (method);
5369 else if (generic_container)
5370 generic_context = &generic_container->context;
5371 cfg->generic_context = generic_context;
5373 if (!cfg->generic_sharing_context)
5374 g_assert (!sig->has_type_parameters);
5376 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5377 g_assert (method->is_inflated);
5378 g_assert (mono_method_get_context (method)->method_inst);
5380 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5381 g_assert (sig->generic_param_count);
5383 if (cfg->method == method) {
5384 cfg->real_offset = 0;
5386 cfg->real_offset = inline_offset;
5389 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5390 cfg->cil_offset_to_bb_len = header->code_size;
5392 cfg->current_method = method;
5394 if (cfg->verbose_level > 2)
5395 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5397 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5399 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5400 for (n = 0; n < sig->param_count; ++n)
5401 param_types [n + sig->hasthis] = sig->params [n];
5402 cfg->arg_types = param_types;
5404 dont_inline = g_list_prepend (dont_inline, method);
5405 if (cfg->method == method) {
5407 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5408 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5411 NEW_BBLOCK (cfg, start_bblock);
5412 cfg->bb_entry = start_bblock;
5413 start_bblock->cil_code = NULL;
5414 start_bblock->cil_length = 0;
5417 NEW_BBLOCK (cfg, end_bblock);
5418 cfg->bb_exit = end_bblock;
5419 end_bblock->cil_code = NULL;
5420 end_bblock->cil_length = 0;
5421 g_assert (cfg->num_bblocks == 2);
5423 arg_array = cfg->args;
5425 if (header->num_clauses) {
5426 cfg->spvars = g_hash_table_new (NULL, NULL);
5427 cfg->exvars = g_hash_table_new (NULL, NULL);
5429 /* handle exception clauses */
5430 for (i = 0; i < header->num_clauses; ++i) {
5431 MonoBasicBlock *try_bb;
5432 MonoExceptionClause *clause = &header->clauses [i];
5433 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5434 try_bb->real_offset = clause->try_offset;
5435 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5436 tblock->real_offset = clause->handler_offset;
5437 tblock->flags |= BB_EXCEPTION_HANDLER;
5439 link_bblock (cfg, try_bb, tblock);
5441 if (*(ip + clause->handler_offset) == CEE_POP)
5442 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5444 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5445 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5446 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5447 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5448 MONO_ADD_INS (tblock, ins);
5450 /* todo: is a fault block unsafe to optimize? */
5451 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5452 tblock->flags |= BB_EXCEPTION_UNSAFE;
5456 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5458 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5460 /* catch and filter blocks get the exception object on the stack */
5461 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5462 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5463 MonoInst *dummy_use;
5465 /* mostly like handle_stack_args (), but just sets the input args */
5466 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5467 tblock->in_scount = 1;
5468 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5469 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5472 * Add a dummy use for the exvar so its liveness info will be
5476 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5479 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5480 tblock->flags |= BB_EXCEPTION_HANDLER;
5481 tblock->real_offset = clause->data.filter_offset;
5482 tblock->in_scount = 1;
5483 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5484 /* The filter block shares the exvar with the handler block */
5485 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5486 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5487 MONO_ADD_INS (tblock, ins);
5491 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5492 clause->data.catch_class &&
5493 cfg->generic_sharing_context &&
5494 mono_class_check_context_used (clause->data.catch_class)) {
5496 * In shared generic code with catch
5497 * clauses containing type variables
5498 * the exception handling code has to
5499 * be able to get to the rgctx.
5500 * Therefore we have to make sure that
5501 * the vtable/mrgctx argument (for
5502 * static or generic methods) or the
5503 * "this" argument (for non-static
5504 * methods) are live.
5506 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5507 mini_method_get_context (method)->method_inst ||
5508 method->klass->valuetype) {
5509 mono_get_vtable_var (cfg);
5511 MonoInst *dummy_use;
5513 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5518 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5519 cfg->cbb = start_bblock;
5520 cfg->args = arg_array;
5521 mono_save_args (cfg, sig, inline_args);
5524 /* FIRST CODE BLOCK */
5525 NEW_BBLOCK (cfg, bblock);
5526 bblock->cil_code = ip;
5530 ADD_BBLOCK (cfg, bblock);
5532 if (cfg->method == method) {
5533 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5534 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5535 MONO_INST_NEW (cfg, ins, OP_BREAK);
5536 MONO_ADD_INS (bblock, ins);
5540 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5541 secman = mono_security_manager_get_methods ();
5543 security = (secman && mono_method_has_declsec (method));
5544 /* at this point having security doesn't mean we have any code to generate */
5545 if (security && (cfg->method == method)) {
5546 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5547 * And we do not want to enter the next section (with allocation) if we
5548 * have nothing to generate */
5549 security = mono_declsec_get_demands (method, &actions);
5552 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5553 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5555 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5556 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5557 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5559 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5560 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5564 mono_custom_attrs_free (custom);
5567 custom = mono_custom_attrs_from_class (wrapped->klass);
5568 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5572 mono_custom_attrs_free (custom);
5575 /* not a P/Invoke after all */
5580 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5581 /* we use a separate basic block for the initialization code */
5582 NEW_BBLOCK (cfg, init_localsbb);
5583 cfg->bb_init = init_localsbb;
5584 init_localsbb->real_offset = cfg->real_offset;
5585 start_bblock->next_bb = init_localsbb;
5586 init_localsbb->next_bb = bblock;
5587 link_bblock (cfg, start_bblock, init_localsbb);
5588 link_bblock (cfg, init_localsbb, bblock);
5590 cfg->cbb = init_localsbb;
5592 start_bblock->next_bb = bblock;
5593 link_bblock (cfg, start_bblock, bblock);
5596 /* at this point we know, if security is TRUE, that some code needs to be generated */
5597 if (security && (cfg->method == method)) {
5600 mono_jit_stats.cas_demand_generation++;
5602 if (actions.demand.blob) {
5603 /* Add code for SecurityAction.Demand */
5604 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5605 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5606 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5607 mono_emit_method_call (cfg, secman->demand, args, NULL);
5609 if (actions.noncasdemand.blob) {
5610 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5611 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5612 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5613 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5614 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5615 mono_emit_method_call (cfg, secman->demand, args, NULL);
5617 if (actions.demandchoice.blob) {
5618 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5619 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5620 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5621 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5622 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5626 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5628 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5631 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5632 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5633 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5634 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5635 if (!(method->klass && method->klass->image &&
5636 mono_security_core_clr_is_platform_image (method->klass->image))) {
5637 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5643 if (header->code_size == 0)
5646 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5651 if (cfg->method == method)
5652 mono_debug_init_method (cfg, bblock, breakpoint_id);
5654 for (n = 0; n < header->num_locals; ++n) {
5655 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5660 /* We force the vtable variable here for all shared methods
5661 for the possibility that they might show up in a stack
5662 trace where their exact instantiation is needed. */
5663 if (cfg->generic_sharing_context && method == cfg->method) {
5664 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5665 mini_method_get_context (method)->method_inst ||
5666 method->klass->valuetype) {
5667 mono_get_vtable_var (cfg);
5669 /* FIXME: Is there a better way to do this?
5670 We need the variable live for the duration
5671 of the whole method. */
5672 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5676 /* add a check for this != NULL to inlined methods */
5677 if (is_virtual_call) {
5680 NEW_ARGLOAD (cfg, arg_ins, 0);
5681 MONO_ADD_INS (cfg->cbb, arg_ins);
5682 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5683 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5684 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5687 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5688 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5691 start_new_bblock = 0;
5695 if (cfg->method == method)
5696 cfg->real_offset = ip - header->code;
5698 cfg->real_offset = inline_offset;
5703 if (start_new_bblock) {
5704 bblock->cil_length = ip - bblock->cil_code;
5705 if (start_new_bblock == 2) {
5706 g_assert (ip == tblock->cil_code);
5708 GET_BBLOCK (cfg, tblock, ip);
5710 bblock->next_bb = tblock;
5713 start_new_bblock = 0;
5714 for (i = 0; i < bblock->in_scount; ++i) {
5715 if (cfg->verbose_level > 3)
5716 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5717 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5721 g_slist_free (class_inits);
5724 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5725 link_bblock (cfg, bblock, tblock);
5726 if (sp != stack_start) {
5727 handle_stack_args (cfg, stack_start, sp - stack_start);
5729 CHECK_UNVERIFIABLE (cfg);
5731 bblock->next_bb = tblock;
5734 for (i = 0; i < bblock->in_scount; ++i) {
5735 if (cfg->verbose_level > 3)
5736 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5737 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5740 g_slist_free (class_inits);
5745 bblock->real_offset = cfg->real_offset;
5747 if ((cfg->method == method) && cfg->coverage_info) {
5748 guint32 cil_offset = ip - header->code;
5749 cfg->coverage_info->data [cil_offset].cil_code = ip;
5751 /* TODO: Use an increment here */
5752 #if defined(TARGET_X86)
5753 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5754 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5756 MONO_ADD_INS (cfg->cbb, ins);
5758 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5759 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5763 if (cfg->verbose_level > 3)
5764 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5768 if (cfg->keep_cil_nops)
5769 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5771 MONO_INST_NEW (cfg, ins, OP_NOP);
5773 MONO_ADD_INS (bblock, ins);
5776 MONO_INST_NEW (cfg, ins, OP_BREAK);
5778 MONO_ADD_INS (bblock, ins);
5784 CHECK_STACK_OVF (1);
5785 n = (*ip)-CEE_LDARG_0;
5787 EMIT_NEW_ARGLOAD (cfg, ins, n);
5795 CHECK_STACK_OVF (1);
5796 n = (*ip)-CEE_LDLOC_0;
5798 EMIT_NEW_LOCLOAD (cfg, ins, n);
5807 n = (*ip)-CEE_STLOC_0;
5810 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5812 emit_stloc_ir (cfg, sp, header, n);
5819 CHECK_STACK_OVF (1);
5822 EMIT_NEW_ARGLOAD (cfg, ins, n);
5828 CHECK_STACK_OVF (1);
5831 NEW_ARGLOADA (cfg, ins, n);
5832 MONO_ADD_INS (cfg->cbb, ins);
5842 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5844 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5849 CHECK_STACK_OVF (1);
5852 EMIT_NEW_LOCLOAD (cfg, ins, n);
5856 case CEE_LDLOCA_S: {
5857 unsigned char *tmp_ip;
5859 CHECK_STACK_OVF (1);
5860 CHECK_LOCAL (ip [1]);
5862 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5868 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5877 CHECK_LOCAL (ip [1]);
5878 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5880 emit_stloc_ir (cfg, sp, header, ip [1]);
5885 CHECK_STACK_OVF (1);
5886 EMIT_NEW_PCONST (cfg, ins, NULL);
5887 ins->type = STACK_OBJ;
5892 CHECK_STACK_OVF (1);
5893 EMIT_NEW_ICONST (cfg, ins, -1);
5906 CHECK_STACK_OVF (1);
5907 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5913 CHECK_STACK_OVF (1);
5915 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5921 CHECK_STACK_OVF (1);
5922 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5928 CHECK_STACK_OVF (1);
5929 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5930 ins->type = STACK_I8;
5931 ins->dreg = alloc_dreg (cfg, STACK_I8);
5933 ins->inst_l = (gint64)read64 (ip);
5934 MONO_ADD_INS (bblock, ins);
5940 gboolean use_aotconst = FALSE;
5942 #ifdef TARGET_POWERPC
5943 /* FIXME: Clean this up */
5944 if (cfg->compile_aot)
5945 use_aotconst = TRUE;
5948 /* FIXME: we should really allocate this only late in the compilation process */
5949 f = mono_domain_alloc (cfg->domain, sizeof (float));
5951 CHECK_STACK_OVF (1);
5957 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5959 dreg = alloc_freg (cfg);
5960 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5961 ins->type = STACK_R8;
5963 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5964 ins->type = STACK_R8;
5965 ins->dreg = alloc_dreg (cfg, STACK_R8);
5967 MONO_ADD_INS (bblock, ins);
5977 gboolean use_aotconst = FALSE;
5979 #ifdef TARGET_POWERPC
5980 /* FIXME: Clean this up */
5981 if (cfg->compile_aot)
5982 use_aotconst = TRUE;
5985 /* FIXME: we should really allocate this only late in the compilation process */
5986 d = mono_domain_alloc (cfg->domain, sizeof (double));
5988 CHECK_STACK_OVF (1);
5994 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5996 dreg = alloc_freg (cfg);
5997 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5998 ins->type = STACK_R8;
6000 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6001 ins->type = STACK_R8;
6002 ins->dreg = alloc_dreg (cfg, STACK_R8);
6004 MONO_ADD_INS (bblock, ins);
6013 MonoInst *temp, *store;
6015 CHECK_STACK_OVF (1);
6019 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6020 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6022 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6025 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6038 if (sp [0]->type == STACK_R8)
6039 /* we need to pop the value from the x86 FP stack */
6040 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6049 if (stack_start != sp)
6051 token = read32 (ip + 1);
6052 /* FIXME: check the signature matches */
6053 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6058 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6059 GENERIC_SHARING_FAILURE (CEE_JMP);
6061 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6062 CHECK_CFG_EXCEPTION;
6064 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6066 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6069 /* Handle tail calls similarly to calls */
6070 n = fsig->param_count + fsig->hasthis;
6072 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6073 call->method = cmethod;
6074 call->tail_call = TRUE;
6075 call->signature = mono_method_signature (cmethod);
6076 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6077 call->inst.inst_p0 = cmethod;
6078 for (i = 0; i < n; ++i)
6079 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6081 mono_arch_emit_call (cfg, call);
6082 MONO_ADD_INS (bblock, (MonoInst*)call);
6085 for (i = 0; i < num_args; ++i)
6086 /* Prevent arguments from being optimized away */
6087 arg_array [i]->flags |= MONO_INST_VOLATILE;
6089 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6090 ins = (MonoInst*)call;
6091 ins->inst_p0 = cmethod;
6092 MONO_ADD_INS (bblock, ins);
6096 start_new_bblock = 1;
6101 case CEE_CALLVIRT: {
6102 MonoInst *addr = NULL;
6103 MonoMethodSignature *fsig = NULL;
6105 int virtual = *ip == CEE_CALLVIRT;
6106 int calli = *ip == CEE_CALLI;
6107 gboolean pass_imt_from_rgctx = FALSE;
6108 MonoInst *imt_arg = NULL;
6109 gboolean pass_vtable = FALSE;
6110 gboolean pass_mrgctx = FALSE;
6111 MonoInst *vtable_arg = NULL;
6112 gboolean check_this = FALSE;
6113 gboolean supported_tail_call = FALSE;
6116 token = read32 (ip + 1);
6123 if (method->wrapper_type != MONO_WRAPPER_NONE)
6124 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6126 fsig = mono_metadata_parse_signature (image, token);
6128 n = fsig->param_count + fsig->hasthis;
6130 MonoMethod *cil_method;
6132 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6133 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6134 cil_method = cmethod;
6135 } else if (constrained_call) {
6136 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6138 * This is needed since get_method_constrained can't find
6139 * the method in klass representing a type var.
6140 * The type var is guaranteed to be a reference type in this
6143 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6144 cil_method = cmethod;
6145 g_assert (!cmethod->klass->valuetype);
6147 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6150 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6151 cil_method = cmethod;
6156 if (!dont_verify && !cfg->skip_visibility) {
6157 MonoMethod *target_method = cil_method;
6158 if (method->is_inflated) {
6159 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6161 if (!mono_method_can_access_method (method_definition, target_method) &&
6162 !mono_method_can_access_method (method, cil_method))
6163 METHOD_ACCESS_FAILURE;
6166 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6167 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6169 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6170 /* MS.NET seems to silently convert this to a callvirt */
6173 if (!cmethod->klass->inited)
6174 if (!mono_class_init (cmethod->klass))
6177 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6178 mini_class_is_system_array (cmethod->klass)) {
6179 array_rank = cmethod->klass->rank;
6180 fsig = mono_method_signature (cmethod);
6182 if (mono_method_signature (cmethod)->pinvoke) {
6183 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6184 check_for_pending_exc, FALSE);
6185 fsig = mono_method_signature (wrapper);
6186 } else if (constrained_call) {
6187 fsig = mono_method_signature (cmethod);
6189 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6193 mono_save_token_info (cfg, image, token, cil_method);
6195 n = fsig->param_count + fsig->hasthis;
6197 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6198 if (check_linkdemand (cfg, method, cmethod))
6200 CHECK_CFG_EXCEPTION;
6203 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6204 g_assert_not_reached ();
6207 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6210 if (!cfg->generic_sharing_context && cmethod)
6211 g_assert (!mono_method_check_context_used (cmethod));
6215 //g_assert (!virtual || fsig->hasthis);
6219 if (constrained_call) {
6221 * We have the `constrained.' prefix opcode.
6223 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6227 * The type parameter is instantiated as a valuetype,
6228 * but that type doesn't override the method we're
6229 * calling, so we need to box `this'.
6231 dreg = alloc_dreg (cfg, STACK_VTYPE);
6232 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6233 ins->klass = constrained_call;
6234 sp [0] = handle_box (cfg, ins, constrained_call);
6235 } else if (!constrained_call->valuetype) {
6236 int dreg = alloc_preg (cfg);
6239 * The type parameter is instantiated as a reference
6240 * type. We have a managed pointer on the stack, so
6241 * we need to dereference it here.
6243 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6244 ins->type = STACK_OBJ;
6246 } else if (cmethod->klass->valuetype)
6248 constrained_call = NULL;
6251 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6255 * If the callee is a shared method, then its static cctor
6256 * might not get called after the call was patched.
6258 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6259 emit_generic_class_init (cfg, cmethod->klass);
6262 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6263 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6264 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6265 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6266 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6269 * Pass vtable iff target method might
6270 * be shared, which means that sharing
6271 * is enabled for its class and its
6272 * context is sharable (and it's not a
6275 if (sharing_enabled && context_sharable &&
6276 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6280 if (cmethod && mini_method_get_context (cmethod) &&
6281 mini_method_get_context (cmethod)->method_inst) {
6282 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6283 MonoGenericContext *context = mini_method_get_context (cmethod);
6284 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6286 g_assert (!pass_vtable);
6288 if (sharing_enabled && context_sharable)
6292 if (cfg->generic_sharing_context && cmethod) {
6293 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6295 context_used = mono_method_check_context_used (cmethod);
6297 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6298 /* Generic method interface
6299 calls are resolved via a
6300 helper function and don't
6302 if (!cmethod_context || !cmethod_context->method_inst)
6303 pass_imt_from_rgctx = TRUE;
6307 * If a shared method calls another
6308 * shared method then the caller must
6309 * have a generic sharing context
6310 * because the magic trampoline
6311 * requires it. FIXME: We shouldn't
6312 * have to force the vtable/mrgctx
6313 * variable here. Instead there
6314 * should be a flag in the cfg to
6315 * request a generic sharing context.
6318 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6319 mono_get_vtable_var (cfg);
6324 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6326 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6328 CHECK_TYPELOAD (cmethod->klass);
6329 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6334 g_assert (!vtable_arg);
6337 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6339 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6342 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6343 MONO_METHOD_IS_FINAL (cmethod)) {
6350 if (pass_imt_from_rgctx) {
6351 g_assert (!pass_vtable);
6354 imt_arg = emit_get_rgctx_method (cfg, context_used,
6355 cmethod, MONO_RGCTX_INFO_METHOD);
6361 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6362 check->sreg1 = sp [0]->dreg;
6363 MONO_ADD_INS (cfg->cbb, check);
6366 /* Calling virtual generic methods */
6367 if (cmethod && virtual &&
6368 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6369 !(MONO_METHOD_IS_FINAL (cmethod) &&
6370 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6371 mono_method_signature (cmethod)->generic_param_count) {
6372 MonoInst *this_temp, *this_arg_temp, *store;
6373 MonoInst *iargs [4];
6375 g_assert (mono_method_signature (cmethod)->is_inflated);
6377 /* Prevent inlining of methods that contain indirect calls */
6380 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6381 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6382 g_assert (!imt_arg);
6384 imt_arg = emit_get_rgctx_method (cfg, context_used,
6385 cmethod, MONO_RGCTX_INFO_METHOD);
6388 g_assert (cmethod->is_inflated);
6389 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6391 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6395 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6396 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6397 MONO_ADD_INS (bblock, store);
6399 /* FIXME: This should be a managed pointer */
6400 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6402 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6404 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6405 cmethod, MONO_RGCTX_INFO_METHOD);
6406 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6407 addr = mono_emit_jit_icall (cfg,
6408 mono_helper_compile_generic_method, iargs);
6410 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6411 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6412 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6415 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6417 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6420 if (!MONO_TYPE_IS_VOID (fsig->ret))
6421 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6428 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6429 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6431 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6435 /* FIXME: runtime generic context pointer for jumps? */
6436 /* FIXME: handle this for generic sharing eventually */
6437 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6440 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6443 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6444 /* Handle tail calls similarly to calls */
6445 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6447 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6448 call->tail_call = TRUE;
6449 call->method = cmethod;
6450 call->signature = mono_method_signature (cmethod);
6453 * We implement tail calls by storing the actual arguments into the
6454 * argument variables, then emitting a CEE_JMP.
6456 for (i = 0; i < n; ++i) {
6457 /* Prevent argument from being register allocated */
6458 arg_array [i]->flags |= MONO_INST_VOLATILE;
6459 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6463 ins = (MonoInst*)call;
6464 ins->inst_p0 = cmethod;
6465 ins->inst_p1 = arg_array [0];
6466 MONO_ADD_INS (bblock, ins);
6467 link_bblock (cfg, bblock, end_bblock);
6468 start_new_bblock = 1;
6469 /* skip CEE_RET as well */
6475 /* Conversion to a JIT intrinsic */
6476 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6477 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6478 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6489 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6490 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6491 mono_method_check_inlining (cfg, cmethod) &&
6492 !g_list_find (dont_inline, cmethod)) {
6494 gboolean allways = FALSE;
6496 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6497 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6498 /* Prevent inlining of methods that call wrappers */
6500 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6504 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6506 cfg->real_offset += 5;
6509 if (!MONO_TYPE_IS_VOID (fsig->ret))
6510 /* *sp is already set by inline_method */
6513 inline_costs += costs;
6519 inline_costs += 10 * num_calls++;
6521 /* Tail recursion elimination */
6522 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6523 gboolean has_vtargs = FALSE;
6526 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6529 /* keep it simple */
6530 for (i = fsig->param_count - 1; i >= 0; i--) {
6531 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6536 for (i = 0; i < n; ++i)
6537 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6538 MONO_INST_NEW (cfg, ins, OP_BR);
6539 MONO_ADD_INS (bblock, ins);
6540 tblock = start_bblock->out_bb [0];
6541 link_bblock (cfg, bblock, tblock);
6542 ins->inst_target_bb = tblock;
6543 start_new_bblock = 1;
6545 /* skip the CEE_RET, too */
6546 if (ip_in_bb (cfg, bblock, ip + 5))
6556 /* Generic sharing */
6557 /* FIXME: only do this for generic methods if
6558 they are not shared! */
6559 if (context_used && !imt_arg && !array_rank &&
6560 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6561 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6562 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6563 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6566 g_assert (cfg->generic_sharing_context && cmethod);
6570 * We are compiling a call to a
6571 * generic method from shared code,
6572 * which means that we have to look up
6573 * the method in the rgctx and do an
6576 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6579 /* Indirect calls */
6581 g_assert (!imt_arg);
6583 if (*ip == CEE_CALL)
6584 g_assert (context_used);
6585 else if (*ip == CEE_CALLI)
6586 g_assert (!vtable_arg);
6588 /* FIXME: what the hell is this??? */
6589 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6590 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6592 /* Prevent inlining of methods with indirect calls */
6596 #ifdef MONO_ARCH_RGCTX_REG
6598 int rgctx_reg = mono_alloc_preg (cfg);
6600 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6601 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6602 call = (MonoCallInst*)ins;
6603 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6604 cfg->uses_rgctx_reg = TRUE;
6605 call->rgctx_reg = TRUE;
6610 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6612 * Instead of emitting an indirect call, emit a direct call
6613 * with the contents of the aotconst as the patch info.
6615 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6617 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6618 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6621 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6624 if (!MONO_TYPE_IS_VOID (fsig->ret))
6625 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6636 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6637 if (sp [fsig->param_count]->type == STACK_OBJ) {
6638 MonoInst *iargs [2];
6641 iargs [1] = sp [fsig->param_count];
6643 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6646 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6647 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6648 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6649 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6651 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6654 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6655 if (!cmethod->klass->element_class->valuetype && !readonly)
6656 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6659 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6662 g_assert_not_reached ();
6670 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6672 if (!MONO_TYPE_IS_VOID (fsig->ret))
6673 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6683 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6685 } else if (imt_arg) {
6686 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6688 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6691 if (!MONO_TYPE_IS_VOID (fsig->ret))
6692 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6699 if (cfg->method != method) {
6700 /* return from inlined method */
6702 * If in_count == 0, that means the ret is unreachable due to
6703 * being preceded by a throw. In that case, inline_method () will
6704 * handle setting the return value
6705 * (test case: test_0_inline_throw ()).
6707 if (return_var && cfg->cbb->in_count) {
6711 //g_assert (returnvar != -1);
6712 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6713 cfg->ret_var_set = TRUE;
6717 MonoType *ret_type = mono_method_signature (method)->ret;
6719 g_assert (!return_var);
6722 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6725 if (!cfg->vret_addr) {
6728 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6730 EMIT_NEW_RETLOADA (cfg, ret_addr);
6732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6733 ins->klass = mono_class_from_mono_type (ret_type);
6736 #ifdef MONO_ARCH_SOFT_FLOAT
6737 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6738 MonoInst *iargs [1];
6742 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6743 mono_arch_emit_setret (cfg, method, conv);
6745 mono_arch_emit_setret (cfg, method, *sp);
6748 mono_arch_emit_setret (cfg, method, *sp);
6753 if (sp != stack_start)
6755 MONO_INST_NEW (cfg, ins, OP_BR);
6757 ins->inst_target_bb = end_bblock;
6758 MONO_ADD_INS (bblock, ins);
6759 link_bblock (cfg, bblock, end_bblock);
6760 start_new_bblock = 1;
6764 MONO_INST_NEW (cfg, ins, OP_BR);
6766 target = ip + 1 + (signed char)(*ip);
6768 GET_BBLOCK (cfg, tblock, target);
6769 link_bblock (cfg, bblock, tblock);
6770 ins->inst_target_bb = tblock;
6771 if (sp != stack_start) {
6772 handle_stack_args (cfg, stack_start, sp - stack_start);
6774 CHECK_UNVERIFIABLE (cfg);
6776 MONO_ADD_INS (bblock, ins);
6777 start_new_bblock = 1;
6778 inline_costs += BRANCH_COST;
6792 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6794 target = ip + 1 + *(signed char*)ip;
6800 inline_costs += BRANCH_COST;
6804 MONO_INST_NEW (cfg, ins, OP_BR);
6807 target = ip + 4 + (gint32)read32(ip);
6809 GET_BBLOCK (cfg, tblock, target);
6810 link_bblock (cfg, bblock, tblock);
6811 ins->inst_target_bb = tblock;
6812 if (sp != stack_start) {
6813 handle_stack_args (cfg, stack_start, sp - stack_start);
6815 CHECK_UNVERIFIABLE (cfg);
6818 MONO_ADD_INS (bblock, ins);
6820 start_new_bblock = 1;
6821 inline_costs += BRANCH_COST;
6828 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6829 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6830 guint32 opsize = is_short ? 1 : 4;
6832 CHECK_OPSIZE (opsize);
6834 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6837 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6842 GET_BBLOCK (cfg, tblock, target);
6843 link_bblock (cfg, bblock, tblock);
6844 GET_BBLOCK (cfg, tblock, ip);
6845 link_bblock (cfg, bblock, tblock);
6847 if (sp != stack_start) {
6848 handle_stack_args (cfg, stack_start, sp - stack_start);
6849 CHECK_UNVERIFIABLE (cfg);
6852 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6853 cmp->sreg1 = sp [0]->dreg;
6854 type_from_op (cmp, sp [0], NULL);
6857 #if SIZEOF_REGISTER == 4
6858 if (cmp->opcode == OP_LCOMPARE_IMM) {
6859 /* Convert it to OP_LCOMPARE */
6860 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6861 ins->type = STACK_I8;
6862 ins->dreg = alloc_dreg (cfg, STACK_I8);
6864 MONO_ADD_INS (bblock, ins);
6865 cmp->opcode = OP_LCOMPARE;
6866 cmp->sreg2 = ins->dreg;
6869 MONO_ADD_INS (bblock, cmp);
6871 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6872 type_from_op (ins, sp [0], NULL);
6873 MONO_ADD_INS (bblock, ins);
6874 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6875 GET_BBLOCK (cfg, tblock, target);
6876 ins->inst_true_bb = tblock;
6877 GET_BBLOCK (cfg, tblock, ip);
6878 ins->inst_false_bb = tblock;
6879 start_new_bblock = 2;
6882 inline_costs += BRANCH_COST;
6897 MONO_INST_NEW (cfg, ins, *ip);
6899 target = ip + 4 + (gint32)read32(ip);
6905 inline_costs += BRANCH_COST;
6909 MonoBasicBlock **targets;
6910 MonoBasicBlock *default_bblock;
6911 MonoJumpInfoBBTable *table;
6912 int offset_reg = alloc_preg (cfg);
6913 int target_reg = alloc_preg (cfg);
6914 int table_reg = alloc_preg (cfg);
6915 int sum_reg = alloc_preg (cfg);
6916 gboolean use_op_switch;
6920 n = read32 (ip + 1);
6923 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6927 CHECK_OPSIZE (n * sizeof (guint32));
6928 target = ip + n * sizeof (guint32);
6930 GET_BBLOCK (cfg, default_bblock, target);
6932 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6933 for (i = 0; i < n; ++i) {
6934 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6935 targets [i] = tblock;
6939 if (sp != stack_start) {
6941 * Link the current bb with the targets as well, so handle_stack_args
6942 * will set their in_stack correctly.
6944 link_bblock (cfg, bblock, default_bblock);
6945 for (i = 0; i < n; ++i)
6946 link_bblock (cfg, bblock, targets [i]);
6948 handle_stack_args (cfg, stack_start, sp - stack_start);
6950 CHECK_UNVERIFIABLE (cfg);
6953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6957 for (i = 0; i < n; ++i)
6958 link_bblock (cfg, bblock, targets [i]);
6960 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6961 table->table = targets;
6962 table->table_size = n;
6964 use_op_switch = FALSE;
6966 /* ARM implements SWITCH statements differently */
6967 /* FIXME: Make it use the generic implementation */
6968 if (!cfg->compile_aot)
6969 use_op_switch = TRUE;
6972 if (COMPILE_LLVM (cfg))
6973 use_op_switch = TRUE;
6975 cfg->cbb->has_jump_table = 1;
6977 if (use_op_switch) {
6978 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6979 ins->sreg1 = src1->dreg;
6980 ins->inst_p0 = table;
6981 ins->inst_many_bb = targets;
6982 ins->klass = GUINT_TO_POINTER (n);
6983 MONO_ADD_INS (cfg->cbb, ins);
6985 if (sizeof (gpointer) == 8)
6986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6990 #if SIZEOF_REGISTER == 8
6991 /* The upper word might not be zero, and we add it to a 64 bit address later */
6992 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6995 if (cfg->compile_aot) {
6996 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6998 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6999 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7000 ins->inst_p0 = table;
7001 ins->dreg = table_reg;
7002 MONO_ADD_INS (cfg->cbb, ins);
7005 /* FIXME: Use load_memindex */
7006 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7007 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7008 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7010 start_new_bblock = 1;
7011 inline_costs += (BRANCH_COST * 2);
7031 dreg = alloc_freg (cfg);
7034 dreg = alloc_lreg (cfg);
7037 dreg = alloc_preg (cfg);
7040 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7041 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7042 ins->flags |= ins_flag;
7044 MONO_ADD_INS (bblock, ins);
7059 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7060 ins->flags |= ins_flag;
7062 MONO_ADD_INS (bblock, ins);
7064 #if HAVE_WRITE_BARRIERS
7065 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7066 /* insert call to write barrier */
7067 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7068 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7079 MONO_INST_NEW (cfg, ins, (*ip));
7081 ins->sreg1 = sp [0]->dreg;
7082 ins->sreg2 = sp [1]->dreg;
7083 type_from_op (ins, sp [0], sp [1]);
7085 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7087 /* Use the immediate opcodes if possible */
7088 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7089 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7090 if (imm_opcode != -1) {
7091 ins->opcode = imm_opcode;
7092 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7095 sp [1]->opcode = OP_NOP;
7099 MONO_ADD_INS ((cfg)->cbb, (ins));
7101 *sp++ = mono_decompose_opcode (cfg, ins);
7118 MONO_INST_NEW (cfg, ins, (*ip));
7120 ins->sreg1 = sp [0]->dreg;
7121 ins->sreg2 = sp [1]->dreg;
7122 type_from_op (ins, sp [0], sp [1]);
7124 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7125 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7127 /* FIXME: Pass opcode to is_inst_imm */
7129 /* Use the immediate opcodes if possible */
7130 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7133 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7134 if (imm_opcode != -1) {
7135 ins->opcode = imm_opcode;
7136 if (sp [1]->opcode == OP_I8CONST) {
7137 #if SIZEOF_REGISTER == 8
7138 ins->inst_imm = sp [1]->inst_l;
7140 ins->inst_ls_word = sp [1]->inst_ls_word;
7141 ins->inst_ms_word = sp [1]->inst_ms_word;
7145 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7148 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7149 if (sp [1]->next == NULL)
7150 sp [1]->opcode = OP_NOP;
7153 MONO_ADD_INS ((cfg)->cbb, (ins));
7155 *sp++ = mono_decompose_opcode (cfg, ins);
7168 case CEE_CONV_OVF_I8:
7169 case CEE_CONV_OVF_U8:
7173 /* Special case this earlier so we have long constants in the IR */
7174 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7175 int data = sp [-1]->inst_c0;
7176 sp [-1]->opcode = OP_I8CONST;
7177 sp [-1]->type = STACK_I8;
7178 #if SIZEOF_REGISTER == 8
7179 if ((*ip) == CEE_CONV_U8)
7180 sp [-1]->inst_c0 = (guint32)data;
7182 sp [-1]->inst_c0 = data;
7184 sp [-1]->inst_ls_word = data;
7185 if ((*ip) == CEE_CONV_U8)
7186 sp [-1]->inst_ms_word = 0;
7188 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7190 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7197 case CEE_CONV_OVF_I4:
7198 case CEE_CONV_OVF_I1:
7199 case CEE_CONV_OVF_I2:
7200 case CEE_CONV_OVF_I:
7201 case CEE_CONV_OVF_U:
7204 if (sp [-1]->type == STACK_R8) {
7205 ADD_UNOP (CEE_CONV_OVF_I8);
7212 case CEE_CONV_OVF_U1:
7213 case CEE_CONV_OVF_U2:
7214 case CEE_CONV_OVF_U4:
7217 if (sp [-1]->type == STACK_R8) {
7218 ADD_UNOP (CEE_CONV_OVF_U8);
7225 case CEE_CONV_OVF_I1_UN:
7226 case CEE_CONV_OVF_I2_UN:
7227 case CEE_CONV_OVF_I4_UN:
7228 case CEE_CONV_OVF_I8_UN:
7229 case CEE_CONV_OVF_U1_UN:
7230 case CEE_CONV_OVF_U2_UN:
7231 case CEE_CONV_OVF_U4_UN:
7232 case CEE_CONV_OVF_U8_UN:
7233 case CEE_CONV_OVF_I_UN:
7234 case CEE_CONV_OVF_U_UN:
7244 case CEE_ADD_OVF_UN:
7246 case CEE_MUL_OVF_UN:
7248 case CEE_SUB_OVF_UN:
7256 token = read32 (ip + 1);
7257 klass = mini_get_class (method, token, generic_context);
7258 CHECK_TYPELOAD (klass);
7260 if (generic_class_is_reference_type (cfg, klass)) {
7261 MonoInst *store, *load;
7262 int dreg = alloc_preg (cfg);
7264 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7265 load->flags |= ins_flag;
7266 MONO_ADD_INS (cfg->cbb, load);
7268 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7269 store->flags |= ins_flag;
7270 MONO_ADD_INS (cfg->cbb, store);
7272 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7284 token = read32 (ip + 1);
7285 klass = mini_get_class (method, token, generic_context);
7286 CHECK_TYPELOAD (klass);
7288 /* Optimize the common ldobj+stloc combination */
7298 loc_index = ip [5] - CEE_STLOC_0;
7305 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7306 CHECK_LOCAL (loc_index);
7308 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7309 ins->dreg = cfg->locals [loc_index]->dreg;
7315 /* Optimize the ldobj+stobj combination */
7316 /* The reference case ends up being a load+store anyway */
7317 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7322 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7329 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7338 CHECK_STACK_OVF (1);
7340 n = read32 (ip + 1);
7342 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7343 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7344 ins->type = STACK_OBJ;
7347 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7348 MonoInst *iargs [1];
7350 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7351 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7353 if (cfg->opt & MONO_OPT_SHARED) {
7354 MonoInst *iargs [3];
7356 if (cfg->compile_aot) {
7357 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7359 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7360 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7361 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7362 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7363 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7365 if (bblock->out_of_line) {
7366 MonoInst *iargs [2];
7368 if (image == mono_defaults.corlib) {
7370 * Avoid relocations in AOT and save some space by using a
7371 * version of helper_ldstr specialized to mscorlib.
7373 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7374 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7376 /* Avoid creating the string object */
7377 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7378 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7379 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7383 if (cfg->compile_aot) {
7384 NEW_LDSTRCONST (cfg, ins, image, n);
7386 MONO_ADD_INS (bblock, ins);
7389 NEW_PCONST (cfg, ins, NULL);
7390 ins->type = STACK_OBJ;
7391 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7393 MONO_ADD_INS (bblock, ins);
7402 MonoInst *iargs [2];
7403 MonoMethodSignature *fsig;
7406 MonoInst *vtable_arg = NULL;
7409 token = read32 (ip + 1);
7410 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7413 fsig = mono_method_get_signature (cmethod, image, token);
7415 mono_save_token_info (cfg, image, token, cmethod);
7417 if (!mono_class_init (cmethod->klass))
7420 if (cfg->generic_sharing_context)
7421 context_used = mono_method_check_context_used (cmethod);
7423 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7424 if (check_linkdemand (cfg, method, cmethod))
7426 CHECK_CFG_EXCEPTION;
7427 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7428 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7431 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7432 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7433 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7435 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7436 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7438 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7442 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7443 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7445 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7447 CHECK_TYPELOAD (cmethod->klass);
7448 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7453 n = fsig->param_count;
7457 * Generate smaller code for the common newobj <exception> instruction in
7458 * argument checking code.
7460 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7461 is_exception_class (cmethod->klass) && n <= 2 &&
7462 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7463 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7464 MonoInst *iargs [3];
7466 g_assert (!vtable_arg);
7470 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7473 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7477 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7482 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7485 g_assert_not_reached ();
7493 /* move the args to allow room for 'this' in the first position */
7499 /* check_call_signature () requires sp[0] to be set */
7500 this_ins.type = STACK_OBJ;
7502 if (check_call_signature (cfg, fsig, sp))
7507 if (mini_class_is_system_array (cmethod->klass)) {
7508 g_assert (!vtable_arg);
7511 *sp = emit_get_rgctx_method (cfg, context_used,
7512 cmethod, MONO_RGCTX_INFO_METHOD);
7514 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7517 /* Avoid varargs in the common case */
7518 if (fsig->param_count == 1)
7519 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7520 else if (fsig->param_count == 2)
7521 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7523 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7524 } else if (cmethod->string_ctor) {
7525 g_assert (!context_used);
7526 g_assert (!vtable_arg);
7527 /* we simply pass a null pointer */
7528 EMIT_NEW_PCONST (cfg, *sp, NULL);
7529 /* now call the string ctor */
7530 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7532 MonoInst* callvirt_this_arg = NULL;
7534 if (cmethod->klass->valuetype) {
7535 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7536 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7537 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7542 * The code generated by mini_emit_virtual_call () expects
7543 * iargs [0] to be a boxed instance, but luckily the vcall
7544 * will be transformed into a normal call there.
7546 } else if (context_used) {
7550 if (cfg->opt & MONO_OPT_SHARED)
7551 rgctx_info = MONO_RGCTX_INFO_KLASS;
7553 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7554 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7556 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7559 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7561 CHECK_TYPELOAD (cmethod->klass);
7564 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7565 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7566 * As a workaround, we call class cctors before allocating objects.
7568 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7569 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7570 if (cfg->verbose_level > 2)
7571 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7572 class_inits = g_slist_prepend (class_inits, vtable);
7575 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7580 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7582 /* Now call the actual ctor */
7583 /* Avoid virtual calls to ctors if possible */
7584 if (cmethod->klass->marshalbyref)
7585 callvirt_this_arg = sp [0];
7587 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7588 mono_method_check_inlining (cfg, cmethod) &&
7589 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7590 !g_list_find (dont_inline, cmethod)) {
7593 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7594 cfg->real_offset += 5;
7597 inline_costs += costs - 5;
7600 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7602 } else if (context_used &&
7603 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7604 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7605 MonoInst *cmethod_addr;
7607 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7608 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7610 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7613 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7614 callvirt_this_arg, NULL, vtable_arg);
7618 if (alloc == NULL) {
7620 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7621 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7635 token = read32 (ip + 1);
7636 klass = mini_get_class (method, token, generic_context);
7637 CHECK_TYPELOAD (klass);
7638 if (sp [0]->type != STACK_OBJ)
7641 if (cfg->generic_sharing_context)
7642 context_used = mono_class_check_context_used (klass);
7651 args [1] = emit_get_rgctx_klass (cfg, context_used,
7652 klass, MONO_RGCTX_INFO_KLASS);
7654 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7658 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7659 MonoMethod *mono_castclass;
7660 MonoInst *iargs [1];
7663 mono_castclass = mono_marshal_get_castclass (klass);
7666 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7667 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7668 g_assert (costs > 0);
7671 cfg->real_offset += 5;
7676 inline_costs += costs;
7679 ins = handle_castclass (cfg, klass, *sp);
7689 token = read32 (ip + 1);
7690 klass = mini_get_class (method, token, generic_context);
7691 CHECK_TYPELOAD (klass);
7692 if (sp [0]->type != STACK_OBJ)
7695 if (cfg->generic_sharing_context)
7696 context_used = mono_class_check_context_used (klass);
7705 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7707 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7711 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7712 MonoMethod *mono_isinst;
7713 MonoInst *iargs [1];
7716 mono_isinst = mono_marshal_get_isinst (klass);
7719 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7720 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7721 g_assert (costs > 0);
7724 cfg->real_offset += 5;
7729 inline_costs += costs;
7732 ins = handle_isinst (cfg, klass, *sp);
7739 case CEE_UNBOX_ANY: {
7743 token = read32 (ip + 1);
7744 klass = mini_get_class (method, token, generic_context);
7745 CHECK_TYPELOAD (klass);
7747 mono_save_token_info (cfg, image, token, klass);
7749 if (cfg->generic_sharing_context)
7750 context_used = mono_class_check_context_used (klass);
7752 if (generic_class_is_reference_type (cfg, klass)) {
7755 MonoInst *iargs [2];
7760 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7761 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7765 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7766 MonoMethod *mono_castclass;
7767 MonoInst *iargs [1];
7770 mono_castclass = mono_marshal_get_castclass (klass);
7773 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7774 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7776 g_assert (costs > 0);
7779 cfg->real_offset += 5;
7783 inline_costs += costs;
7785 ins = handle_castclass (cfg, klass, *sp);
7793 if (mono_class_is_nullable (klass)) {
7794 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7801 ins = handle_unbox (cfg, klass, sp, context_used);
7807 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7820 token = read32 (ip + 1);
7821 klass = mini_get_class (method, token, generic_context);
7822 CHECK_TYPELOAD (klass);
7824 mono_save_token_info (cfg, image, token, klass);
7826 if (cfg->generic_sharing_context)
7827 context_used = mono_class_check_context_used (klass);
7829 if (generic_class_is_reference_type (cfg, klass)) {
7835 if (klass == mono_defaults.void_class)
7837 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7839 /* frequent check in generic code: box (struct), brtrue */
7840 if (!mono_class_is_nullable (klass) &&
7841 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7842 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7844 MONO_INST_NEW (cfg, ins, OP_BR);
7845 if (*ip == CEE_BRTRUE_S) {
7848 target = ip + 1 + (signed char)(*ip);
7853 target = ip + 4 + (gint)(read32 (ip));
7856 GET_BBLOCK (cfg, tblock, target);
7857 link_bblock (cfg, bblock, tblock);
7858 ins->inst_target_bb = tblock;
7859 GET_BBLOCK (cfg, tblock, ip);
7861 * This leads to some inconsistency, since the two bblocks are
7862 * not really connected, but it is needed for handling stack
7863 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7864 * FIXME: This should only be needed if sp != stack_start, but that
7865 * doesn't work for some reason (test failure in mcs/tests on x86).
7867 link_bblock (cfg, bblock, tblock);
7868 if (sp != stack_start) {
7869 handle_stack_args (cfg, stack_start, sp - stack_start);
7871 CHECK_UNVERIFIABLE (cfg);
7873 MONO_ADD_INS (bblock, ins);
7874 start_new_bblock = 1;
7882 if (cfg->opt & MONO_OPT_SHARED)
7883 rgctx_info = MONO_RGCTX_INFO_KLASS;
7885 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7886 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7887 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7889 *sp++ = handle_box (cfg, val, klass);
7900 token = read32 (ip + 1);
7901 klass = mini_get_class (method, token, generic_context);
7902 CHECK_TYPELOAD (klass);
7904 mono_save_token_info (cfg, image, token, klass);
7906 if (cfg->generic_sharing_context)
7907 context_used = mono_class_check_context_used (klass);
7909 if (mono_class_is_nullable (klass)) {
7912 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7913 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7917 ins = handle_unbox (cfg, klass, sp, context_used);
7927 MonoClassField *field;
7931 if (*ip == CEE_STFLD) {
7938 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7940 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7943 token = read32 (ip + 1);
7944 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7945 field = mono_method_get_wrapper_data (method, token);
7946 klass = field->parent;
7949 field = mono_field_from_token (image, token, &klass, generic_context);
7953 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7954 FIELD_ACCESS_FAILURE;
7955 mono_class_init (klass);
7957 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7958 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7959 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7960 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7963 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7964 if (*ip == CEE_STFLD) {
7965 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7967 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7968 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7969 MonoInst *iargs [5];
7972 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7973 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7974 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7978 if (cfg->opt & MONO_OPT_INLINE) {
7979 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7980 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7981 g_assert (costs > 0);
7983 cfg->real_offset += 5;
7986 inline_costs += costs;
7988 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7993 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7995 #if HAVE_WRITE_BARRIERS
7996 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7997 /* insert call to write barrier */
7998 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7999 MonoInst *iargs [2];
8002 dreg = alloc_preg (cfg);
8003 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8005 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8009 store->flags |= ins_flag;
8016 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8017 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8018 MonoInst *iargs [4];
8021 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8022 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8023 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8024 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8025 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8026 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8028 g_assert (costs > 0);
8030 cfg->real_offset += 5;
8034 inline_costs += costs;
8036 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8040 if (sp [0]->type == STACK_VTYPE) {
8043 /* Have to compute the address of the variable */
8045 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8047 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8049 g_assert (var->klass == klass);
8051 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8055 if (*ip == CEE_LDFLDA) {
8056 dreg = alloc_preg (cfg);
8058 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8059 ins->klass = mono_class_from_mono_type (field->type);
8060 ins->type = STACK_MP;
8065 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8066 load->flags |= ins_flag;
8077 MonoClassField *field;
8078 gpointer addr = NULL;
8079 gboolean is_special_static;
8082 token = read32 (ip + 1);
8084 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8085 field = mono_method_get_wrapper_data (method, token);
8086 klass = field->parent;
8089 field = mono_field_from_token (image, token, &klass, generic_context);
8092 mono_class_init (klass);
8093 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8094 FIELD_ACCESS_FAILURE;
8096 /* if the class is Critical then transparent code cannot access it's fields */
8097 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8098 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8101 * We can only support shared generic static
8102 * field access on architectures where the
8103 * trampoline code has been extended to handle
8104 * the generic class init.
8106 #ifndef MONO_ARCH_VTABLE_REG
8107 GENERIC_SHARING_FAILURE (*ip);
8110 if (cfg->generic_sharing_context)
8111 context_used = mono_class_check_context_used (klass);
8113 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8115 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8116 * to be called here.
8118 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8119 mono_class_vtable (cfg->domain, klass);
8120 CHECK_TYPELOAD (klass);
8122 mono_domain_lock (cfg->domain);
8123 if (cfg->domain->special_static_fields)
8124 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8125 mono_domain_unlock (cfg->domain);
8127 is_special_static = mono_class_field_is_special_static (field);
8129 /* Generate IR to compute the field address */
8131 if ((cfg->opt & MONO_OPT_SHARED) ||
8132 (cfg->compile_aot && is_special_static) ||
8133 (context_used && is_special_static)) {
8134 MonoInst *iargs [2];
8136 g_assert (field->parent);
8137 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8139 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8140 field, MONO_RGCTX_INFO_CLASS_FIELD);
8142 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8144 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8145 } else if (context_used) {
8146 MonoInst *static_data;
8149 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8150 method->klass->name_space, method->klass->name, method->name,
8151 depth, field->offset);
8154 if (mono_class_needs_cctor_run (klass, method)) {
8158 vtable = emit_get_rgctx_klass (cfg, context_used,
8159 klass, MONO_RGCTX_INFO_VTABLE);
8161 // FIXME: This doesn't work since it tries to pass the argument
8162 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8164 * The vtable pointer is always passed in a register regardless of
8165 * the calling convention, so assign it manually, and make a call
8166 * using a signature without parameters.
8168 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8169 #ifdef MONO_ARCH_VTABLE_REG
8170 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8171 cfg->uses_vtable_reg = TRUE;
8178 * The pointer we're computing here is
8180 * super_info.static_data + field->offset
8182 static_data = emit_get_rgctx_klass (cfg, context_used,
8183 klass, MONO_RGCTX_INFO_STATIC_DATA);
8185 if (field->offset == 0) {
8188 int addr_reg = mono_alloc_preg (cfg);
8189 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8191 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8192 MonoInst *iargs [2];
8194 g_assert (field->parent);
8195 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8196 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8197 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8199 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8201 CHECK_TYPELOAD (klass);
8203 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8204 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8205 if (cfg->verbose_level > 2)
8206 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8207 class_inits = g_slist_prepend (class_inits, vtable);
8209 if (cfg->run_cctors) {
8211 /* This makes so that inline cannot trigger */
8212 /* .cctors: too many apps depend on them */
8213 /* running with a specific order... */
8214 if (! vtable->initialized)
8216 ex = mono_runtime_class_init_full (vtable, FALSE);
8218 set_exception_object (cfg, ex);
8219 goto exception_exit;
8223 addr = (char*)vtable->data + field->offset;
8225 if (cfg->compile_aot)
8226 EMIT_NEW_SFLDACONST (cfg, ins, field);
8228 EMIT_NEW_PCONST (cfg, ins, addr);
8231 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8232 * This could be later optimized to do just a couple of
8233 * memory dereferences with constant offsets.
8235 MonoInst *iargs [1];
8236 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8237 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8241 /* Generate IR to do the actual load/store operation */
8243 if (*ip == CEE_LDSFLDA) {
8244 ins->klass = mono_class_from_mono_type (field->type);
8245 ins->type = STACK_PTR;
8247 } else if (*ip == CEE_STSFLD) {
8252 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8253 store->flags |= ins_flag;
8255 gboolean is_const = FALSE;
8256 MonoVTable *vtable = NULL;
8258 if (!context_used) {
8259 vtable = mono_class_vtable (cfg->domain, klass);
8260 CHECK_TYPELOAD (klass);
8262 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8263 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8264 gpointer addr = (char*)vtable->data + field->offset;
8265 int ro_type = field->type->type;
8266 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8267 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8269 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8272 case MONO_TYPE_BOOLEAN:
8274 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8278 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8281 case MONO_TYPE_CHAR:
8283 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8287 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8292 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8296 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8299 #ifndef HAVE_MOVING_COLLECTOR
8302 case MONO_TYPE_STRING:
8303 case MONO_TYPE_OBJECT:
8304 case MONO_TYPE_CLASS:
8305 case MONO_TYPE_SZARRAY:
8307 case MONO_TYPE_FNPTR:
8308 case MONO_TYPE_ARRAY:
8309 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8310 type_to_eval_stack_type ((cfg), field->type, *sp);
8316 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8321 case MONO_TYPE_VALUETYPE:
8331 CHECK_STACK_OVF (1);
8333 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8334 load->flags |= ins_flag;
8347 token = read32 (ip + 1);
8348 klass = mini_get_class (method, token, generic_context);
8349 CHECK_TYPELOAD (klass);
8350 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8351 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8362 const char *data_ptr;
8364 guint32 field_token;
8370 token = read32 (ip + 1);
8372 klass = mini_get_class (method, token, generic_context);
8373 CHECK_TYPELOAD (klass);
8375 if (cfg->generic_sharing_context)
8376 context_used = mono_class_check_context_used (klass);
8381 /* FIXME: Decompose later to help abcrem */
8384 args [0] = emit_get_rgctx_klass (cfg, context_used,
8385 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8390 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8392 if (cfg->opt & MONO_OPT_SHARED) {
8393 /* Decompose now to avoid problems with references to the domainvar */
8394 MonoInst *iargs [3];
8396 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8397 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8400 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8402 /* Decompose later since it is needed by abcrem */
8403 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8404 ins->dreg = alloc_preg (cfg);
8405 ins->sreg1 = sp [0]->dreg;
8406 ins->inst_newa_class = klass;
8407 ins->type = STACK_OBJ;
8409 MONO_ADD_INS (cfg->cbb, ins);
8410 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8411 cfg->cbb->has_array_access = TRUE;
8413 /* Needed so mono_emit_load_get_addr () gets called */
8414 mono_get_got_var (cfg);
8424 * we inline/optimize the initialization sequence if possible.
8425 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8426 * for small sizes open code the memcpy
8427 * ensure the rva field is big enough
8429 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8430 MonoMethod *memcpy_method = get_memcpy_method ();
8431 MonoInst *iargs [3];
8432 int add_reg = alloc_preg (cfg);
8434 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8435 if (cfg->compile_aot) {
8436 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8438 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8440 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8441 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8450 if (sp [0]->type != STACK_OBJ)
8453 dreg = alloc_preg (cfg);
8454 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8455 ins->dreg = alloc_preg (cfg);
8456 ins->sreg1 = sp [0]->dreg;
8457 ins->type = STACK_I4;
8458 MONO_ADD_INS (cfg->cbb, ins);
8459 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8460 cfg->cbb->has_array_access = TRUE;
8468 if (sp [0]->type != STACK_OBJ)
8471 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8473 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8474 CHECK_TYPELOAD (klass);
8475 /* we need to make sure that this array is exactly the type it needs
8476 * to be for correctness. the wrappers are lax with their usage
8477 * so we need to ignore them here
8479 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8480 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8483 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8487 case CEE_LDELEM_ANY:
8498 case CEE_LDELEM_REF: {
8504 if (*ip == CEE_LDELEM_ANY) {
8506 token = read32 (ip + 1);
8507 klass = mini_get_class (method, token, generic_context);
8508 CHECK_TYPELOAD (klass);
8509 mono_class_init (klass);
8512 klass = array_access_to_klass (*ip);
8514 if (sp [0]->type != STACK_OBJ)
8517 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8519 if (sp [1]->opcode == OP_ICONST) {
8520 int array_reg = sp [0]->dreg;
8521 int index_reg = sp [1]->dreg;
8522 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8524 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8525 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8527 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8528 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8531 if (*ip == CEE_LDELEM_ANY)
8544 case CEE_STELEM_REF:
8545 case CEE_STELEM_ANY: {
8551 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8553 if (*ip == CEE_STELEM_ANY) {
8555 token = read32 (ip + 1);
8556 klass = mini_get_class (method, token, generic_context);
8557 CHECK_TYPELOAD (klass);
8558 mono_class_init (klass);
8561 klass = array_access_to_klass (*ip);
8563 if (sp [0]->type != STACK_OBJ)
8566 /* storing a NULL doesn't need any of the complex checks in stelemref */
8567 if (generic_class_is_reference_type (cfg, klass) &&
8568 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8569 MonoMethod* helper = mono_marshal_get_stelemref ();
8570 MonoInst *iargs [3];
8572 if (sp [0]->type != STACK_OBJ)
8574 if (sp [2]->type != STACK_OBJ)
8581 mono_emit_method_call (cfg, helper, iargs, NULL);
8583 if (sp [1]->opcode == OP_ICONST) {
8584 int array_reg = sp [0]->dreg;
8585 int index_reg = sp [1]->dreg;
8586 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8588 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8589 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8591 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8592 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8596 if (*ip == CEE_STELEM_ANY)
8603 case CEE_CKFINITE: {
8607 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8608 ins->sreg1 = sp [0]->dreg;
8609 ins->dreg = alloc_freg (cfg);
8610 ins->type = STACK_R8;
8611 MONO_ADD_INS (bblock, ins);
8613 *sp++ = mono_decompose_opcode (cfg, ins);
8618 case CEE_REFANYVAL: {
8619 MonoInst *src_var, *src;
8621 int klass_reg = alloc_preg (cfg);
8622 int dreg = alloc_preg (cfg);
8625 MONO_INST_NEW (cfg, ins, *ip);
8628 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8629 CHECK_TYPELOAD (klass);
8630 mono_class_init (klass);
8632 if (cfg->generic_sharing_context)
8633 context_used = mono_class_check_context_used (klass);
8636 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8638 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8639 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8640 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8643 MonoInst *klass_ins;
8645 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8646 klass, MONO_RGCTX_INFO_KLASS);
8649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8650 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8652 mini_emit_class_check (cfg, klass_reg, klass);
8654 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8655 ins->type = STACK_MP;
8660 case CEE_MKREFANY: {
8661 MonoInst *loc, *addr;
8664 MONO_INST_NEW (cfg, ins, *ip);
8667 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8668 CHECK_TYPELOAD (klass);
8669 mono_class_init (klass);
8671 if (cfg->generic_sharing_context)
8672 context_used = mono_class_check_context_used (klass);
8674 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8675 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8678 MonoInst *const_ins;
8679 int type_reg = alloc_preg (cfg);
8681 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8685 } else if (cfg->compile_aot) {
8686 int const_reg = alloc_preg (cfg);
8687 int type_reg = alloc_preg (cfg);
8689 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8694 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8695 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8699 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8700 ins->type = STACK_VTYPE;
8701 ins->klass = mono_defaults.typed_reference_class;
8708 MonoClass *handle_class;
8710 CHECK_STACK_OVF (1);
8713 n = read32 (ip + 1);
8715 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8716 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8717 handle = mono_method_get_wrapper_data (method, n);
8718 handle_class = mono_method_get_wrapper_data (method, n + 1);
8719 if (handle_class == mono_defaults.typehandle_class)
8720 handle = &((MonoClass*)handle)->byval_arg;
8723 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8727 mono_class_init (handle_class);
8728 if (cfg->generic_sharing_context) {
8729 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8730 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8731 /* This case handles ldtoken
8732 of an open type, like for
8735 } else if (handle_class == mono_defaults.typehandle_class) {
8736 /* If we get a MONO_TYPE_CLASS
8737 then we need to provide the
8739 instantiation of it. */
8740 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8743 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8744 } else if (handle_class == mono_defaults.fieldhandle_class)
8745 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8746 else if (handle_class == mono_defaults.methodhandle_class)
8747 context_used = mono_method_check_context_used (handle);
8749 g_assert_not_reached ();
8752 if ((cfg->opt & MONO_OPT_SHARED) &&
8753 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8754 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8755 MonoInst *addr, *vtvar, *iargs [3];
8756 int method_context_used;
8758 if (cfg->generic_sharing_context)
8759 method_context_used = mono_method_check_context_used (method);
8761 method_context_used = 0;
8763 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8765 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8766 EMIT_NEW_ICONST (cfg, iargs [1], n);
8767 if (method_context_used) {
8768 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8769 method, MONO_RGCTX_INFO_METHOD);
8770 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8772 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8773 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8775 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8777 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8779 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8781 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8782 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8783 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8784 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8785 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8786 MonoClass *tclass = mono_class_from_mono_type (handle);
8788 mono_class_init (tclass);
8790 ins = emit_get_rgctx_klass (cfg, context_used,
8791 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8792 } else if (cfg->compile_aot) {
8793 if (method->wrapper_type) {
8794 /* FIXME: n is not a normal token */
8795 cfg->disable_aot = TRUE;
8796 EMIT_NEW_PCONST (cfg, ins, NULL);
8798 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8801 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8803 ins->type = STACK_OBJ;
8804 ins->klass = cmethod->klass;
8807 MonoInst *addr, *vtvar;
8809 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8812 if (handle_class == mono_defaults.typehandle_class) {
8813 ins = emit_get_rgctx_klass (cfg, context_used,
8814 mono_class_from_mono_type (handle),
8815 MONO_RGCTX_INFO_TYPE);
8816 } else if (handle_class == mono_defaults.methodhandle_class) {
8817 ins = emit_get_rgctx_method (cfg, context_used,
8818 handle, MONO_RGCTX_INFO_METHOD);
8819 } else if (handle_class == mono_defaults.fieldhandle_class) {
8820 ins = emit_get_rgctx_field (cfg, context_used,
8821 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8823 g_assert_not_reached ();
8825 } else if (cfg->compile_aot) {
8826 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8828 EMIT_NEW_PCONST (cfg, ins, handle);
8830 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8831 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8832 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8842 MONO_INST_NEW (cfg, ins, OP_THROW);
8844 ins->sreg1 = sp [0]->dreg;
8846 bblock->out_of_line = TRUE;
8847 MONO_ADD_INS (bblock, ins);
8848 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8849 MONO_ADD_INS (bblock, ins);
8852 link_bblock (cfg, bblock, end_bblock);
8853 start_new_bblock = 1;
8855 case CEE_ENDFINALLY:
8856 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8857 MONO_ADD_INS (bblock, ins);
8859 start_new_bblock = 1;
8862 * Control will leave the method so empty the stack, otherwise
8863 * the next basic block will start with a nonempty stack.
8865 while (sp != stack_start) {
8873 if (*ip == CEE_LEAVE) {
8875 target = ip + 5 + (gint32)read32(ip + 1);
8878 target = ip + 2 + (signed char)(ip [1]);
8881 /* empty the stack */
8882 while (sp != stack_start) {
8887 * If this leave statement is in a catch block, check for a
8888 * pending exception, and rethrow it if necessary.
8890 for (i = 0; i < header->num_clauses; ++i) {
8891 MonoExceptionClause *clause = &header->clauses [i];
8894 * Use <= in the final comparison to handle clauses with multiple
8895 * leave statements, like in bug #78024.
8896 * The ordering of the exception clauses guarantees that we find the
8899 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8901 MonoBasicBlock *dont_throw;
8906 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8909 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8911 NEW_BBLOCK (cfg, dont_throw);
8914 * Currently, we always rethrow the abort exception, despite the
8915 * fact that this is not correct. See thread6.cs for an example.
8916 * But propagating the abort exception is more important than
8917 * getting the semantics right.
8919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8921 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8923 MONO_START_BB (cfg, dont_throw);
8928 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8930 for (tmp = handlers; tmp; tmp = tmp->next) {
8932 link_bblock (cfg, bblock, tblock);
8933 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8934 ins->inst_target_bb = tblock;
8935 MONO_ADD_INS (bblock, ins);
8936 bblock->has_call_handler = 1;
8938 g_list_free (handlers);
8941 MONO_INST_NEW (cfg, ins, OP_BR);
8942 MONO_ADD_INS (bblock, ins);
8943 GET_BBLOCK (cfg, tblock, target);
8944 link_bblock (cfg, bblock, tblock);
8945 ins->inst_target_bb = tblock;
8946 start_new_bblock = 1;
8948 if (*ip == CEE_LEAVE)
8957 * Mono specific opcodes
8959 case MONO_CUSTOM_PREFIX: {
8961 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8965 case CEE_MONO_ICALL: {
8967 MonoJitICallInfo *info;
8969 token = read32 (ip + 2);
8970 func = mono_method_get_wrapper_data (method, token);
8971 info = mono_find_jit_icall_by_addr (func);
8974 CHECK_STACK (info->sig->param_count);
8975 sp -= info->sig->param_count;
8977 ins = mono_emit_jit_icall (cfg, info->func, sp);
8978 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8982 inline_costs += 10 * num_calls++;
8986 case CEE_MONO_LDPTR: {
8989 CHECK_STACK_OVF (1);
8991 token = read32 (ip + 2);
8993 ptr = mono_method_get_wrapper_data (method, token);
8994 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8995 MonoJitICallInfo *callinfo;
8996 const char *icall_name;
8998 icall_name = method->name + strlen ("__icall_wrapper_");
8999 g_assert (icall_name);
9000 callinfo = mono_find_jit_icall_by_name (icall_name);
9001 g_assert (callinfo);
9003 if (ptr == callinfo->func) {
9004 /* Will be transformed into an AOTCONST later */
9005 EMIT_NEW_PCONST (cfg, ins, ptr);
9011 /* FIXME: Generalize this */
9012 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9013 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9018 EMIT_NEW_PCONST (cfg, ins, ptr);
9021 inline_costs += 10 * num_calls++;
9022 /* Can't embed random pointers into AOT code */
9023 cfg->disable_aot = 1;
9026 case CEE_MONO_ICALL_ADDR: {
9027 MonoMethod *cmethod;
9030 CHECK_STACK_OVF (1);
9032 token = read32 (ip + 2);
9034 cmethod = mono_method_get_wrapper_data (method, token);
9036 if (cfg->compile_aot) {
9037 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9039 ptr = mono_lookup_internal_call (cmethod);
9041 EMIT_NEW_PCONST (cfg, ins, ptr);
9047 case CEE_MONO_VTADDR: {
9048 MonoInst *src_var, *src;
9054 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9055 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9060 case CEE_MONO_NEWOBJ: {
9061 MonoInst *iargs [2];
9063 CHECK_STACK_OVF (1);
9065 token = read32 (ip + 2);
9066 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9067 mono_class_init (klass);
9068 NEW_DOMAINCONST (cfg, iargs [0]);
9069 MONO_ADD_INS (cfg->cbb, iargs [0]);
9070 NEW_CLASSCONST (cfg, iargs [1], klass);
9071 MONO_ADD_INS (cfg->cbb, iargs [1]);
9072 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9074 inline_costs += 10 * num_calls++;
9077 case CEE_MONO_OBJADDR:
9080 MONO_INST_NEW (cfg, ins, OP_MOVE);
9081 ins->dreg = alloc_preg (cfg);
9082 ins->sreg1 = sp [0]->dreg;
9083 ins->type = STACK_MP;
9084 MONO_ADD_INS (cfg->cbb, ins);
9088 case CEE_MONO_LDNATIVEOBJ:
9090 * Similar to LDOBJ, but instead load the unmanaged
9091 * representation of the vtype to the stack.
9096 token = read32 (ip + 2);
9097 klass = mono_method_get_wrapper_data (method, token);
9098 g_assert (klass->valuetype);
9099 mono_class_init (klass);
9102 MonoInst *src, *dest, *temp;
9105 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9106 temp->backend.is_pinvoke = 1;
9107 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9108 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9110 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9111 dest->type = STACK_VTYPE;
9112 dest->klass = klass;
9118 case CEE_MONO_RETOBJ: {
9120 * Same as RET, but return the native representation of a vtype
9123 g_assert (cfg->ret);
9124 g_assert (mono_method_signature (method)->pinvoke);
9129 token = read32 (ip + 2);
9130 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9132 if (!cfg->vret_addr) {
9133 g_assert (cfg->ret_var_is_local);
9135 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9137 EMIT_NEW_RETLOADA (cfg, ins);
9139 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9141 if (sp != stack_start)
9144 MONO_INST_NEW (cfg, ins, OP_BR);
9145 ins->inst_target_bb = end_bblock;
9146 MONO_ADD_INS (bblock, ins);
9147 link_bblock (cfg, bblock, end_bblock);
9148 start_new_bblock = 1;
9152 case CEE_MONO_CISINST:
9153 case CEE_MONO_CCASTCLASS: {
9158 token = read32 (ip + 2);
9159 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9160 if (ip [1] == CEE_MONO_CISINST)
9161 ins = handle_cisinst (cfg, klass, sp [0]);
9163 ins = handle_ccastclass (cfg, klass, sp [0]);
9169 case CEE_MONO_SAVE_LMF:
9170 case CEE_MONO_RESTORE_LMF:
9171 #ifdef MONO_ARCH_HAVE_LMF_OPS
9172 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9173 MONO_ADD_INS (bblock, ins);
9174 cfg->need_lmf_area = TRUE;
9178 case CEE_MONO_CLASSCONST:
9179 CHECK_STACK_OVF (1);
9181 token = read32 (ip + 2);
9182 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9185 inline_costs += 10 * num_calls++;
9187 case CEE_MONO_NOT_TAKEN:
9188 bblock->out_of_line = TRUE;
9192 CHECK_STACK_OVF (1);
9194 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9195 ins->dreg = alloc_preg (cfg);
9196 ins->inst_offset = (gint32)read32 (ip + 2);
9197 ins->type = STACK_PTR;
9198 MONO_ADD_INS (bblock, ins);
9203 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9213 /* somewhat similar to LDTOKEN */
9214 MonoInst *addr, *vtvar;
9215 CHECK_STACK_OVF (1);
9216 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9218 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9219 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9221 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9222 ins->type = STACK_VTYPE;
9223 ins->klass = mono_defaults.argumenthandle_class;
9236 * The following transforms:
9237 * CEE_CEQ into OP_CEQ
9238 * CEE_CGT into OP_CGT
9239 * CEE_CGT_UN into OP_CGT_UN
9240 * CEE_CLT into OP_CLT
9241 * CEE_CLT_UN into OP_CLT_UN
9243 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9245 MONO_INST_NEW (cfg, ins, cmp->opcode);
9247 cmp->sreg1 = sp [0]->dreg;
9248 cmp->sreg2 = sp [1]->dreg;
9249 type_from_op (cmp, sp [0], sp [1]);
9251 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9252 cmp->opcode = OP_LCOMPARE;
9253 else if (sp [0]->type == STACK_R8)
9254 cmp->opcode = OP_FCOMPARE;
9256 cmp->opcode = OP_ICOMPARE;
9257 MONO_ADD_INS (bblock, cmp);
9258 ins->type = STACK_I4;
9259 ins->dreg = alloc_dreg (cfg, ins->type);
9260 type_from_op (ins, sp [0], sp [1]);
9262 if (cmp->opcode == OP_FCOMPARE) {
9264 * The backends expect the fceq opcodes to do the
9267 cmp->opcode = OP_NOP;
9268 ins->sreg1 = cmp->sreg1;
9269 ins->sreg2 = cmp->sreg2;
9271 MONO_ADD_INS (bblock, ins);
9278 MonoMethod *cil_method;
9279 gboolean needs_static_rgctx_invoke;
9281 CHECK_STACK_OVF (1);
9283 n = read32 (ip + 2);
9284 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9287 mono_class_init (cmethod->klass);
9289 mono_save_token_info (cfg, image, n, cmethod);
9291 if (cfg->generic_sharing_context)
9292 context_used = mono_method_check_context_used (cmethod);
9294 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9296 cil_method = cmethod;
9297 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9298 METHOD_ACCESS_FAILURE;
9300 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9301 if (check_linkdemand (cfg, method, cmethod))
9303 CHECK_CFG_EXCEPTION;
9304 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9305 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9309 * Optimize the common case of ldftn+delegate creation
9311 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9312 /* FIXME: SGEN support */
9313 /* FIXME: handle shared static generic methods */
9314 /* FIXME: handle this in shared code */
9315 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9316 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9317 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9318 MonoInst *target_ins;
9321 invoke = mono_get_delegate_invoke (ctor_method->klass);
9322 if (!invoke || !mono_method_signature (invoke))
9326 if (cfg->verbose_level > 3)
9327 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9328 target_ins = sp [-1];
9330 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9339 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9341 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9343 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9347 inline_costs += 10 * num_calls++;
9350 case CEE_LDVIRTFTN: {
9355 n = read32 (ip + 2);
9356 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9359 mono_class_init (cmethod->klass);
9361 if (cfg->generic_sharing_context)
9362 context_used = mono_method_check_context_used (cmethod);
9364 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9365 if (check_linkdemand (cfg, method, cmethod))
9367 CHECK_CFG_EXCEPTION;
9368 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9369 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9376 args [1] = emit_get_rgctx_method (cfg, context_used,
9377 cmethod, MONO_RGCTX_INFO_METHOD);
9378 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9380 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9381 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9385 inline_costs += 10 * num_calls++;
9389 CHECK_STACK_OVF (1);
9391 n = read16 (ip + 2);
9393 EMIT_NEW_ARGLOAD (cfg, ins, n);
9398 CHECK_STACK_OVF (1);
9400 n = read16 (ip + 2);
9402 NEW_ARGLOADA (cfg, ins, n);
9403 MONO_ADD_INS (cfg->cbb, ins);
9411 n = read16 (ip + 2);
9413 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9415 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9419 CHECK_STACK_OVF (1);
9421 n = read16 (ip + 2);
9423 EMIT_NEW_LOCLOAD (cfg, ins, n);
9428 unsigned char *tmp_ip;
9429 CHECK_STACK_OVF (1);
9431 n = read16 (ip + 2);
9434 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9440 EMIT_NEW_LOCLOADA (cfg, ins, n);
9449 n = read16 (ip + 2);
9451 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9453 emit_stloc_ir (cfg, sp, header, n);
9460 if (sp != stack_start)
9462 if (cfg->method != method)
9464 * Inlining this into a loop in a parent could lead to
9465 * stack overflows which is different behavior than the
9466 * non-inlined case, thus disable inlining in this case.
9468 goto inline_failure;
9470 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9471 ins->dreg = alloc_preg (cfg);
9472 ins->sreg1 = sp [0]->dreg;
9473 ins->type = STACK_PTR;
9474 MONO_ADD_INS (cfg->cbb, ins);
9476 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9478 ins->flags |= MONO_INST_INIT;
9483 case CEE_ENDFILTER: {
9484 MonoExceptionClause *clause, *nearest;
9485 int cc, nearest_num;
9489 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9491 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9492 ins->sreg1 = (*sp)->dreg;
9493 MONO_ADD_INS (bblock, ins);
9494 start_new_bblock = 1;
9499 for (cc = 0; cc < header->num_clauses; ++cc) {
9500 clause = &header->clauses [cc];
9501 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9502 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9503 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9509 if ((ip - header->code) != nearest->handler_offset)
9514 case CEE_UNALIGNED_:
9515 ins_flag |= MONO_INST_UNALIGNED;
9516 /* FIXME: record alignment? we can assume 1 for now */
9521 ins_flag |= MONO_INST_VOLATILE;
9525 ins_flag |= MONO_INST_TAILCALL;
9526 cfg->flags |= MONO_CFG_HAS_TAIL;
9527 /* Can't inline tail calls at this time */
9528 inline_costs += 100000;
9535 token = read32 (ip + 2);
9536 klass = mini_get_class (method, token, generic_context);
9537 CHECK_TYPELOAD (klass);
9538 if (generic_class_is_reference_type (cfg, klass))
9539 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9541 mini_emit_initobj (cfg, *sp, NULL, klass);
9545 case CEE_CONSTRAINED_:
9547 token = read32 (ip + 2);
9548 constrained_call = mono_class_get_full (image, token, generic_context);
9549 CHECK_TYPELOAD (constrained_call);
9554 MonoInst *iargs [3];
9558 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9559 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9560 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9561 /* emit_memset only works when val == 0 */
9562 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9567 if (ip [1] == CEE_CPBLK) {
9568 MonoMethod *memcpy_method = get_memcpy_method ();
9569 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9571 MonoMethod *memset_method = get_memset_method ();
9572 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9582 ins_flag |= MONO_INST_NOTYPECHECK;
9584 ins_flag |= MONO_INST_NORANGECHECK;
9585 /* we ignore the no-nullcheck for now since we
9586 * really do it explicitly only when doing callvirt->call
9592 int handler_offset = -1;
9594 for (i = 0; i < header->num_clauses; ++i) {
9595 MonoExceptionClause *clause = &header->clauses [i];
9596 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9597 handler_offset = clause->handler_offset;
9602 bblock->flags |= BB_EXCEPTION_UNSAFE;
9604 g_assert (handler_offset != -1);
9606 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9607 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9608 ins->sreg1 = load->dreg;
9609 MONO_ADD_INS (bblock, ins);
9611 link_bblock (cfg, bblock, end_bblock);
9612 start_new_bblock = 1;
9620 CHECK_STACK_OVF (1);
9622 token = read32 (ip + 2);
9623 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9624 MonoType *type = mono_type_create_from_typespec (image, token);
9625 token = mono_type_size (type, &ialign);
9627 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9628 CHECK_TYPELOAD (klass);
9629 mono_class_init (klass);
9630 token = mono_class_value_size (klass, &align);
9632 EMIT_NEW_ICONST (cfg, ins, token);
9637 case CEE_REFANYTYPE: {
9638 MonoInst *src_var, *src;
9644 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9646 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9647 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9658 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9663 g_error ("opcode 0x%02x not handled", *ip);
9666 if (start_new_bblock != 1)
9669 bblock->cil_length = ip - bblock->cil_code;
9670 bblock->next_bb = end_bblock;
9672 if (cfg->method == method && cfg->domainvar) {
9674 MonoInst *get_domain;
9676 cfg->cbb = init_localsbb;
9678 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9679 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9682 get_domain->dreg = alloc_preg (cfg);
9683 MONO_ADD_INS (cfg->cbb, get_domain);
9685 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9686 MONO_ADD_INS (cfg->cbb, store);
9689 #ifdef TARGET_POWERPC
9690 if (cfg->compile_aot)
9691 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9692 mono_get_got_var (cfg);
9695 if (cfg->method == method && cfg->got_var)
9696 mono_emit_load_got_addr (cfg);
9701 cfg->cbb = init_localsbb;
9703 for (i = 0; i < header->num_locals; ++i) {
9704 MonoType *ptype = header->locals [i];
9705 int t = ptype->type;
9706 dreg = cfg->locals [i]->dreg;
9708 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9709 t = mono_class_enum_basetype (ptype->data.klass)->type;
9711 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9712 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9713 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9714 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9715 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9716 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9717 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9718 ins->type = STACK_R8;
9719 ins->inst_p0 = (void*)&r8_0;
9720 ins->dreg = alloc_dreg (cfg, STACK_R8);
9721 MONO_ADD_INS (init_localsbb, ins);
9722 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9723 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9724 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9725 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9727 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9734 if (cfg->method == method) {
9736 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9737 bb->region = mono_find_block_region (cfg, bb->real_offset);
9739 mono_create_spvar_for_region (cfg, bb->region);
9740 if (cfg->verbose_level > 2)
9741 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9745 g_slist_free (class_inits);
9746 dont_inline = g_list_remove (dont_inline, method);
9748 if (inline_costs < 0) {
9751 /* Method is too large */
9752 mname = mono_method_full_name (method, TRUE);
9753 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9754 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9759 if ((cfg->verbose_level > 2) && (cfg->method == method))
9760 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9762 return inline_costs;
9765 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9766 g_slist_free (class_inits);
9767 dont_inline = g_list_remove (dont_inline, method);
9771 g_slist_free (class_inits);
9772 dont_inline = g_list_remove (dont_inline, method);
9776 g_slist_free (class_inits);
9777 dont_inline = g_list_remove (dont_inline, method);
9778 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9782 g_slist_free (class_inits);
9783 dont_inline = g_list_remove (dont_inline, method);
9784 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart so a
 * register store whose source is a known constant can be turned into an
 * immediate store. Asserts for opcodes with no immediate variant.
 * NOTE(review): the switch header/default and closing braces are elided in
 * this listing.
 */
9789 store_membase_reg_to_store_membase_imm (int opcode)
9792 case OP_STORE_MEMBASE_REG:
9793 return OP_STORE_MEMBASE_IMM;
9794 case OP_STOREI1_MEMBASE_REG:
9795 return OP_STOREI1_MEMBASE_IMM;
9796 case OP_STOREI2_MEMBASE_REG:
9797 return OP_STOREI2_MEMBASE_IMM;
9798 case OP_STOREI4_MEMBASE_REG:
9799 return OP_STOREI4_MEMBASE_IMM;
9800 case OP_STOREI8_MEMBASE_REG:
9801 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes. */
9803 g_assert_not_reached ();
9809 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register source to the variant taking an
 * immediate operand (e.g. compares, stores, and on x86/amd64 push/compare
 * _MEMBASE forms). The original case labels between the visible returns are
 * elided in this listing; presumably the function returns -1 for opcodes
 * without an immediate variant — confirm against the full source.
 */
9812 mono_op_to_op_imm (int opcode)
9822 return OP_IDIV_UN_IMM;
9826 return OP_IREM_UN_IMM;
9840 return OP_ISHR_UN_IMM;
9857 return OP_LSHR_UN_IMM;
9860 return OP_COMPARE_IMM;
9862 return OP_ICOMPARE_IMM;
9864 return OP_LCOMPARE_IMM;
9866 case OP_STORE_MEMBASE_REG:
9867 return OP_STORE_MEMBASE_IMM;
9868 case OP_STOREI1_MEMBASE_REG:
9869 return OP_STOREI1_MEMBASE_IMM;
9870 case OP_STOREI2_MEMBASE_REG:
9871 return OP_STOREI2_MEMBASE_IMM;
9872 case OP_STOREI4_MEMBASE_REG:
9873 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes also have immediate forms. */
9875 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9877 return OP_X86_PUSH_IMM;
9878 case OP_X86_COMPARE_MEMBASE_REG:
9879 return OP_X86_COMPARE_MEMBASE_IMM;
9881 #if defined(TARGET_AMD64)
9882 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9883 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9885 case OP_VOIDCALL_REG:
9894 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the corresponding LOAD*_MEMBASE IR opcode.
 * Asserts on opcodes with no mapping. The CEE_LDIND_* case labels are
 * elided in this listing; the visible returns are in the standard
 * i1/u1/i2/u2/i4/u4/ptr/i8/r4/r8 order.
 */
9901 ldind_to_load_membase (int opcode)
9905 return OP_LOADI1_MEMBASE;
9907 return OP_LOADU1_MEMBASE;
9909 return OP_LOADI2_MEMBASE;
9911 return OP_LOADU2_MEMBASE;
9913 return OP_LOADI4_MEMBASE;
9915 return OP_LOADU4_MEMBASE;
/* Pointer-sized loads (I/REF presumably) both map to the generic load. */
9917 return OP_LOAD_MEMBASE;
9919 return OP_LOAD_MEMBASE;
9921 return OP_LOADI8_MEMBASE;
9923 return OP_LOADR4_MEMBASE;
9925 return OP_LOADR8_MEMBASE;
9927 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the corresponding STORE*_MEMBASE_REG IR
 * opcode. Asserts on opcodes with no mapping. The CEE_STIND_* case labels
 * are elided in this listing.
 */
9934 stind_to_store_membase (int opcode)
9938 return OP_STOREI1_MEMBASE_REG;
9940 return OP_STOREI2_MEMBASE_REG;
9942 return OP_STOREI4_MEMBASE_REG;
9945 return OP_STORE_MEMBASE_REG;
9947 return OP_STOREI8_MEMBASE_REG;
9949 return OP_STORER4_MEMBASE_REG;
9951 return OP_STORER8_MEMBASE_REG;
9953 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM form.
 * Only implemented for x86/amd64 (see the FIXME below); the fall-through
 * return for other targets is elided in this listing.
 */
9960 mono_load_membase_to_load_mem (int opcode)
9962 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9963 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9965 case OP_LOAD_MEMBASE:
9967 case OP_LOADU1_MEMBASE:
9968 return OP_LOADU1_MEM;
9969 case OP_LOADU2_MEMBASE:
9970 return OP_LOADU2_MEM;
9971 case OP_LOADI4_MEMBASE:
9972 return OP_LOADI4_MEM;
9973 case OP_LOADU4_MEMBASE:
9974 return OP_LOADU4_MEM;
/* 64 bit loads only have a _MEM form on 64 bit targets. */
9975 #if SIZEOF_REGISTER == 8
9976 case OP_LOADI8_MEMBASE:
9977 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode used to spill a variable and an ALU opcode
 * whose destination is that variable, return the x86/amd64 read-modify-write
 * _MEMBASE variant so the load+op+store sequence can be fused into a single
 * instruction operating on the stack slot. Only full-word stores qualify
 * (word-sized on x86; word- or 8-byte-sized on amd64). The ALU case labels
 * and the -1 fallback returns are elided in this listing.
 */
9986 op_to_op_dest_membase (int store_opcode, int opcode)
9988 #if defined(TARGET_X86)
/* Fusing is only safe when the spill store covers the whole slot. */
9989 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9994 return OP_X86_ADD_MEMBASE_REG;
9996 return OP_X86_SUB_MEMBASE_REG;
9998 return OP_X86_AND_MEMBASE_REG;
10000 return OP_X86_OR_MEMBASE_REG;
10002 return OP_X86_XOR_MEMBASE_REG;
10005 return OP_X86_ADD_MEMBASE_IMM;
10008 return OP_X86_SUB_MEMBASE_IMM;
10011 return OP_X86_AND_MEMBASE_IMM;
10014 return OP_X86_OR_MEMBASE_IMM;
10017 return OP_X86_XOR_MEMBASE_IMM;
10023 #if defined(TARGET_AMD64)
10024 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* opcodes on amd64. */
10029 return OP_X86_ADD_MEMBASE_REG;
10031 return OP_X86_SUB_MEMBASE_REG;
10033 return OP_X86_AND_MEMBASE_REG;
10035 return OP_X86_OR_MEMBASE_REG;
10037 return OP_X86_XOR_MEMBASE_REG;
10039 return OP_X86_ADD_MEMBASE_IMM;
10041 return OP_X86_SUB_MEMBASE_IMM;
10043 return OP_X86_AND_MEMBASE_IMM;
10045 return OP_X86_OR_MEMBASE_IMM;
10047 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use dedicated AMD64_* opcodes. */
10049 return OP_AMD64_ADD_MEMBASE_REG;
10051 return OP_AMD64_SUB_MEMBASE_REG;
10053 return OP_AMD64_AND_MEMBASE_REG;
10055 return OP_AMD64_OR_MEMBASE_REG;
10057 return OP_AMD64_XOR_MEMBASE_REG;
10060 return OP_AMD64_ADD_MEMBASE_IMM;
10063 return OP_AMD64_SUB_MEMBASE_IMM;
10066 return OP_AMD64_AND_MEMBASE_IMM;
10069 return OP_AMD64_OR_MEMBASE_IMM;
10072 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Given a spill store opcode and the opcode producing the stored value,
 * return an x86/amd64 opcode that writes its result directly to memory
 * (currently only SETcc byte stores), or -1 (fallback return elided in this
 * listing) if no fused form exists. The case labels for the visible
 * conditions are elided.
 */
10082 op_to_op_store_membase (int store_opcode, int opcode)
10084 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc only produces a byte, so only 1-byte stores can be fused. */
10087 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10088 return OP_X86_SETEQ_MEMBASE;
10090 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10091 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the load opcode used to reload a spilled variable and the opcode
 * consuming it as its first source, return an x86/amd64 variant that reads
 * the operand directly from memory, avoiding the explicit load; -1 fallback
 * returns and some case labels are elided in this listing.
 */
10099 op_to_op_src1_membase (int load_opcode, int opcode)
10102 /* FIXME: This has sign extension issues */
10104 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10105 return OP_X86_COMPARE_MEMBASE8_IMM;
/* On x86, only full-word loads can be fused. */
10108 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10113 return OP_X86_PUSH_MEMBASE;
10114 case OP_COMPARE_IMM:
10115 case OP_ICOMPARE_IMM:
10116 return OP_X86_COMPARE_MEMBASE_IMM;
10119 return OP_X86_COMPARE_MEMBASE_REG;
10123 #ifdef TARGET_AMD64
10124 /* FIXME: This has sign extension issues */
10126 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10127 return OP_X86_COMPARE_MEMBASE8_IMM;
10132 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10133 return OP_X86_PUSH_MEMBASE;
/* Disabled: immediate compares against memory, 32 bit immediates only. */
10135 /* FIXME: This only works for 32 bit immediates
10136 case OP_COMPARE_IMM:
10137 case OP_LCOMPARE_IMM:
10138 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10139 return OP_AMD64_COMPARE_MEMBASE_IMM;
10141 case OP_ICOMPARE_IMM:
10142 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10143 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10147 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10148 return OP_AMD64_COMPARE_MEMBASE_REG;
10151 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10152 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase, but for the second source operand:
 * returns a REG_MEMBASE variant so the spilled operand is read directly
 * from memory. The load size must match the operation width (4-byte loads
 * for 32 bit ops, 8-byte/word loads for 64 bit ops on amd64). Case labels
 * and the -1 fallback returns are elided in this listing.
 */
10161 op_to_op_src2_membase (int load_opcode, int opcode)
10164 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10170 return OP_X86_COMPARE_REG_MEMBASE;
10172 return OP_X86_ADD_REG_MEMBASE;
10174 return OP_X86_SUB_REG_MEMBASE;
10176 return OP_X86_AND_REG_MEMBASE;
10178 return OP_X86_OR_REG_MEMBASE;
10180 return OP_X86_XOR_REG_MEMBASE;
10184 #ifdef TARGET_AMD64
10187 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10188 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10192 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10193 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32 bit ALU ops reuse the X86_* opcodes on amd64. */
10196 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10197 return OP_X86_ADD_REG_MEMBASE;
10199 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10200 return OP_X86_SUB_REG_MEMBASE;
10202 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10203 return OP_X86_AND_REG_MEMBASE;
10205 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10206 return OP_X86_OR_REG_MEMBASE;
10208 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10209 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit ALU ops use dedicated AMD64_* opcodes. */
10211 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10212 return OP_AMD64_ADD_REG_MEMBASE;
10214 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10215 return OP_AMD64_SUB_REG_MEMBASE;
10217 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10218 return OP_AMD64_AND_REG_MEMBASE;
10220 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10221 return OP_AMD64_OR_REG_MEMBASE;
10223 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10224 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses the conversion (case bodies elided
 * in this listing) for opcodes that are software-emulated on this target:
 * long shifts on 32 bit platforms and mul/div when the arch emulates them.
 * Everything else is delegated to mono_op_to_op_imm.
 */
10232 mono_op_to_op_imm_noemul (int opcode)
10235 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10240 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10248 return mono_op_to_op_imm (opcode);
10252 #ifndef DISABLE_JIT
10255 * mono_handle_global_vregs:
10257 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Two passes over the IR:
 *   1) find vregs referenced from more than one bblock and promote them to
 *      variables (mono_compile_create_var_for_vreg), since only variables
 *      survive across bblock boundaries;
 *   2) demote single-bblock variables back to local vregs, then compact the
 *      varinfo/vars tables.
 *   Many interior lines (braces, some declarations, switch labels) are
 * elided in this listing.
 */
10261 mono_handle_global_vregs (MonoCompile *cfg)
10263 gint32 *vreg_to_bb;
10264 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] == 0: unseen, == block_num + 1: seen in one bb,
 * == -1: seen in multiple bbs.
 * NOTE(review): the size expression looks off — sizeof (gint32*) instead of
 * sizeof (gint32), and '+ 1' adds one byte, not one element, due to
 * precedence. Harmlessly over/under-sized? Confirm against the full source.
 */
10267 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10269 #ifdef MONO_ARCH_SIMD_INTRINSICS
10270 if (cfg->uses_simd_intrinsics)
10271 mono_simd_simplify_indirection (cfg);
10274 /* Find local vregs used in more than one bb */
10275 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10276 MonoInst *ins = bb->code;
10277 int block_num = bb->block_num;
10279 if (cfg->verbose_level > 2)
10280 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10283 for (; ins; ins = ins->next) {
10284 const char *spec = INS_INFO (ins->opcode);
10285 int regtype, regindex;
10288 if (G_UNLIKELY (cfg->verbose_level > 2))
10289 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes are expected here. */
10291 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn; blank spec entries are skipped. */
10293 for (regindex = 0; regindex < 4; regindex ++) {
10296 if (regindex == 0) {
10297 regtype = spec [MONO_INST_DEST];
10298 if (regtype == ' ')
10301 } else if (regindex == 1) {
10302 regtype = spec [MONO_INST_SRC1];
10303 if (regtype == ' ')
10306 } else if (regindex == 2) {
10307 regtype = spec [MONO_INST_SRC2];
10308 if (regtype == ' ')
10311 } else if (regindex == 3) {
10312 regtype = spec [MONO_INST_SRC3];
10313 if (regtype == ' ')
10318 #if SIZEOF_REGISTER == 4
10319 /* In the LLVM case, the long opcodes are not decomposed */
10320 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10322 * Since some instructions reference the original long vreg,
10323 * and some reference the two component vregs, it is quite hard
10324 * to determine when it needs to be global. So be conservative.
10326 if (!get_vreg_to_inst (cfg, vreg)) {
10327 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10329 if (cfg->verbose_level > 2)
10330 printf ("LONG VREG R%d made global.\n", vreg);
10334 * Make the component vregs volatile since the optimizations can
10335 * get confused otherwise.
10337 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10338 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10342 g_assert (vreg != -1);
10344 prev_bb = vreg_to_bb [vreg];
10345 if (prev_bb == 0) {
10346 /* 0 is a valid block num */
10347 vreg_to_bb [vreg] = block_num + 1;
10348 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are handled by the regallocator, skip them. */
10349 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10352 if (!get_vreg_to_inst (cfg, vreg)) {
10353 if (G_UNLIKELY (cfg->verbose_level > 2))
10354 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Promote to a variable, typed by regtype ('i'/'f'/'v'; labels elided). */
10358 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10361 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10364 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10367 g_assert_not_reached ();
10371 /* Flag as having been used in more than one bb */
10372 vreg_to_bb [vreg] = -1;
10378 /* If a variable is used in only one bblock, convert it into a local vreg */
10379 for (i = 0; i < cfg->num_varinfo; i++) {
10380 MonoInst *var = cfg->varinfo [i];
10381 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10383 switch (var->type) {
10389 #if SIZEOF_REGISTER == 8
10392 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10393 /* Enabling this screws up the fp stack on x86 */
10396 /* Arguments are implicitly global */
10397 /* Putting R4 vars into registers doesn't work currently */
10398 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10400 * Make sure that the variable's liveness interval doesn't contain a call, since
10401 * that would cause the lvreg to be spilled, making the whole optimization
10404 /* This is too slow for JIT compilation */
10406 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10408 int def_index, call_index, ins_index;
10409 gboolean spilled = FALSE;
10414 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10415 const char *spec = INS_INFO (ins->opcode);
10417 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10418 def_index = ins_index;
/*
 * NOTE(review): the second clause below duplicates the first
 * (SRC1/sreg1 twice); it presumably was meant to test
 * SRC2/sreg2 — confirm against upstream.
 */
10420 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10421 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10422 if (call_index > def_index) {
10428 if (MONO_IS_CALL (ins))
10429 call_index = ins_index;
10439 if (G_UNLIKELY (cfg->verbose_level > 2))
10440 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: kill the variable and unlink the vreg->var mapping. */
10441 var->flags |= MONO_INST_IS_DEAD;
10442 cfg->vreg_to_inst [var->dreg] = NULL;
10449 * Compress the varinfo and vars tables so the liveness computation is faster and
10450 * takes up less space.
10453 for (i = 0; i < cfg->num_varinfo; ++i) {
10454 MonoInst *var = cfg->varinfo [i];
10455 if (pos < i && cfg->locals_start == i)
10456 cfg->locals_start = pos;
10457 if (!(var->flags & MONO_INST_IS_DEAD)) {
10459 cfg->varinfo [pos] = cfg->varinfo [i];
10460 cfg->varinfo [pos]->inst_c0 = pos;
10461 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10462 cfg->vars [pos].idx = pos;
10463 #if SIZEOF_REGISTER == 4
10464 if (cfg->varinfo [pos]->type == STACK_I8) {
10465 /* Modify the two component vars too */
10468 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10469 var1->inst_c0 = pos;
10470 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10471 var1->inst_c0 = pos;
10478 cfg->num_varinfo = pos;
10479 if (cfg->locals_start > cfg->num_varinfo)
10480 cfg->locals_start = cfg->num_varinfo;
10484 * mono_spill_global_vars:
10486 * Generate spill code for variables which are not allocated to registers,
10487 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10488 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   Spill pass: walk every instruction, rewrite vreg references to spilled
 * variables into loads/stores from their stack slots (OP_REGOFFSET) or into
 * the hreg for OP_REGVAR variables, fusing memory operands where the target
 * supports it, and record per-vreg live ranges for the debugger. Sets
 * *need_local_opts when it emits code the local optimizer could clean up.
 *   Many interior lines (braces, some declarations, case labels) are elided
 * in this listing.
 */
10491 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10493 MonoBasicBlock *bb;
10495 int orig_next_vreg;
10496 guint32 *vreg_to_lvreg;
10498 guint32 i, lvregs_len;
10499 gboolean dest_has_lvreg = FALSE;
10500 guint32 stacktypes [128];
10501 MonoInst **live_range_start, **live_range_end;
10502 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10504 *need_local_opts = FALSE;
10506 memset (spec2, 0, sizeof (spec2));
/* Map ins-spec regtype chars to the stack type used by alloc_dreg (). */
10508 /* FIXME: Move this function to mini.c */
10509 stacktypes ['i'] = STACK_PTR;
10510 stacktypes ['l'] = STACK_I8;
10511 stacktypes ['f'] = STACK_R8;
10512 #ifdef MONO_ARCH_SIMD_INTRINSICS
10513 stacktypes ['x'] = STACK_VTYPE;
10516 #if SIZEOF_REGISTER == 4
10517 /* Create MonoInsts for longs */
10518 for (i = 0; i < cfg->num_varinfo; i++) {
10519 MonoInst *ins = cfg->varinfo [i];
10521 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10522 switch (ins->type) {
10523 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two component vregs stack slots inside the long's slot. */
10529 g_assert (ins->opcode == OP_REGOFFSET);
10531 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10533 tree->opcode = OP_REGOFFSET;
10534 tree->inst_basereg = ins->inst_basereg;
10535 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10537 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10539 tree->opcode = OP_REGOFFSET;
10540 tree->inst_basereg = ins->inst_basereg;
10541 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10551 /* FIXME: widening and truncation */
10554 * As an optimization, when a variable allocated to the stack is first loaded into
10555 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10556 * the variable again.
10558 orig_next_vreg = cfg->next_vreg;
10559 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10560 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10564 * These arrays contain the first and last instructions accessing a given
10566 * Since we emit bblocks in the same order we process them here, and we
10567 * don't split live ranges, these will precisely describe the live range of
10568 * the variable, i.e. the instruction range where a valid value can be found
10569 * in the variable's location.
10571 /* FIXME: Only do this if debugging info is requested */
10572 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10573 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10574 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10575 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10577 /* Add spill loads/stores */
10578 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10581 if (cfg->verbose_level > 2)
10582 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within a bblock. */
10584 /* Clear vreg_to_lvreg array */
10585 for (i = 0; i < lvregs_len; i++)
10586 vreg_to_lvreg [lvregs [i]] = 0;
10590 MONO_BB_FOR_EACH_INS (bb, ins) {
10591 const char *spec = INS_INFO (ins->opcode);
10592 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10593 gboolean store, no_lvreg;
10594 int sregs [MONO_MAX_SRC_REGS];
10596 if (G_UNLIKELY (cfg->verbose_level > 2))
10597 mono_print_ins (ins);
10599 if (ins->opcode == OP_NOP)
10603 * We handle LDADDR here as well, since it can only be decomposed
10604 * when variable addresses are known.
10606 if (ins->opcode == OP_LDADDR) {
10607 MonoInst *var = ins->inst_p0;
10609 if (var->opcode == OP_VTARG_ADDR) {
10610 /* Happens on SPARC/S390 where vtypes are passed by reference */
10611 MonoInst *vtaddr = var->inst_left;
10612 if (vtaddr->opcode == OP_REGVAR) {
10613 ins->opcode = OP_MOVE;
10614 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): tests var->inst_left but uses vtaddr — same pointer here. */
10616 else if (var->inst_left->opcode == OP_REGOFFSET) {
10617 ins->opcode = OP_LOAD_MEMBASE;
10618 ins->inst_basereg = vtaddr->inst_basereg;
10619 ins->inst_offset = vtaddr->inst_offset;
/* Plain variable: its address is basereg + offset. */
10623 g_assert (var->opcode == OP_REGOFFSET);
10625 ins->opcode = OP_ADD_IMM;
10626 ins->sreg1 = var->inst_basereg;
10627 ins->inst_imm = var->inst_offset;
10630 *need_local_opts = TRUE;
10631 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must all have been decomposed by now. */
10634 if (ins->opcode < MONO_CEE_LAST) {
10635 mono_print_ins (ins);
10636 g_assert_not_reached ();
10640 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10644 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 and build a spec treating both as sources. */
10645 tmp_reg = ins->dreg;
10646 ins->dreg = ins->sreg2;
10647 ins->sreg2 = tmp_reg;
10650 spec2 [MONO_INST_DEST] = ' ';
10651 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10652 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10653 spec2 [MONO_INST_SRC3] = ' ';
10655 } else if (MONO_IS_STORE_MEMINDEX (ins))
10656 g_assert_not_reached ();
10661 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10662 printf ("\t %.3s %d", spec, ins->dreg);
10663 num_sregs = mono_inst_get_src_registers (ins, sregs);
10664 for (srcindex = 0; srcindex < 3; ++srcindex)
10665 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10672 regtype = spec [MONO_INST_DEST];
10673 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10676 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10677 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10678 MonoInst *store_ins;
10680 MonoInst *def_ins = ins;
10681 int dreg = ins->dreg; /* The original vreg */
10683 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10685 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: just retarget the instruction. */
10686 ins->dreg = var->dreg;
10687 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10689 * Instead of emitting a load+store, use a _membase opcode.
10691 g_assert (var->opcode == OP_REGOFFSET);
10692 if (ins->opcode == OP_MOVE) {
10696 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10697 ins->inst_basereg = var->inst_basereg;
10698 ins->inst_offset = var->inst_offset;
10701 spec = INS_INFO (ins->opcode);
10705 g_assert (var->opcode == OP_REGOFFSET);
10707 prev_dreg = ins->dreg;
10709 /* Invalidate any previous lvreg for this vreg */
10710 vreg_to_lvreg [ins->dreg] = 0;
10714 #ifdef MONO_ARCH_SOFT_FLOAT
10715 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: doubles live in integer registers. */
10717 store_opcode = OP_STOREI8_MEMBASE_REG;
10721 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10723 if (regtype == 'l') {
/* Longs on 32 bit: store the two component words separately. */
10724 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10725 mono_bblock_insert_after_ins (bb, ins, store_ins);
10726 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10727 mono_bblock_insert_after_ins (bb, ins, store_ins);
10728 def_ins = store_ins;
10731 g_assert (store_opcode != OP_STOREV_MEMBASE);
10733 /* Try to fuse the store into the instruction itself */
10734 /* FIXME: Add more instructions */
10735 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10736 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10737 ins->inst_imm = ins->inst_c0;
10738 ins->inst_destbasereg = var->inst_basereg;
10739 ins->inst_offset = var->inst_offset;
10740 spec = INS_INFO (ins->opcode);
10741 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled var becomes a plain store of the source. */
10742 ins->opcode = store_opcode;
10743 ins->inst_destbasereg = var->inst_basereg;
10744 ins->inst_offset = var->inst_offset;
10748 tmp_reg = ins->dreg;
10749 ins->dreg = ins->sreg2;
10750 ins->sreg2 = tmp_reg;
10753 spec2 [MONO_INST_DEST] = ' ';
10754 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10755 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10756 spec2 [MONO_INST_SRC3] = ' ';
10758 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10759 // FIXME: The backends expect the base reg to be in inst_basereg
10760 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10762 ins->inst_basereg = var->inst_basereg;
10763 ins->inst_offset = var->inst_offset;
10764 spec = INS_INFO (ins->opcode);
10766 /* printf ("INS: "); mono_print_ins (ins); */
10767 /* Create a store instruction */
10768 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10770 /* Insert it after the instruction */
10771 mono_bblock_insert_after_ins (bb, ins, store_ins);
10773 def_ins = store_ins;
10776 * We can't assign ins->dreg to var->dreg here, since the
10777 * sregs could use it. So set a flag, and do it after
10780 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10781 dest_has_lvreg = TRUE;
10786 if (def_ins && !live_range_start [dreg]) {
10787 live_range_start [dreg] = def_ins;
10788 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
10795 num_sregs = mono_inst_get_src_registers (ins, sregs);
10796 for (srcindex = 0; srcindex < 3; ++srcindex) {
10797 regtype = spec [MONO_INST_SRC1 + srcindex];
10798 sreg = sregs [srcindex];
10800 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10801 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10802 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10803 MonoInst *use_ins = ins;
10804 MonoInst *load_ins;
10805 guint32 load_opcode;
10807 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hard register: just retarget the source. */
10808 sregs [srcindex] = var->dreg;
10809 //mono_inst_set_src_registers (ins, sregs);
10810 live_range_end [sreg] = use_ins;
10811 live_range_end_bb [sreg] = bb;
10815 g_assert (var->opcode == OP_REGOFFSET);
10817 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10819 g_assert (load_opcode != OP_LOADV_MEMBASE);
10821 if (vreg_to_lvreg [sreg]) {
10822 g_assert (vreg_to_lvreg [sreg] != -1);
10824 /* The variable is already loaded to an lvreg */
10825 if (G_UNLIKELY (cfg->verbose_level > 2))
10826 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10827 sregs [srcindex] = vreg_to_lvreg [sreg];
10828 //mono_inst_set_src_registers (ins, sregs);
10832 /* Try to fuse the load into the instruction */
10833 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10834 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10835 sregs [0] = var->inst_basereg;
10836 //mono_inst_set_src_registers (ins, sregs);
10837 ins->inst_offset = var->inst_offset;
10838 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10839 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10840 sregs [1] = var->inst_basereg;
10841 //mono_inst_set_src_registers (ins, sregs);
10842 ins->inst_offset = var->inst_offset;
10844 if (MONO_IS_REAL_MOVE (ins)) {
/* The move becomes just the load emitted below. */
10845 ins->opcode = OP_NOP;
10848 //printf ("%d ", srcindex); mono_print_ins (ins);
10850 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the lvreg so later uses in this bblock skip the reload. */
10852 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10853 if (var->dreg == prev_dreg) {
10855 * sreg refers to the value loaded by the load
10856 * emitted below, but we need to use ins->dreg
10857 * since it refers to the store emitted earlier.
10861 g_assert (sreg != -1);
10862 vreg_to_lvreg [var->dreg] = sreg;
10863 g_assert (lvregs_len < 1024);
10864 lvregs [lvregs_len ++] = var->dreg;
10868 sregs [srcindex] = sreg;
10869 //mono_inst_set_src_registers (ins, sregs);
10871 if (regtype == 'l') {
/* Longs on 32 bit: load the two component words separately. */
10872 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10873 mono_bblock_insert_before_ins (bb, ins, load_ins);
10874 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10875 mono_bblock_insert_before_ins (bb, ins, load_ins);
10876 use_ins = load_ins;
10879 #if SIZEOF_REGISTER == 4
10880 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10882 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10883 mono_bblock_insert_before_ins (bb, ins, load_ins);
10884 use_ins = load_ins;
/* Only track live ranges for vregs that existed before this pass. */
10888 if (var->dreg < orig_next_vreg) {
10889 live_range_end [var->dreg] = use_ins;
10890 live_range_end_bb [var->dreg] = bb;
10894 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above, now that sregs are processed. */
10896 if (dest_has_lvreg) {
10897 g_assert (ins->dreg != -1);
10898 vreg_to_lvreg [prev_dreg] = ins->dreg;
10899 g_assert (lvregs_len < 1024);
10900 lvregs [lvregs_len ++] = prev_dreg;
10901 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes earlier. */
10905 tmp_reg = ins->dreg;
10906 ins->dreg = ins->sreg2;
10907 ins->sreg2 = tmp_reg;
10910 if (MONO_IS_CALL (ins)) {
/* A call clobbers the lvregs, so drop the whole cache. */
10911 /* Clear vreg_to_lvreg array */
10912 for (i = 0; i < lvregs_len; i++)
10913 vreg_to_lvreg [lvregs [i]] = 0;
10915 } else if (ins->opcode == OP_NOP) {
10917 MONO_INST_NULLIFY_SREGS (ins);
10920 if (cfg->verbose_level > 2)
10921 mono_print_ins_index (1, ins);
10925 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10927 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10928 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10930 for (i = 0; i < cfg->num_varinfo; ++i) {
10931 int vreg = MONO_VARINFO (cfg, i)->vreg;
10934 if (live_range_start [vreg]) {
10935 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10937 ins->inst_c1 = vreg;
10938 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10940 if (live_range_end [vreg]) {
10941 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10943 ins->inst_c1 = vreg;
10944 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10949 g_free (live_range_start);
10950 g_free (live_range_end);
10951 g_free (live_range_start_bb);
10952 g_free (live_range_end_bb);
10957 * - use 'iadd' instead of 'int_add'
10958 * - handling ovf opcodes: decompose in method_to_ir.
10959 * - unify iregs/fregs
10960 * -> partly done, the missing parts are:
10961 * - a more complete unification would involve unifying the hregs as well, so
10962 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10963 * would no longer map to the machine hregs, so the code generators would need to
10964 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10965 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10966 * fp/non-fp branches speeds it up by about 15%.
10967 * - use sext/zext opcodes instead of shifts
10969 * - get rid of TEMPLOADs if possible and use vregs instead
10970 * - clean up usage of OP_P/OP_ opcodes
10971 * - cleanup usage of DUMMY_USE
10972 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10974 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10975 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10976 * - make sure handle_stack_args () is called before the branch is emitted
10977 * - when the new IR is done, get rid of all unused stuff
10978 * - COMPARE/BEQ as separate instructions or unify them ?
10979 * - keeping them separate allows specialized compare instructions like
10980 * compare_imm, compare_membase
10981 * - most back ends unify fp compare+branch, fp compare+ceq
10982 * - integrate mono_save_args into inline_method
10983 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10984 * - handle long shift opts on 32 bit platforms somehow: they require
10985 * 3 sregs (2 for arg1 and 1 for arg2)
10986 * - make byref a 'normal' type.
10987 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10988 * variable if needed.
10989 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10990 * like inline_method.
10991 * - remove inlining restrictions
10992 * - fix LNEG and enable cfold of INEG
10993 * - generalize x86 optimizations like ldelema as a peephole optimization
10994 * - add store_mem_imm for amd64
10995 * - optimize the loading of the interruption flag in the managed->native wrappers
10996 * - avoid special handling of OP_NOP in passes
10997 * - move code inserting instructions into one function/macro.
10998 * - try a coalescing phase after liveness analysis
10999 * - add float -> vreg conversion + local optimizations on !x86
11000 * - figure out how to handle decomposed branches during optimizations, ie.
11001 * compare+branch, op_jump_table+op_br etc.
11002 * - promote RuntimeXHandles to vregs
11003 * - vtype cleanups:
11004 * - add a NEW_VARLOADA_VREG macro
11005 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11006 * accessing vtype fields.
11007 * - get rid of I8CONST on 64 bit platforms
11008 * - dealing with the increase in code size due to branches created during opcode
11010 * - use extended basic blocks
11011 * - all parts of the JIT
11012 * - handle_global_vregs () && local regalloc
11013 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11014 * - sources of increase in code size:
11017 * - isinst and castclass
11018 * - lvregs not allocated to global registers even if used multiple times
11019 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11021 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11022 * - add all micro optimizations from the old JIT
11023 * - put tree optimizations into the deadce pass
11024 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11025 * specific function.
11026 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11027 * fcompare + branchCC.
11028 * - create a helper function for allocating a stack slot, taking into account
11029 * MONO_CFG_HAS_SPILLUP.
11031 * - merge the ia64 switch changes.
11032 * - optimize mono_regstate2_alloc_int/float.
11033 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11034 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11035 * parts of the tree could be separated by other instructions, killing the tree
11036 * arguments, or stores killing loads etc. Also, should we fold loads into other
11037 * instructions if the result of the load is used multiple times ?
11038 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11039 * - LAST MERGE: 108395.
11040 * - when returning vtypes in registers, generate IR and append it to the end of the
11041 * last bb instead of doing it in the epilog.
11042 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11050 - When to decompose opcodes:
11051 - earlier: this makes some optimizations hard to implement, since the low level IR
11052 no longer contains the necessary information. But it is easier to do.
11053 - later: harder to implement, enables more optimizations.
11054 - Branches inside bblocks:
11055 - created when decomposing complex opcodes.
11056 - branches to another bblock: harmless, but not tracked by the branch
11057 optimizations, so need to branch to a label at the start of the bblock.
11058 - branches to inside the same bblock: very problematic, trips up the local
11059 reg allocator. Can be fixed by splitting the current bblock, but that is a
11060 complex operation, since some local vregs can become global vregs etc.
11061 - Local/global vregs:
11062 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11063 local register allocator.
11064 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11065 structure, created by mono_create_var (). Assigned to hregs or the stack by
11066 the global register allocator.
11067 - When to do optimizations like alu->alu_imm:
11068 - earlier -> saves work later on since the IR will be smaller/simpler
11069 - later -> can work on more instructions
11070 - Handling of valuetypes:
11071 - When a vtype is pushed on the stack, a new temporary is created, an
11072 instruction computing its address (LDADDR) is emitted and pushed on
11073 the stack. Need to optimize cases when the vtype is used immediately as in
11074 argument passing, stloc etc.
11075 - Instead of the to_end stuff in the old JIT, simply call the function handling
11076 the values on the stack before emitting the last instruction of the bb.
11079 #endif /* DISABLE_JIT */