2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
/*
 * JIT tuning constants and error-exit helper macros.
 * NOTE(review): several '\'-continued lines of the macros below are missing
 * from this extract; the code is kept byte-identical.
 */
/* Cost weight charged when inlining across branches. */
61 #define BRANCH_COST 100
/* Upper bound on method size considered for inlining (presumably IL bytes -- confirm). */
62 #define INLINE_LENGTH_LIMIT 20
/* Record an inlining failure for the current non-wrapper method (action line elided). */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
/* Shared trampoline signatures, defined in another translation unit. */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
/* NOTE(review): the surrounding table declarations are partially elided in this extract. */
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
/* Re-expand mini-ops.h to compute the number of source registers per opcode. */
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
/* Name hash for JIT icalls, defined elsewhere. */
159 extern GHashTable *jit_icall_name_hash;
/* Initialize a variable-info liveness range; continuation lines are elided in this extract. */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
/* Copy the three source registers from 'regs' into 'ins'. */
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
/* Thin public wrappers around the internal virtual-register allocators
 * (integer, float, pointer-sized, and stack-type-directed dreg). */
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the opcode used to move a value of that type between
 * registers. NOTE(review): many case labels and return statements are
 * elided in this extract.
 */
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
206 switch (type->type) {
209 case MONO_TYPE_BOOLEAN:
221 case MONO_TYPE_FNPTR:
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
231 #if SIZEOF_REGISTER == 8
240 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 case MONO_TYPE_TYPEDBYREF:
250 case MONO_TYPE_GENERICINST:
/* Generic instances are classified via their generic type definition. */
251 type = &type->data.generic_class->container_class->byval_arg;
255 g_assert (cfg->generic_sharing_context);
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug dump of a basic block: its in/out edges followed by each instruction.
 */
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Walk the instruction list and print every instruction. */
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
281 * Can't put this at the beginning, since other files reference stuff from this
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up the bblock starting at IL offset 'ip', lazily creating and
 * registering it when not yet present (interior lines elided in this extract). */
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. */
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_REGISTER == 8
/* On 64-bit, sign-extend an I4 second operand so it can combine with a
 * native-pointer first operand; no-op define on 32-bit below. */
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, emit the typed binary op, push the (possibly
 * decomposed) result; ADD_UNOP/ADD_BINCOND below are the unary and
 * conditional-branch counterparts. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
 * NOTE(review): interior lines of this function are missing from this
 * extract; the edge arrays appear to be grown copy-and-append style into
 * mempool storage -- confirm against the full source.
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
/* Optional debug trace of the edge being added (entry/exit are NULL cil_code). */
394 if (from->cil_code) {
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 printf ("edge from entry to exit\n");
/* Check for an existing 'to' entry in from's out list (dedup; body partially elided). */
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
/* Mirror the edge in to's in list, again checking for duplicates first. */
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
/* Scan every EH clause; the first one containing 'offset' determines the region.
 * The token is ((clause index + 1) << 8) | region kind | clause flags. */
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
/* Filter region: offset lies between the filter start and the handler start. */
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Handler body: finally / fault / catch, in that order of checks. */
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise the offset may fall inside the protected (try) range itself. */
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (into a GList) the handler bblocks of EH clauses of kind 'type'
 * that contain 'ip' but not 'target', i.e. the clauses a branch from ip to
 * target would leave. NOTE(review): parts of the body are elided here.
 */
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* Clause encloses the branch source but not its destination. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Return the 'spvar' for an EH region, creating it and caching it in
 * cfg->spvars on first use (presumably the saved stack pointer for the
 * region -- confirm against the full source).
 */
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (but never create) the exception variable for an IL offset. */
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return the exception-object variable for an IL offset, creating and
 * caching it in cfg->exvars on first use. */
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
/* NOTE(review): presumably the byref early-out (guard line elided) -- confirm. */
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
607 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying integral type. */
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of binary numeric ops, indexed [lhs type][rhs type]. */
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): row below belongs to a table whose declaration line is
 * elided (used by the CEE_NEG case in type_from_op as 'neg_table'). */
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Result stack type of integer-only binary ops. */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: nonzero means the type pair may be compared
 * (values other than 1 encode restricted comparisons; see type_from_op). */
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Result stack type of shift ops: [value type][shift-amount type]. */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* NOTE(review): many case labels and break statements of this switch are
 * elided in this extract; comments below describe only the visible lines. */
753 switch (ins->opcode) {
/* Binary numeric ops: result type from the table, then specialize the opcode. */
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width/kind. */
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and ceq/cgt/clt families. */
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
/* Conversions; result type is fixed by the CEE_CONV_* opcode family. */
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: width of the move depends on the operand and register size. */
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 result allowed. */
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
/* Loads: result type determined by the membase load width. */
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
/* NOTE(review): the declaration line of the table holding the row below is
 * elided in this extract (maps metadata types to eval-stack types). */
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validates that the stack types of 'args' are compatible with the
 * parameter types of 'sig'. NOTE(review): the body is heavily elided in
 * this extract; return paths are not visible.
 */
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable on first request. */
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
 * (review): lazily created below; only relevant when MONO_ARCH_NEED_GOT_VAR
 * is defined and the method is compiled AOT.
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
/*
 * mono_get_vtable_var:
 * Lazily create the variable holding the runtime generic context (rgctx);
 * only valid under generic sharing (asserted below).
 */
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
/* Map an eval-stack type classification back to a representative MonoType. */
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse mapping: classify a MonoType (after unwrapping enums via its
 * underlying type) as an eval-stack type constant.
 * NOTE(review): most return statements are elided in this extract.
 */
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 switch (mono_type_get_underlying_type (t)->type) {
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map an ldelem/stelem-style opcode to the MonoClass of the element type
 * being accessed. NOTE(review): most case labels are elided in this extract;
 * only CEE_LDELEM_REF/CEE_STELEM_REF remain visible.
 */
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
 * (review): returns a local variable for eval-stack slot 'slot' holding
 * 'ins'; integer-like slots are cached per (type, slot) in cfg->intvars,
 * other types always get a fresh variable.
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per (stack type, slot). */
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT compilation, record the (image, token) pair that produced 'key'
 * in cfg->token_info_hash so the AOT compiler can map it back later.
 */
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
 * NOTE(review): interior lines of this function are missing from this extract.
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick or allocate the out_stack variables. */
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an existing in_stack of a successor, if one is set. */
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
/* Otherwise allocate a fresh out_stack array from the mempool. */
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpouse, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack as the in_stack of each successor,
 * flagging a merge mismatch as unverifiable IL. */
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
1312 locals = bb->out_stack;
/* Spill each stack value into its temp and replace it on the stack. */
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1363 if (cfg->compile_aot) {
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1378 * stored in "klass_reg" implements the interface "klass".
1381 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 int ibitmap_byte_reg = alloc_preg (cfg);
1386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1388 if (cfg->compile_aot) {
1389 int iid_reg = alloc_preg (cfg);
1390 int shifted_iid_reg = alloc_preg (cfg);
1391 int ibitmap_byte_address_reg = alloc_preg (cfg);
1392 int masked_iid_reg = alloc_preg (cfg);
1393 int iid_one_bit_reg = alloc_preg (cfg);
1394 int iid_bit_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1400 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1410 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1411 * stored in "vtable_reg" implements the interface "klass".
1414 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1416 int ibitmap_reg = alloc_preg (cfg);
1417 int ibitmap_byte_reg = alloc_preg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1421 if (cfg->compile_aot) {
1422 int iid_reg = alloc_preg (cfg);
1423 int shifted_iid_reg = alloc_preg (cfg);
1424 int ibitmap_byte_address_reg = alloc_preg (cfg);
1425 int masked_iid_reg = alloc_preg (cfg);
1426 int iid_one_bit_reg = alloc_preg (cfg);
1427 int iid_bit_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1433 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 * Emit code which checks whenever the interface id of @klass is smaller than
1444 * than the value given by max_iid_reg.
1447 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 if (cfg->compile_aot) {
1451 int iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1463 /* Same as above, but obtains max_iid from a vtable */
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1486 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1488 int idepth_reg = alloc_preg (cfg);
1489 int stypes_reg = alloc_preg (cfg);
1490 int stype = alloc_preg (cfg);
1492 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1499 if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1510 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int intf_reg = alloc_preg (cfg);
1514 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1515 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 * Variant of the above that takes a register to the class, not the vtable.
1527 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1529 int intf_bit_reg = alloc_preg (cfg);
1531 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1532 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1541 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1543 if (cfg->compile_aot) {
1544 int const_reg = alloc_preg (cfg);
1545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1556 if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1567 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 int rank_reg = alloc_preg (cfg);
1571 int eclass_reg = alloc_preg (cfg);
1573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1575 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1576 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1578 if (klass->cast_class == mono_defaults.object_class) {
1579 int parent_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1581 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1582 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class) {
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1589 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1592 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1595 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1596 /* Check that the object is a vector too */
1597 int bounds_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1600 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1603 int idepth_reg = alloc_preg (cfg);
1604 int stypes_reg = alloc_preg (cfg);
1605 int stype = alloc_preg (cfg);
1607 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1608 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1610 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1614 mini_emit_class_check (cfg, stype, klass);
1619 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1623 g_assert (val == 0);
1628 if ((size <= 4) && (size <= align)) {
1631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 #if SIZEOF_REGISTER == 8
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1647 val_reg = alloc_preg (cfg);
1649 if (SIZEOF_REGISTER == 8)
1650 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1655 /* This could be optimized further if neccesary */
1657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1664 #if !NO_UNALIGNED_ACCESS
1665 if (SIZEOF_REGISTER == 8) {
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1696 #endif /* DISABLE_JIT */
1699 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1707 /* This could be optimized further if neccesary */
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1718 #if !NO_UNALIGNED_ACCESS
1719 if (SIZEOF_REGISTER == 8) {
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1740 cur_reg = alloc_preg (cfg);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1748 cur_reg = alloc_preg (cfg);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1760 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1763 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1766 type = mini_get_basic_type_from_generic (gsctx, type);
1767 switch (type->type) {
1768 case MONO_TYPE_VOID:
1769 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1772 case MONO_TYPE_BOOLEAN:
1775 case MONO_TYPE_CHAR:
1778 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 case MONO_TYPE_FNPTR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_CLASS:
1785 case MONO_TYPE_STRING:
1786 case MONO_TYPE_OBJECT:
1787 case MONO_TYPE_SZARRAY:
1788 case MONO_TYPE_ARRAY:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1795 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1796 case MONO_TYPE_VALUETYPE:
1797 if (type->data.klass->enumtype) {
1798 type = mono_class_enum_basetype (type->data.klass);
1801 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 case MONO_TYPE_TYPEDBYREF:
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_GENERICINST:
1805 type = &type->data.generic_class->container_class->byval_arg;
1808 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1814 * target_type_is_incompatible:
1815 * @cfg: MonoCompile context
1817 * Check that the item @arg on the evaluation stack can be stored
1818 * in the target type (can be a local, or field, etc).
1819 * The cfg arg can be used to check if we need verification or just
1822 * Returns: non-0 value if arg can't be stored on a target.
1825 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1827 MonoType *simple_type;
1830 if (target->byref) {
1831 /* FIXME: check that the pointed to types match */
1832 if (arg->type == STACK_MP)
1833 return arg->klass != mono_class_from_mono_type (target);
1834 if (arg->type == STACK_PTR)
1839 simple_type = mono_type_get_underlying_type (target);
1840 switch (simple_type->type) {
1841 case MONO_TYPE_VOID:
1845 case MONO_TYPE_BOOLEAN:
1848 case MONO_TYPE_CHAR:
1851 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 /* STACK_MP is needed when setting pinned locals */
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1861 case MONO_TYPE_FNPTR:
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1872 /* FIXME: check type compatibility */
1876 if (arg->type != STACK_I8)
1881 if (arg->type != STACK_R8)
1884 case MONO_TYPE_VALUETYPE:
1885 if (arg->type != STACK_VTYPE)
1887 klass = mono_class_from_mono_type (simple_type);
1888 if (klass != arg->klass)
1891 case MONO_TYPE_TYPEDBYREF:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_GENERICINST:
1899 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1900 if (arg->type != STACK_VTYPE)
1902 klass = mono_class_from_mono_type (simple_type);
1903 if (klass != arg->klass)
1907 if (arg->type != STACK_OBJ)
1909 /* FIXME: check type compatibility */
1913 case MONO_TYPE_MVAR:
1914 /* FIXME: all the arguments must be references for now,
1915 * later look inside cfg and see if the arg num is
1916 * really a reference
1918 g_assert (cfg->generic_sharing_context);
1919 if (arg->type != STACK_OBJ)
1923 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1954 simple_type = sig->params [i];
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1963 case MONO_TYPE_BOOLEAN:
1966 case MONO_TYPE_CHAR:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1989 if (args [i]->type != STACK_I8)
1994 if (args [i]->type != STACK_R8)
1997 case MONO_TYPE_VALUETYPE:
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_GENERICINST:
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2014 g_error ("unknown type 0x%02x in check_call_signature",
2022 callvirt_to_call (int opcode)
2027 case OP_VOIDCALLVIRT:
2036 g_assert_not_reached ();
2043 callvirt_to_call_membase (int opcode)
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2051 return OP_FCALL_MEMBASE;
2053 return OP_LCALL_MEMBASE;
2055 return OP_VCALL_MEMBASE;
2057 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT discriminator (the method, or an explicit @imt_arg) to the
 * callee. On architectures with a dedicated IMT register it is added as a
 * register out-arg; otherwise the arch backend decides.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		/* Caller already computed the discriminator value. */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		/* JIT: embed the MonoMethod pointer as a constant. */
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2089 static MonoJumpInfo *
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2096 ji->data.target = target;
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2104 inline static MonoCallInst *
2105 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2106 MonoInst **args, int calli, int virtual, int tail)
2109 #ifdef MONO_ARCH_SOFT_FLOAT
2114 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2116 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2119 call->signature = sig;
2121 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2124 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2125 call->vret_var = cfg->vret_addr;
2126 //g_assert_not_reached ();
2128 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2129 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2132 temp->backend.is_pinvoke = sig->pinvoke;
2135 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2136 * address of return value to increase optimization opportunities.
2137 * Before vtype decomposition, the dreg of the call ins itself represents the
2138 * fact the call modifies the return value. After decomposition, the call will
2139 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2140 * will be transformed into an LDADDR.
2142 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2143 loada->dreg = alloc_preg (cfg);
2144 loada->inst_p0 = temp;
2145 /* We reference the call too since call->dreg could change during optimization */
2146 loada->inst_p1 = call;
2147 MONO_ADD_INS (cfg->cbb, loada);
2149 call->inst.dreg = temp->dreg;
2151 call->vret_var = loada;
2152 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2153 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2157 * If the call has a float argument, we would need to do an r8->r4 conversion using
2158 * an icall, but that cannot be done during the call sequence since it would clobber
2159 * the call registers + the stack. So we do it before emitting the call.
2161 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2163 MonoInst *in = call->args [i];
2165 if (i >= sig->hasthis)
2166 t = sig->params [i - sig->hasthis];
2168 t = &mono_defaults.int_class->byval_arg;
2169 t = mono_type_get_underlying_type (t);
2171 if (!t->byref && t->type == MONO_TYPE_R4) {
2172 MonoInst *iargs [1];
2176 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2178 /* The result will be in an int vreg */
2179 call->args [i] = conv;
2185 if (COMPILE_LLVM (cfg))
2186 mono_llvm_emit_call (cfg, call);
2188 mono_arch_emit_call (cfg, call);
2190 mono_arch_emit_call (cfg, call);
2193 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2194 cfg->flags |= MONO_CFG_HAS_CALLS;
2199 inline static MonoInst*
2200 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2202 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2204 call->inst.sreg1 = addr->dreg;
2206 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2208 return (MonoInst*)call;
2211 inline static MonoInst*
2212 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2214 #ifdef MONO_ARCH_RGCTX_REG
2219 rgctx_reg = mono_alloc_preg (cfg);
2220 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2222 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2224 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2225 cfg->uses_rgctx_reg = TRUE;
2226 call->rgctx_reg = TRUE;
2228 return (MonoInst*)call;
2230 g_assert_not_reached ();
2236 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2238 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2241 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2242 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2244 gboolean might_be_remote;
2245 gboolean virtual = this != NULL;
2246 gboolean enable_for_aot = TRUE;
2250 if (method->string_ctor) {
2251 /* Create the real signature */
2252 /* FIXME: Cache these */
2253 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2254 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2259 might_be_remote = this && sig->hasthis &&
2260 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2261 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2263 context_used = mono_method_check_context_used (method);
2264 if (might_be_remote && context_used) {
2267 g_assert (cfg->generic_sharing_context);
2269 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2271 return mono_emit_calli (cfg, sig, args, addr);
2274 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2276 if (might_be_remote)
2277 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2279 call->method = method;
2280 call->inst.flags |= MONO_INST_HAS_METHOD;
2281 call->inst.inst_left = this;
2284 int vtable_reg, slot_reg, this_reg;
2286 this_reg = this->dreg;
2288 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2289 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2290 /* Make a call to delegate->invoke_impl */
2291 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2292 call->inst.inst_basereg = this_reg;
2293 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2294 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2296 return (MonoInst*)call;
2300 if ((!cfg->compile_aot || enable_for_aot) &&
2301 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2302 (MONO_METHOD_IS_FINAL (method) &&
2303 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2305 * the method is not virtual, we just need to ensure this is not null
2306 * and then we can call the method directly.
2308 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2309 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2312 if (!method->string_ctor) {
2313 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2314 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2315 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2318 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2320 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2322 return (MonoInst*)call;
2325 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2327 * the method is virtual, but we can statically dispatch since either
2328 * it's class or the method itself are sealed.
2329 * But first we need to ensure it's not a null reference.
2331 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2332 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2333 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2335 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2336 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2338 return (MonoInst*)call;
2341 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2343 vtable_reg = alloc_preg (cfg);
2344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2345 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2347 #ifdef MONO_ARCH_HAVE_IMT
2349 guint32 imt_slot = mono_method_get_imt_slot (method);
2350 emit_imt_argument (cfg, call, imt_arg);
2351 slot_reg = vtable_reg;
2352 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2355 if (slot_reg == -1) {
2356 slot_reg = alloc_preg (cfg);
2357 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2358 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2361 slot_reg = vtable_reg;
2362 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2363 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2364 #ifdef MONO_ARCH_HAVE_IMT
2366 g_assert (mono_method_signature (method)->generic_param_count);
2367 emit_imt_argument (cfg, call, imt_arg);
2372 call->inst.sreg1 = slot_reg;
2373 call->virtual = TRUE;
2376 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2378 return (MonoInst*)call;
2382 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2383 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2390 #ifdef MONO_ARCH_RGCTX_REG
2391 rgctx_reg = mono_alloc_preg (cfg);
2392 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2397 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2399 call = (MonoCallInst*)ins;
2401 #ifdef MONO_ARCH_RGCTX_REG
2402 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2403 cfg->uses_rgctx_reg = TRUE;
2404 call->rgctx_reg = TRUE;
2413 static inline MonoInst*
2414 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2416 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2420 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2427 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2430 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2432 return (MonoInst*)call;
2435 inline static MonoInst*
2436 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2438 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2442 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2446 * mono_emit_abs_call:
2448 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2450 inline static MonoInst*
2451 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2452 MonoMethodSignature *sig, MonoInst **args)
2454 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2458 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2461 if (cfg->abs_patches == NULL)
2462 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2463 g_hash_table_insert (cfg->abs_patches, ji, ji);
2464 ins = mono_emit_native_call (cfg, ji, sig, args);
2465 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2470 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2472 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2473 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2477 * Native code might return non register sized integers
2478 * without initializing the upper bits.
2480 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2481 case OP_LOADI1_MEMBASE:
2482 widen_op = OP_ICONV_TO_I1;
2484 case OP_LOADU1_MEMBASE:
2485 widen_op = OP_ICONV_TO_U1;
2487 case OP_LOADI2_MEMBASE:
2488 widen_op = OP_ICONV_TO_I2;
2490 case OP_LOADU2_MEMBASE:
2491 widen_op = OP_ICONV_TO_U2;
2497 if (widen_op != -1) {
2498 int dreg = alloc_preg (cfg);
2501 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2502 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3 args) helper from corlib, caching it
 * in a static on first use.  Aborts if the helper is missing (old corlib).
 */
2512 get_memcpy_method (void)
2514 static MonoMethod *memcpy_method = NULL;
2515 if (!memcpy_method) {
2516 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2518 g_error ("Old corlib found. Install a new one");
2520 return memcpy_method;
2524 * Emit code to copy a valuetype of type @klass whose address is stored in
2525 * @src->dreg to memory whose address is stored at @dest->dreg.
2528 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2530 MonoInst *iargs [3];
2533 MonoMethod *memcpy_method;
2537 * This check breaks with spilled vars... need to handle it during verification anyway.
2538 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between native (marshalled) and managed layout */
2542 n = mono_class_native_size (klass, &align);
2544 n = mono_class_value_size (klass, &align);
2546 #if HAVE_WRITE_BARRIERS
2547 /* if native is true there should be no references in the struct */
2548 if (klass->has_references && !native) {
2549 /* Avoid barriers when storing to the stack */
2550 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2551 (dest->opcode == OP_LDADDR))) {
2552 int context_used = 0;
2557 if (cfg->generic_sharing_context)
2558 context_used = mono_class_check_context_used (klass);
/* Under generic sharing the klass must come from the RGCTX, not a constant */
2560 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2562 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2563 mono_class_compute_gc_descriptor (klass);
/* mono_value_copy performs the copy with the required write barriers */
2566 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the memcpy helper */
2571 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2572 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2573 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2577 EMIT_NEW_ICONST (cfg, iargs [2], n);
2579 memcpy_method = get_memcpy_method ();
2580 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3 args) helper from corlib, caching it
 * in a static on first use.  Aborts if the helper is missing (old corlib).
 */
2585 get_memset_method (void)
2587 static MonoMethod *memset_method = NULL;
2588 if (!memset_method) {
2589 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2591 g_error ("Old corlib found. Install a new one");
2593 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg.  Small types are memset inline; larger ones go through the
 * managed memset helper.
 */
2597 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2599 MonoInst *iargs [3];
2602 MonoMethod *memset_method;
2604 /* FIXME: Optimize this for the case when dest is an LDADDR */
2606 mono_class_init (klass);
2607 n = mono_class_value_size (klass, &align);
2609 if (n <= sizeof (gpointer) * 5) {
2610 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2613 memset_method = get_memset_method ();
2615 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2616 EMIT_NEW_ICONST (cfg, iargs [2], n);
2617 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  Depending on
 * CONTEXT_USED and the kind of method, the context comes from the MRGCTX
 * variable, the vtable variable, or the 'this' argument's vtable.  Only valid
 * when compiling with generic sharing enabled.
 */
2622 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2624 MonoInst *this = NULL;
2626 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods that don't use method context: rgctx comes via 'this' */
2628 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2629 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2630 !method->klass->valuetype)
2631 EMIT_NEW_ARGLOAD (cfg, this, 0);
2633 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2634 MonoInst *mrgctx_loc, *mrgctx_var;
2637 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the method RGCTX from its dedicated variable */
2639 mrgctx_loc = mono_get_vtable_var (cfg);
2640 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2643 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2644 MonoInst *vtable_loc, *vtable_var;
2648 vtable_loc = mono_get_vtable_var (cfg);
2649 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable may actually hold an MRGCTX; deref to reach the class vtable */
2651 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2652 MonoInst *mrgctx_var = vtable_var;
2655 vtable_reg = alloc_preg (cfg);
2656 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2657 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable out of the 'this' object */
2663 int vtable_reg, res_reg;
2665 vtable_reg = alloc_preg (cfg);
2666 res_reg = alloc_preg (cfg);
2667 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill in a MonoJumpInfoRgctxEntry describing an
 * RGCTX fetch: the requesting METHOD, whether the method RGCTX is used
 * (IN_MRGCTX), the patch describing the looked-up item, and the slot
 * INFO_TYPE.
 */
2672 static MonoJumpInfoRgctxEntry *
2673 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2675 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2676 res->method = method;
2677 res->in_mrgctx = in_mrgctx;
2678 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2679 res->data->type = patch_type;
2680 res->data->data.target = patch_data;
2681 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy-fetch trampoline which resolves ENTRY from the
 * runtime generic context RGCTX.
 */
2686 static inline MonoInst*
2687 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2689 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for KLASS from the runtime
 * generic context of the current method.
 */
2693 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2694 MonoClass *klass, int rgctx_type)
2696 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2697 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2699 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for CMETHOD from the runtime
 * generic context of the current method.
 */
2703 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2704 MonoMethod *cmethod, int rgctx_type)
2706 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2707 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2709 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE item for FIELD from the runtime
 * generic context of the current method.
 */
2713 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2714 MonoClassField *field, int rgctx_type)
2716 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2717 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2719 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.  The vtable
 * argument comes from the RGCTX when generic sharing is active, otherwise it
 * is embedded as a vtable constant.  On architectures with a dedicated vtable
 * register, the argument is passed in that register.
 */
2723 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2725 MonoInst *vtable_arg;
2727 int context_used = 0;
2729 if (cfg->generic_sharing_context)
2730 context_used = mono_class_check_context_used (klass);
2733 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2734 klass, MONO_RGCTX_INFO_VTABLE);
2736 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2740 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2743 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2744 #ifdef MONO_ARCH_VTABLE_REG
2745 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2746 cfg->uses_vtable_reg = TRUE;
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  The comparison strategy depends on
 * compilation mode: class pointers under MONO_OPT_SHARED, an RGCTX-fetched
 * vtable under generic sharing, or vtable constants otherwise (with an AOT
 * variant for each).
 */
2753 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2755 int vtable_reg = alloc_preg (cfg);
2756 int context_used = 0;
2758 if (cfg->generic_sharing_context)
2759 context_used = mono_class_check_context_used (array_class);
2761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot embed a vtable pointer; compare class pointers instead */
2763 if (cfg->opt & MONO_OPT_SHARED) {
2764 int class_reg = alloc_preg (cfg);
2765 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2766 if (cfg->compile_aot) {
2767 int klass_reg = alloc_preg (cfg);
2768 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2769 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2771 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2773 } else if (context_used) {
2774 MonoInst *vtable_ins;
/* Generic sharing: the expected vtable comes from the RGCTX at run time */
2776 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2777 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2779 if (cfg->compile_aot) {
2780 int vt_reg = alloc_preg (cfg);
2781 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2782 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2788 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source class (from
 * OBJ_REG's vtable) and the target KLASS into the thread's MonoJitTlsData, so
 * a failing cast can report both types.  No-op otherwise.
 */
2792 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2794 if (mini_get_debug_options ()->better_cast_details) {
2795 int to_klass_reg = alloc_preg (cfg);
2796 int vtable_reg = alloc_preg (cfg);
2797 int klass_reg = alloc_preg (cfg);
2798 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without the TLS intrinsic cannot support this debug option */
2801 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2805 MONO_ADD_INS (cfg->cbb, tls_get);
2806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2810 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): when --debug=casts is enabled, emit
 * IR that clears the recorded cast details after a cast succeeds.
 */
2816 reset_cast_details (MonoCompile *cfg)
2818 /* Reset the variables holding the cast details */
2819 if (mini_get_debug_options ()->better_cast_details) {
2820 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2822 MONO_ADD_INS (cfg->cbb, tls_get);
2823 /* It is enough to reset the from field */
2824 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2829 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2830 * generic code is generated.
2833 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2835 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2838 MonoInst *rgctx, *addr;
2840 /* FIXME: What if the class is shared? We might not
2841 have to get the address of the method from the
/* Shared code: fetch Unbox's code address from the RGCTX and call indirectly */
2843 addr = emit_get_rgctx_method (cfg, context_used, method,
2844 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2846 rgctx = emit_get_rgctx (cfg, method, context_used);
2848 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: a direct call to Nullable<T>.Unbox suffices */
2850 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR that unboxes the object on top of the stack (SP [0]) to valuetype
 * KLASS: checks the object's rank and element class (throwing
 * InvalidCastException on mismatch) and returns the address of the unboxed
 * payload (object pointer + sizeof (MonoObject)).
 */
2855 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2859 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2860 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2861 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2862 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2864 obj_reg = sp [0]->dreg;
2865 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2868 /* FIXME: generics */
2869 g_assert (klass->rank == 0);
/* Unboxing only applies to non-array objects: rank must be zero */
2872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2873 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code path: compare against the element class fetched from the RGCTX */
2879 MonoInst *element_class;
2881 /* This assertion is from the unboxcast insn */
2882 g_assert (klass->rank == 0);
2884 element_class = emit_get_rgctx_klass (cfg, context_used,
2885 klass->element_class, MONO_RGCTX_INFO_KLASS);
2887 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2888 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2890 save_cast_details (cfg, klass->element_class, obj_reg);
2891 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2892 reset_cast_details (cfg);
/* The unboxed value lives right after the object header */
2895 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2896 MONO_ADD_INS (cfg->cbb, add);
2897 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an object of type KLASS (FOR_BOX when the
 * allocation is for a box operation).  Chooses between mono_object_new under
 * MONO_OPT_SHARED, a specialized mscorlib helper for out-of-line AOT code, a
 * managed allocator when the GC provides one, and the generic allocation
 * function otherwise.
 */
2904 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2906 MonoInst *iargs [2];
2909 if (cfg->opt & MONO_OPT_SHARED) {
2910 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2911 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2913 alloc_ftn = mono_object_new;
2914 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2915 /* This happens often in argument checking code, eg. throw new FooException... */
2916 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2917 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2918 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2920 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2921 #ifdef MONO_CROSS_COMPILE
/* Cross compilation cannot query the target GC for a managed allocator */
2922 MonoMethod *managed_alloc = NULL;
2924 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2928 if (managed_alloc) {
2929 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2930 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2932 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as their first argument */
2934 guint32 lw = vtable->klass->instance_size;
2935 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2936 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2937 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2940 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2944 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only known at run time: DATA_INST supplies the runtime vtable/class, so no
 * managed allocator can be selected at compile time.
 */
2948 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2951 MonoInst *iargs [2];
2952 MonoMethod *managed_alloc = NULL;
2956 FIXME: we cannot get managed_alloc here because we can't get
2957 the class's vtable (because it's not a closed class)
2959 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2960 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2963 if (cfg->opt & MONO_OPT_SHARED) {
2964 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2965 iargs [1] = data_inst;
2966 alloc_ftn = mono_object_new;
2968 if (managed_alloc) {
2969 iargs [0] = data_inst;
2970 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Default: allocate via mono_object_new_specific with the runtime vtable */
2973 iargs [0] = data_inst;
2974 alloc_ftn = mono_object_new_specific;
2977 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL into a new object of type KLASS.  Nullable types
 * are boxed through Nullable<T>.Box; otherwise allocate the object and store
 * the value right after the object header.
 */
2981 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2983 MonoInst *alloc, *ins;
2985 if (mono_class_is_nullable (klass)) {
2986 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2987 return mono_emit_method_call (cfg, method, &val, NULL);
2990 alloc = handle_alloc (cfg, klass, TRUE);
2992 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Box VAL into an object of type KLASS in shared generic code: the runtime
 * vtable comes from DATA_INST, and for nullable types the Box method's code
 * address is fetched from the RGCTX and called indirectly.
 */
2998 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3000 MonoInst *alloc, *ins;
3002 if (mono_class_is_nullable (klass)) {
3003 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3004 /* FIXME: What if the class is shared? We might not
3005 have to get the method address from the RGCTX. */
3006 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3007 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3008 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3010 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3012 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3014 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' of SRC to KLASS: null passes through; an
 * interface cast goes through the interface-cast helper; sealed non-array
 * classes are checked with a single vtable/class comparison; everything else
 * uses the general castclass helper.  Throws InvalidCastException on failure.
 */
3021 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3023 MonoBasicBlock *is_null_bb;
3024 int obj_reg = src->dreg;
3025 int vtable_reg = alloc_preg (cfg);
3027 NEW_BBLOCK (cfg, is_null_bb);
/* null references always pass a castclass */
3029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3032 save_cast_details (cfg, klass, obj_reg);
3034 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3036 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3038 int klass_reg = alloc_preg (cfg);
3040 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes: an exact pointer comparison is enough */
3042 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3043 /* the remoting code is broken, access the class for now */
3045 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3046 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3051 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3053 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3054 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3058 MONO_START_BB (cfg, is_null_bb);
3060 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL 'isinst' of SRC against KLASS.  The result register
 * starts as a copy of the object; the false path overwrites it with NULL.
 * Covers interface checks, array (rank + element class) checks with a
 * special SZARRAY bounds test, nullable classes, sealed classes via direct
 * vtable/class comparison, and the generic subclass check.
 */
3066 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3069 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3070 int obj_reg = src->dreg;
3071 int vtable_reg = alloc_preg (cfg);
3072 int res_reg = alloc_preg (cfg);
3074 NEW_BBLOCK (cfg, is_null_bb);
3075 NEW_BBLOCK (cfg, false_bb);
3076 NEW_BBLOCK (cfg, end_bb);
3078 /* Do the assignment at the beginning, so the other assignment can be if converted */
3079 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3080 ins->type = STACK_OBJ;
/* null input: result is null (the copied register already holds it) */
3083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3086 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3088 /* the is_null_bb target simply copies the input register to the output */
3089 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3091 int klass_reg = alloc_preg (cfg);
3093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then the element (cast) class is checked */
3096 int rank_reg = alloc_preg (cfg);
3097 int eclass_reg = alloc_preg (cfg);
3099 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3100 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3104 if (klass->cast_class == mono_defaults.object_class) {
3105 int parent_reg = alloc_preg (cfg);
3106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3107 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3108 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3110 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3111 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3112 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3114 } else if (klass->cast_class == mono_defaults.enum_class) {
3115 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3116 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3117 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3118 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3120 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3121 /* Check that the object is a vector too */
3122 int bounds_reg = alloc_preg (cfg);
3123 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3128 /* the is_null_bb target simply copies the input register to the output */
3129 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3131 } else if (mono_class_is_nullable (klass)) {
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3133 /* the is_null_bb target simply copies the input register to the output */
3134 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: an exact vtable/class comparison is enough */
3136 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3137 /* the remoting code is broken, access the class for now */
3139 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3148 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3149 /* the is_null_bb target simply copies the input register to the output */
3150 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: overwrite the result with NULL */
3155 MONO_START_BB (cfg, false_bb);
3157 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3160 MONO_START_BB (cfg, is_null_bb);
3162 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the Castclass-with-proxy-aware isinst opcode; see the result
 * encoding below.  The transparent-proxy paths exist so remoting proxies
 * whose type cannot be decided at JIT time yield 2 instead of a wrong answer.
 */
3168 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3170 /* This opcode takes as input an object reference and a class, and returns:
3171 0) if the object is an instance of the class,
3172 1) if the object is not instance of the class,
3173 2) if the object is a proxy whose type cannot be determined */
3176 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3177 int obj_reg = src->dreg;
3178 int dreg = alloc_ireg (cfg);
3180 int klass_reg = alloc_preg (cfg);
3182 NEW_BBLOCK (cfg, true_bb);
3183 NEW_BBLOCK (cfg, false_bb);
3184 NEW_BBLOCK (cfg, false2_bb);
3185 NEW_BBLOCK (cfg, end_bb);
3186 NEW_BBLOCK (cfg, no_proxy_bb);
/* null input counts as "not an instance" (result 1) */
3188 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3191 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3192 NEW_BBLOCK (cfg, interface_fail_bb);
3194 tmp_reg = alloc_preg (cfg);
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3196 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: could still be a proxy with custom type info */
3197 MONO_START_BB (cfg, interface_fail_bb);
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3200 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3202 tmp_reg = alloc_preg (cfg);
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3205 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: detect transparent proxies and check the proxied class */
3207 tmp_reg = alloc_preg (cfg);
3208 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3211 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3212 tmp_reg = alloc_preg (cfg);
3213 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3214 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3216 tmp_reg = alloc_preg (cfg);
3217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3219 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3221 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3224 MONO_START_BB (cfg, no_proxy_bb);
3226 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result selection: 1 = not an instance, 2 = undecidable proxy, 0 = instance */
3229 MONO_START_BB (cfg, false_bb);
3231 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3232 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3234 MONO_START_BB (cfg, false2_bb);
3236 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3239 MONO_START_BB (cfg, true_bb);
3241 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3243 MONO_START_BB (cfg, end_bb);
3246 MONO_INST_NEW (cfg, ins, OP_ICONST);
3248 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the proxy-aware castclass opcode; see the result encoding
 * below.  Unlike handle_cisinst (), a definite type mismatch throws
 * InvalidCastException rather than producing a failure code.
 */
3254 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3256 /* This opcode takes as input an object reference and a class, and returns:
3257 0) if the object is an instance of the class,
3258 1) if the object is a proxy whose type cannot be determined
3259 an InvalidCastException exception is thrown otherwhise*/
3262 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3263 int obj_reg = src->dreg;
3264 int dreg = alloc_ireg (cfg);
3265 int tmp_reg = alloc_preg (cfg);
3266 int klass_reg = alloc_preg (cfg);
3268 NEW_BBLOCK (cfg, end_bb);
3269 NEW_BBLOCK (cfg, ok_result_bb);
/* null passes the cast with result 0 */
3271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3272 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3274 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3275 NEW_BBLOCK (cfg, interface_fail_bb);
3277 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3278 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a proxy with custom type info avoids a throw */
3279 MONO_START_BB (cfg, interface_fail_bb);
3280 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3282 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3284 tmp_reg = alloc_preg (cfg);
3285 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3287 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3289 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3290 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3293 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface: detect transparent proxies and check the proxied class */
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3296 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3297 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3299 tmp_reg = alloc_preg (cfg);
3300 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3303 tmp_reg = alloc_preg (cfg);
3304 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3305 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3306 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3308 NEW_BBLOCK (cfg, fail_1_bb);
3310 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy that failed the check: result 1 (undecidable) instead of throwing */
3312 MONO_START_BB (cfg, fail_1_bb);
3314 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3315 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3317 MONO_START_BB (cfg, no_proxy_bb);
3319 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3322 MONO_START_BB (cfg, ok_result_bb);
3324 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3326 MONO_START_BB (cfg, end_bb);
3329 MONO_INST_NEW (cfg, ins, OP_ICONST);
3331 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR for constructing a delegate of type KLASS bound to
 * METHOD with target object TARGET: allocates the delegate, fills in the
 * target, method, per-domain code slot and invoke_impl trampoline, and
 * returns the new delegate object.
 */
3336 static G_GNUC_UNUSED MonoInst*
3337 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3339 gpointer *trampoline;
3340 MonoInst *obj, *method_ins, *tramp_ins;
3344 obj = handle_alloc (cfg, klass, FALSE);
3346 /* Inline the contents of mono_delegate_ctor */
3348 /* Set target field */
3349 /* Optimize away setting of NULL target */
3350 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3351 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3353 /* Set method field */
3354 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3355 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3358 * To avoid looking up the compiled code belonging to the target method
3359 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3360 * store it, and we fill it after the method has been compiled.
3362 if (!cfg->compile_aot && !method->dynamic) {
3363 MonoInst *code_slot_ins;
/* Look up (or create) the per-domain code slot under the domain lock */
3365 domain = mono_domain_get ();
3366 mono_domain_lock (domain);
3367 if (!domain_jit_info (domain)->method_code_hash)
3368 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3369 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3371 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3372 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3374 mono_domain_unlock (domain);
3376 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3377 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3380 /* Set invoke_impl field */
3381 if (cfg->compile_aot) {
3382 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3384 trampoline = mono_create_delegate_trampoline (klass);
3385 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3387 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3389 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall allocating a RANK-
 * dimensional array, with the dimension arguments taken from SP.  Marks the
 * method as using varargs and disables LLVM, which cannot compile it.
 */
3395 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3397 MonoJitICallInfo *info;
3399 /* Need to register the icall so it gets an icall wrapper */
3400 info = mono_get_array_new_va_icall (rank);
3402 cfg->flags |= MONO_CFG_HAS_VARARGS;
3404 /* mono_array_new_va () needs a vararg calling convention */
3405 cfg->disable_llvm = TRUE;
3407 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3408 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into the got_var, inserting it at the very start of the entry basic block,
 * plus a dummy use in the exit block to keep the variable alive for the
 * whole method.  No-op if there is no got_var or it is already allocated.
 */
3412 mono_emit_load_got_addr (MonoCompile *cfg)
3414 MonoInst *getaddr, *dummy_use;
3416 if (!cfg->got_var || cfg->got_var_allocated)
3419 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3420 getaddr->dreg = cfg->got_var->dreg;
3422 /* Add it to the start of the first bblock */
3423 if (cfg->bb_entry->code) {
3424 getaddr->next = cfg->bb_entry->code;
3425 cfg->bb_entry->code = getaddr;
3428 MONO_ADD_INS (cfg->bb_entry, getaddr);
3430 cfg->got_var_allocated = TRUE;
3433 * Add a dummy use to keep the got_var alive, since real uses might
3434 * only be generated by the back ends.
3435 * Add it to end_bblock, so the variable's lifetime covers the whole
3437 * It would be better to make the usage of the got var explicit in all
3438 * cases when the backend needs it (i.e. calls, throw etc.), so this
3439 * wouldn't be needed.
3441 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3442 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * Inlining size threshold (IL code_size), lazily initialized on first use
 * from the MONO_INLINELIMIT environment variable, falling back to
 * INLINE_LENGTH_LIMIT (see mono_method_check_inlining below).
 */
3445 static int inline_limit;
3446 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects generic-sharing compilations, runtime/icall/
 * noinline/synchronized/pinvoke methods, MarshalByRef classes, methods with
 * exception clauses, and bodies at or above the size limit. Also refuses
 * methods whose class still needs its .cctor run (to avoid injecting a
 * mono_runtime_class_init call into inlined code), methods with declarative
 * security, and — under soft-float — methods taking or returning R4.
 * NOTE(review): excerpt is sampled; several branches and returns are elided,
 * so the exact control flow here cannot be fully reconstructed from this view.
 */
3449 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3451 MonoMethodHeader *header;
3453 #ifdef MONO_ARCH_SOFT_FLOAT
3454 MonoMethodSignature *sig = mono_method_signature (method);
3458 if (cfg->generic_sharing_context)
3461 #ifdef MONO_ARCH_HAVE_LMF_OPS
3462 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3463 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3464 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3468 if (method->is_inflated)
3469 /* Avoid inflating the header */
3470 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3472 header = mono_method_get_header (method);
3474 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3475 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3476 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3477 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3478 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3479 (method->klass->marshalbyref) ||
3480 !header || header->num_clauses)
3483 /* also consider num_locals? */
3484 /* Do the size check early to avoid creating vtables */
3485 if (!inline_limit_inited) {
3486 if (getenv ("MONO_INLINELIMIT"))
3487 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3489 inline_limit = INLINE_LENGTH_LIMIT;
3490 inline_limit_inited = TRUE;
3492 if (header->code_size >= inline_limit)
3496 * if we can initialize the class of the method right away, we do,
3497 * otherwise we don't allow inlining if the class needs initialization,
3498 * since it would mean inserting a call to mono_runtime_class_init()
3499 * inside the inlined code
3501 if (!(cfg->opt & MONO_OPT_SHARED)) {
3502 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3503 if (cfg->run_cctors && method->klass->has_cctor) {
3504 if (!method->klass->runtime_info)
3505 /* No vtable created yet */
3507 vtable = mono_class_vtable (cfg->domain, method->klass);
3510 /* This makes so that inline cannot trigger */
3511 /* .cctors: too many apps depend on them */
3512 /* running with a specific order... */
3513 if (! vtable->initialized)
3515 mono_runtime_class_init (vtable);
3517 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3518 if (!method->klass->runtime_info)
3519 /* No vtable created yet */
3521 vtable = mono_class_vtable (cfg->domain, method->klass);
3524 if (!vtable->initialized)
3529 * If we're compiling for shared code
3530 * the cctor will need to be run at aot method load time, for example,
3531 * or at the end of the compilation of the inlining method.
3533 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3538 * CAS - do not inline methods with declarative security
3539 * Note: this has to be before any possible return TRUE;
3541 if (mono_method_has_declsec (method))
3544 #ifdef MONO_ARCH_SOFT_FLOAT
3546 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3548 for (i = 0; i < sig->param_count; ++i)
3549 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access requires emitting a class-init
 * check for VTABLE's class: not needed once the vtable is initialized
 * (JIT only — AOT must still emit it), for beforefieldinit classes, for
 * classes whose cctor does not need to run, or when accessing a field of
 * the current instance method's own class (the cctor already ran before
 * the method could be entered).
 */
3557 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3559 if (vtable->initialized && !cfg->compile_aot)
3562 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3565 if (!mono_class_needs_cctor_run (vtable->klass, method))
3568 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3569 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of single-dimension
 * array ARR (element class KLASS): bounds-check against max_length, then
 * either a single x86/amd64 LEA for power-of-two element sizes, or an
 * explicit multiply+add against the MonoArray vector offset. Returns the
 * address instruction (STACK_PTR). On 64-bit the 32-bit index is
 * sign-extended first; an I8 index is narrowed to I4.
 */
3576 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3580 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3582 mono_class_init (klass);
3583 size = mono_class_array_element_size (klass);
3585 mult_reg = alloc_preg (cfg);
3586 array_reg = arr->dreg;
3587 index_reg = index->dreg;
3589 #if SIZEOF_REGISTER == 8
3590 /* The array reg is 64 bits but the index reg is only 32 */
3591 index2_reg = alloc_preg (cfg);
3592 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3594 if (index->type == STACK_I8) {
3595 index2_reg = alloc_preg (cfg);
3596 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3598 index2_reg = index_reg;
3602 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3604 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3605 if (size == 1 || size == 2 || size == 4 || size == 8) {
3606 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3608 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3609 ins->type = STACK_PTR;
3615 add_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3618 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3619 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3620 ins->type = STACK_PTR;
3621 MONO_ADD_INS (cfg->cbb, ins);
3626 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 array: load the
 * bounds array, subtract each dimension's lower bound, range-check both
 * adjusted indices against the per-dimension lengths (throwing
 * IndexOutOfRangeException), then compute
 *   ((realidx1 * length2) + realidx2) * element_size + vector_offset.
 * Only compiled when the arch has real mul/div (depends on OP_LMUL).
 */
3628 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3630 int bounds_reg = alloc_preg (cfg);
3631 int add_reg = alloc_preg (cfg);
3632 int mult_reg = alloc_preg (cfg);
3633 int mult2_reg = alloc_preg (cfg);
3634 int low1_reg = alloc_preg (cfg);
3635 int low2_reg = alloc_preg (cfg);
3636 int high1_reg = alloc_preg (cfg);
3637 int high2_reg = alloc_preg (cfg);
3638 int realidx1_reg = alloc_preg (cfg);
3639 int realidx2_reg = alloc_preg (cfg);
3640 int sum_reg = alloc_preg (cfg);
3645 mono_class_init (klass);
3646 size = mono_class_array_element_size (klass);
3648 index1 = index_ins1->dreg;
3649 index2 = index_ins2->dreg;
3651 /* range checking */
3652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3653 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3656 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3657 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3658 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3659 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3660 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3661 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3663 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3664 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3665 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3667 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3669 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3671 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3672 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3674 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3675 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3677 ins->type = STACK_MP;
3679 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch an Array Address/Get/Set intrinsic: use the fast rank-1 path,
 * the rank-2 inline path (when the arch supports mul and MONO_OPT_INTRINS
 * is on), or fall back to a call to the marshal-generated Address wrapper.
 * For setters the value argument is excluded from the rank computation.
 */
3686 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3690 MonoMethod *addr_method;
3693 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3696 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3698 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3699 /* emit_ldelema_2 depends on OP_LMUL */
3700 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3701 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3705 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3706 addr_method = mono_marshal_get_array_address (rank, element_size);
3707 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an "intrinsic").
 * Returns the resulting instruction, or falls through to the arch-specific
 * hook. Recognized classes: String (get_Chars/get_Length/InternalSetChar),
 * Object (GetType/InternalGetHashCode/.ctor), Array (get_Rank/get_Length),
 * RuntimeHelpers (OffsetToStringData), Thread (CurrentThread/SpinWait_nop/
 * MemoryBarrier), Monitor (Enter/Exit fast paths), System.Array
 * GetGenericValueImpl, Interlocked (Read/Increment/Decrement/Add/Exchange/
 * CompareExchange where the arch provides atomics), Debugger.Break,
 * Environment.get_IsRunningOnWindows, Math, and SIMD intrinsics.
 * NOTE(review): excerpt is sampled; many returns/braces are elided, so the
 * per-branch control flow cannot be fully confirmed from this view.
 */
3713 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3715 MonoInst *ins = NULL;
3717 static MonoClass *runtime_helpers_class = NULL;
3718 if (! runtime_helpers_class)
3719 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3720 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- String intrinsics ---- */
3722 if (cmethod->klass == mono_defaults.string_class) {
3723 if (strcmp (cmethod->name, "get_Chars") == 0) {
3724 int dreg = alloc_ireg (cfg);
3725 int index_reg = alloc_preg (cfg);
3726 int mult_reg = alloc_preg (cfg);
3727 int add_reg = alloc_preg (cfg);
3729 #if SIZEOF_REGISTER == 8
3730 /* The array reg is 64 bits but the index reg is only 32 */
3731 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3733 index_reg = args [1]->dreg;
3735 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3737 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3738 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3739 add_reg = ins->dreg;
3740 /* Avoid a warning */
3742 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3746 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3747 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3748 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3750 type_from_op (ins, NULL, NULL);
3752 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3753 int dreg = alloc_ireg (cfg);
3754 /* Decompose later to allow more optimizations */
3755 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3756 ins->type = STACK_I4;
3757 cfg->cbb->has_array_access = TRUE;
3758 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3761 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3762 int mult_reg = alloc_preg (cfg);
3763 int add_reg = alloc_preg (cfg);
3765 /* The corlib functions check for oob already. */
3766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3767 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- Object intrinsics ---- */
3771 } else if (cmethod->klass == mono_defaults.object_class) {
3773 if (strcmp (cmethod->name, "GetType") == 0) {
3774 int dreg = alloc_preg (cfg);
3775 int vt_reg = alloc_preg (cfg);
3776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3777 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3778 type_from_op (ins, NULL, NULL);
3781 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3782 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3783 int dreg = alloc_ireg (cfg);
3784 int t1 = alloc_ireg (cfg);
/* Hash the object address: (addr << 3) * 2654435761 (Knuth multiplicative hash).
   Only valid with a non-moving GC. */
3786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3787 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3788 ins->type = STACK_I4;
3792 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3793 MONO_INST_NEW (cfg, ins, OP_NOP);
3794 MONO_ADD_INS (cfg->cbb, ins);
/* ---- Array intrinsics ---- */
3798 } else if (cmethod->klass == mono_defaults.array_class) {
3799 if (cmethod->name [0] != 'g')
3802 if (strcmp (cmethod->name, "get_Rank") == 0) {
3803 int dreg = alloc_ireg (cfg);
3804 int vtable_reg = alloc_preg (cfg);
3805 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3806 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3807 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3808 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3809 type_from_op (ins, NULL, NULL);
3812 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3813 int dreg = alloc_ireg (cfg);
3815 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3816 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3817 type_from_op (ins, NULL, NULL);
3822 } else if (cmethod->klass == runtime_helpers_class) {
3824 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3825 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- Thread intrinsics ---- */
3829 } else if (cmethod->klass == mono_defaults.thread_class) {
3830 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3831 ins->dreg = alloc_preg (cfg);
3832 ins->type = STACK_OBJ;
3833 MONO_ADD_INS (cfg->cbb, ins);
3835 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3836 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3837 MONO_ADD_INS (cfg->cbb, ins);
3839 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3840 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3841 MONO_ADD_INS (cfg->cbb, ins);
/* ---- Monitor fast paths (Enter/Exit via trampoline or IL fastpath) ---- */
3844 } else if (cmethod->klass == mono_defaults.monitor_class) {
3845 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3846 if (strcmp (cmethod->name, "Enter") == 0) {
3849 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3850 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3851 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3852 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3854 return (MonoInst*)call;
3855 } else if (strcmp (cmethod->name, "Exit") == 0) {
3858 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3859 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3860 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3861 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3863 return (MonoInst*)call;
3865 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3866 MonoMethod *fast_method = NULL;
3868 /* Avoid infinite recursion */
3869 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3870 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3871 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3874 if (strcmp (cmethod->name, "Enter") == 0 ||
3875 strcmp (cmethod->name, "Exit") == 0)
3876 fast_method = mono_monitor_get_fast_path (cmethod);
3880 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3882 } else if (mini_class_is_system_array (cmethod->klass) &&
3883 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3884 MonoInst *addr, *store, *load;
3885 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3887 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3888 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3889 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked intrinsics ---- */
3891 } else if (cmethod->klass->image == mono_defaults.corlib &&
3892 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3893 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3896 #if SIZEOF_REGISTER == 8
3897 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3898 /* 64 bit reads are already atomic */
3899 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3900 ins->dreg = mono_alloc_preg (cfg);
3901 ins->inst_basereg = args [0]->dreg;
3902 ins->inst_offset = 0;
3903 MONO_ADD_INS (cfg->cbb, ins);
3907 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3908 if (strcmp (cmethod->name, "Increment") == 0) {
3909 MonoInst *ins_iconst;
3912 if (fsig->params [0]->type == MONO_TYPE_I4)
3913 opcode = OP_ATOMIC_ADD_NEW_I4;
3914 #if SIZEOF_REGISTER == 8
3915 else if (fsig->params [0]->type == MONO_TYPE_I8)
3916 opcode = OP_ATOMIC_ADD_NEW_I8;
3919 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3920 ins_iconst->inst_c0 = 1;
3921 ins_iconst->dreg = mono_alloc_ireg (cfg);
3922 MONO_ADD_INS (cfg->cbb, ins_iconst);
3924 MONO_INST_NEW (cfg, ins, opcode);
3925 ins->dreg = mono_alloc_ireg (cfg);
3926 ins->inst_basereg = args [0]->dreg;
3927 ins->inst_offset = 0;
3928 ins->sreg2 = ins_iconst->dreg;
3929 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3930 MONO_ADD_INS (cfg->cbb, ins);
3932 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3933 MonoInst *ins_iconst;
3936 if (fsig->params [0]->type == MONO_TYPE_I4)
3937 opcode = OP_ATOMIC_ADD_NEW_I4;
3938 #if SIZEOF_REGISTER == 8
3939 else if (fsig->params [0]->type == MONO_TYPE_I8)
3940 opcode = OP_ATOMIC_ADD_NEW_I8;
3943 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3944 ins_iconst->inst_c0 = -1;
3945 ins_iconst->dreg = mono_alloc_ireg (cfg);
3946 MONO_ADD_INS (cfg->cbb, ins_iconst);
3948 MONO_INST_NEW (cfg, ins, opcode);
3949 ins->dreg = mono_alloc_ireg (cfg);
3950 ins->inst_basereg = args [0]->dreg;
3951 ins->inst_offset = 0;
3952 ins->sreg2 = ins_iconst->dreg;
3953 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3954 MONO_ADD_INS (cfg->cbb, ins);
3956 } else if (strcmp (cmethod->name, "Add") == 0) {
3959 if (fsig->params [0]->type == MONO_TYPE_I4)
3960 opcode = OP_ATOMIC_ADD_NEW_I4;
3961 #if SIZEOF_REGISTER == 8
3962 else if (fsig->params [0]->type == MONO_TYPE_I8)
3963 opcode = OP_ATOMIC_ADD_NEW_I8;
3967 MONO_INST_NEW (cfg, ins, opcode);
3968 ins->dreg = mono_alloc_ireg (cfg);
3969 ins->inst_basereg = args [0]->dreg;
3970 ins->inst_offset = 0;
3971 ins->sreg2 = args [1]->dreg;
3972 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3973 MONO_ADD_INS (cfg->cbb, ins);
3976 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3978 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3979 if (strcmp (cmethod->name, "Exchange") == 0) {
3981 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
3983 if (fsig->params [0]->type == MONO_TYPE_I4)
3984 opcode = OP_ATOMIC_EXCHANGE_I4;
3985 #if SIZEOF_REGISTER == 8
3986 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
3987 (fsig->params [0]->type == MONO_TYPE_I))
3988 opcode = OP_ATOMIC_EXCHANGE_I8;
3990 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
3991 opcode = OP_ATOMIC_EXCHANGE_I4;
3996 MONO_INST_NEW (cfg, ins, opcode);
3997 ins->dreg = mono_alloc_ireg (cfg);
3998 ins->inst_basereg = args [0]->dreg;
3999 ins->inst_offset = 0;
4000 ins->sreg2 = args [1]->dreg;
4001 MONO_ADD_INS (cfg->cbb, ins);
4003 switch (fsig->params [0]->type) {
4005 ins->type = STACK_I4;
4009 ins->type = STACK_I8;
4011 case MONO_TYPE_OBJECT:
4012 ins->type = STACK_OBJ;
4015 g_assert_not_reached ();
4018 #if HAVE_WRITE_BARRIERS
/* Exchanging an object reference needs a GC write barrier. */
4020 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4021 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4025 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4027 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4028 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4030 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4031 if (fsig->params [1]->type == MONO_TYPE_I4)
4033 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4034 size = sizeof (gpointer);
4035 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4038 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4039 ins->dreg = alloc_ireg (cfg);
4040 ins->sreg1 = args [0]->dreg;
4041 ins->sreg2 = args [1]->dreg;
4042 ins->sreg3 = args [2]->dreg;
4043 ins->type = STACK_I4;
4044 MONO_ADD_INS (cfg->cbb, ins);
4045 } else if (size == 8) {
4046 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4047 ins->dreg = alloc_ireg (cfg);
4048 ins->sreg1 = args [0]->dreg;
4049 ins->sreg2 = args [1]->dreg;
4050 ins->sreg3 = args [2]->dreg;
4051 ins->type = STACK_I8;
4052 MONO_ADD_INS (cfg->cbb, ins);
4054 /* g_assert_not_reached (); */
4056 #if HAVE_WRITE_BARRIERS
4058 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4059 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4063 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib intrinsics ---- */
4067 } else if (cmethod->klass->image == mono_defaults.corlib) {
4068 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4069 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4070 MONO_INST_NEW (cfg, ins, OP_BREAK);
4071 MONO_ADD_INS (cfg->cbb, ins);
4074 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4075 && strcmp (cmethod->klass->name, "Environment") == 0) {
4076 #ifdef PLATFORM_WIN32
4077 EMIT_NEW_ICONST (cfg, ins, 1);
4079 EMIT_NEW_ICONST (cfg, ins, 0);
4083 } else if (cmethod->klass == mono_defaults.math_class) {
4085 * There is general branches code for Min/Max, but it does not work for
4087 * http://everything2.com/?node_id=1051618
4091 #ifdef MONO_ARCH_SIMD_INTRINSICS
4092 if (cfg->opt & MONO_OPT_SIMD) {
4093 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Last resort: let the architecture backend try. */
4099 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4103 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to managed implementations.
 * Currently only String.InternalAllocateStr, which is rerouted to the
 * GC's managed allocator (disabled under cross-compilation). Returns the
 * emitted call, or NULL-equivalent fallthrough when no redirect applies
 * (the elided lines presumably contain the NULL return — sampled excerpt).
 */
4106 inline static MonoInst*
4107 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4108 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4110 if (method->klass == mono_defaults.string_class) {
4111 /* managed string allocation support */
4112 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4113 MonoInst *iargs [2];
4114 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4115 #ifdef MONO_CROSS_COMPILE
4116 MonoMethod *managed_alloc = NULL;
4118 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4122 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4123 iargs [1] = args [0];
4124 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create one local variable per argument of SIG (including
 * an implicit 'this') and store the incoming stack values SP into them, so
 * the inlined body can address its arguments through cfg->args.
 */
4131 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4133 MonoInst *store, *temp;
4136 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4137 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4140 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4141 * would be different than the MonoInst's used to represent arguments, and
4142 * the ldelema implementation can't deal with that.
4143 * Solution: When ldelema is used on an inline argument, create a var for
4144 * it, emit ldelema on that var, and emit the saving code below in
4145 * inline_method () if needed.
4147 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4148 cfg->args [i] = temp;
4149 /* This uses cfg->args [i] which is set by the preceding line */
4150 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4151 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, inlining is additionally restricted by
 * name prefixes taken from the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit helpers below).
 */
4156 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4157 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4159 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: permit inlining only when the callee's full name starts
 * with the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env
 * value is cached in a static on first call; an empty/unset limit allows
 * everything (the elided branch presumably returns TRUE — sampled excerpt).
 */
4161 check_inline_called_method_name_limit (MonoMethod *called_method)
4164 static char *limit = NULL;
4166 if (limit == NULL) {
4167 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4169 if (limit_string != NULL)
4170 limit = limit_string;
4172 limit = (char *) "";
4175 if (limit [0] != '\0') {
4176 char *called_method_name = mono_method_full_name (called_method, TRUE);
4178 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4179 g_free (called_method_name);
4181 //return (strncmp_result <= 0);
4182 return (strncmp_result == 0);
4189 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper, mirror of the "called" variant above: permit inlining
 * only when the CALLER's full name starts with the prefix given in
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
4191 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4194 static char *limit = NULL;
4196 if (limit == NULL) {
4197 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4198 if (limit_string != NULL) {
4199 limit = limit_string;
4201 limit = (char *) "";
4205 if (limit [0] != '\0') {
4206 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4208 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4209 g_free (caller_method_name);
4211 //return (strncmp_result <= 0);
4212 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compilation: save/restore all the
 * per-method cfg state (locals, args, cil offsets, cbb, generic context),
 * recursively run mono_method_to_ir on the callee between freshly created
 * start/end bblocks, and on success stitch those bblocks into the caller's
 * graph (merging where possible). If the callee only throws, its unset
 * return variable is filled with a type-appropriate dummy zero. On failure
 * (cost too high or an exception during conversion) the added bblocks are
 * discarded and the cfg exception state cleared.
 * NOTE(review): excerpt is sampled; the return-value paths are partially
 * elided, so the exact success/failure return values cannot be confirmed.
 */
4220 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4221 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4223 MonoInst *ins, *rvar = NULL;
4224 MonoMethodHeader *cheader;
4225 MonoBasicBlock *ebblock, *sbblock;
4227 MonoMethod *prev_inlined_method;
4228 MonoInst **prev_locals, **prev_args;
4229 MonoType **prev_arg_types;
4230 guint prev_real_offset;
4231 GHashTable *prev_cbb_hash;
4232 MonoBasicBlock **prev_cil_offset_to_bb;
4233 MonoBasicBlock *prev_cbb;
4234 unsigned char* prev_cil_start;
4235 guint32 prev_cil_offset_to_bb_len;
4236 MonoMethod *prev_current_method;
4237 MonoGenericContext *prev_generic_context;
4238 gboolean ret_var_set, prev_ret_var_set;
4240 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4242 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4243 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4246 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4247 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4251 if (cfg->verbose_level > 2)
4252 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4254 if (!cmethod->inline_info) {
4255 mono_jit_stats.inlineable_methods++;
4256 cmethod->inline_info = 1;
4258 /* allocate space to store the return value */
4259 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4260 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4263 /* allocate local variables */
4264 cheader = mono_method_get_header (cmethod);
4265 prev_locals = cfg->locals;
4266 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4267 for (i = 0; i < cheader->num_locals; ++i)
4268 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4270 /* allocate start and end blocks */
4271 /* This is needed so if the inline is aborted, we can clean up */
4272 NEW_BBLOCK (cfg, sbblock);
4273 sbblock->real_offset = real_offset;
4275 NEW_BBLOCK (cfg, ebblock);
4276 ebblock->block_num = cfg->num_bblocks++;
4277 ebblock->real_offset = real_offset;
/* Save the caller's per-method cfg state before recursing. */
4279 prev_args = cfg->args;
4280 prev_arg_types = cfg->arg_types;
4281 prev_inlined_method = cfg->inlined_method;
4282 cfg->inlined_method = cmethod;
4283 cfg->ret_var_set = FALSE;
4284 prev_real_offset = cfg->real_offset;
4285 prev_cbb_hash = cfg->cbb_hash;
4286 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4287 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4288 prev_cil_start = cfg->cil_start;
4289 prev_cbb = cfg->cbb;
4290 prev_current_method = cfg->current_method;
4291 prev_generic_context = cfg->generic_context;
4292 prev_ret_var_set = cfg->ret_var_set;
4294 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4296 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state. */
4298 cfg->inlined_method = prev_inlined_method;
4299 cfg->real_offset = prev_real_offset;
4300 cfg->cbb_hash = prev_cbb_hash;
4301 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4302 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4303 cfg->cil_start = prev_cil_start;
4304 cfg->locals = prev_locals;
4305 cfg->args = prev_args;
4306 cfg->arg_types = prev_arg_types;
4307 cfg->current_method = prev_current_method;
4308 cfg->generic_context = prev_generic_context;
4309 cfg->ret_var_set = prev_ret_var_set;
4311 if ((costs >= 0 && costs < 60) || inline_allways) {
4312 if (cfg->verbose_level > 2)
4313 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4315 mono_jit_stats.inlined_methods++;
4317 /* always add some code to avoid block split failures */
4318 MONO_INST_NEW (cfg, ins, OP_NOP);
4319 MONO_ADD_INS (prev_cbb, ins);
4321 prev_cbb->next_bb = sbblock;
4322 link_bblock (cfg, prev_cbb, sbblock);
4325 * Get rid of the begin and end bblocks if possible to aid local
4328 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4330 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4331 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4333 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4334 MonoBasicBlock *prev = ebblock->in_bb [0];
4335 mono_merge_basic_blocks (cfg, prev, ebblock);
4337 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4338 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4339 cfg->cbb = prev_cbb;
4347 * If the inlined method contains only a throw, then the ret var is not
4348 * set, so set it to a dummy value.
4351 static double r8_0 = 0.0;
4353 switch (rvar->type) {
4355 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4358 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4363 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4366 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4367 ins->type = STACK_R8;
4368 ins->inst_p0 = (void*)&r8_0;
4369 ins->dreg = rvar->dreg;
4370 MONO_ADD_INS (cfg->cbb, ins);
4373 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4376 g_assert_not_reached ();
4380 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4385 if (cfg->verbose_level > 2)
4386 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4387 cfg->exception_type = MONO_EXCEPTION_NONE;
4388 mono_loader_clear_error ();
4390 /* This gets rid of the newly added bblocks */
4391 cfg->cbb = prev_cbb;
4397 * Some of these comments may well be out-of-date.
4398 * Design decisions: we do a single pass over the IL code (and we do bblock
4399 * splitting/merging in the few cases when it's required: a back jump to an IL
4400 * address that was not already seen as bblock starting point).
4401 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4402 * Complex operations are decomposed in simpler ones right away. We need to let the
4403 * arch-specific code peek and poke inside this process somehow (except when the
4404 * optimizations can take advantage of the full semantic info of coarse opcodes).
4405 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4406 * MonoInst->opcode initially is the IL opcode or some simplification of that
4407 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4408 * opcode with value bigger than OP_LAST.
4409 * At this point the IR can be handed over to an interpreter, a dumb code generator
4410 * or to the optimizing code generator that will translate it to SSA form.
4412 * Profiling directed optimizations.
4413 * We may compile by default with few or no optimizations and instrument the code
4414 * or the user may indicate what methods to optimize the most either in a config file
4415 * or through repeated runs where the compiler applies offline the optimizations to
4416 * each method and then decides if it was worth it.
/*
 * Per-opcode verification helpers used by mono_method_to_ir: each one
 * jumps to the UNVERIFIED handler (or load_error for type-load failures)
 * when the IL being decoded violates the corresponding constraint.
 */
4419 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4420 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4421 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4422 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4423 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4424 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4425 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4426 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4428 /* offset from br.s -> br like opcodes */
4429 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that offset in the cil_offset_to_bb map.
 */
4432 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4434 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4436 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [start, end): decode each opcode and create
 * bblocks at every branch target and fall-through point (GET_BBLOCK), so
 * the main conversion pass knows all block boundaries up front. Bblocks
 * containing only a throw are flagged out_of_line. *pos reports how far
 * decoding got (error handling for bad opcodes is in the elided lines —
 * sampled excerpt).
 */
4440 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4442 unsigned char *ip = start;
4443 unsigned char *target;
4446 MonoBasicBlock *bblock;
4447 const MonoOpcode *opcode;
4450 cli_addr = ip - start;
4451 i = mono_opcode_value ((const guint8 **)&ip, end);
4454 opcode = &mono_opcodes [i];
/* Advance IP past the operand; only branch operands create bblocks. */
4455 switch (opcode->argument) {
4456 case MonoInlineNone:
4459 case MonoInlineString:
4460 case MonoInlineType:
4461 case MonoInlineField:
4462 case MonoInlineMethod:
4465 case MonoShortInlineR:
4472 case MonoShortInlineVar:
4473 case MonoShortInlineI:
4476 case MonoShortInlineBrTarget:
4477 target = start + cli_addr + 2 + (signed char)ip [1];
4478 GET_BBLOCK (cfg, bblock, target);
4481 GET_BBLOCK (cfg, bblock, ip);
4483 case MonoInlineBrTarget:
4484 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4485 GET_BBLOCK (cfg, bblock, target);
4488 GET_BBLOCK (cfg, bblock, ip);
4490 case MonoInlineSwitch: {
4491 guint32 n = read32 (ip + 1);
4494 cli_addr += 5 + 4 * n;
4495 target = start + cli_addr;
4496 GET_BBLOCK (cfg, bblock, target);
4498 for (j = 0; j < n; ++j) {
4499 target = start + cli_addr + (gint32)read32 (ip);
4500 GET_BBLOCK (cfg, bblock, target);
4510 g_assert_not_reached ();
4513 if (i == CEE_THROW) {
4514 unsigned char *bb_start = ip - 1;
4516 /* Find the start of the bblock containing the throw */
4518 while ((bb_start >= start) && !bblock) {
4519 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4523 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN to a MonoMethod.  For wrapper methods the token indexes
 * the wrapper's private data instead of the image metadata.  "allow
 * open" because the resolved method may live on an open constructed
 * type; mini_get_method () adds that check.
 */
4532 static inline MonoMethod *
4533 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4537 if (m->wrapper_type != MONO_WRAPPER_NONE)
4538 return mono_method_get_wrapper_data (m, token);
4540 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open (), but when we are not compiling
 * generic-shared code a method whose declaring class is still an open
 * constructed type is treated specially.
 * NOTE(review): the body of this branch is not visible here —
 * presumably it rejects the method; confirm against the full source.
 */
4545 static inline MonoMethod *
4546 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4548 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4550 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve a type TOKEN to a MonoClass, going through the wrapper data
 * for wrapper methods, then make sure the class is initialized before
 * returning it.
 */
4556 static inline MonoClass*
4557 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4561 if (method->wrapper_type != MONO_WRAPPER_NONE)
4562 klass = mono_method_get_wrapper_data (method, token);
4564 klass = mono_class_get_full (method->klass->image, token, context);
4566 mono_class_init (klass);
4571 * Returns TRUE if the JIT should abort inlining because "callee"
4572 * is influenced by security attributes.
4575 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only evaluate declarative security when inlining into another method */
4579 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4583 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4584 if (result == MONO_JIT_SECURITY_OK)
4587 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4588 /* Generate code to throw a SecurityException before the actual call/link */
4589 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args [0] == 4: selector passed to the managed exception helper — TODO confirm meaning */
4592 NEW_ICONST (cfg, args [0], 4);
4593 NEW_METHODCONST (cfg, args [1], caller);
4594 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4595 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4596 /* don't hide previous results */
4597 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4598 cfg->exception_data = result;
/*
 * method_access_exception:
 * Lazily look up, and cache in a function-local static, the managed
 * SecurityManager helper used to throw a MethodAccessException from
 * JITted code.
 */
4606 method_access_exception (void)
4608 static MonoMethod *method = NULL;
4611 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4612 method = mono_class_get_method_from_name (secman->securitymanager,
4613 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 * Emit IR that calls the managed MethodAccessException thrower with
 * (caller, callee) so the exception is raised at runtime, at the point
 * where the disallowed call would have happened.
 */
4620 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4621 MonoBasicBlock *bblock, unsigned char *ip)
4623 MonoMethod *thrower = method_access_exception ();
4626 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4627 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4628 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * field_access_exception:
 * Lazily look up, and cache in a function-local static, the managed
 * SecurityManager helper used to throw a FieldAccessException from
 * JITted code.  Mirrors method_access_exception ().
 */
4632 field_access_exception (void)
4634 static MonoMethod *method = NULL;
4637 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4638 method = mono_class_get_method_from_name (secman->securitymanager,
4639 "FieldAccessException", 2);
/*
 * emit_throw_field_access_exception:
 * Emit IR that calls the managed FieldAccessException thrower with
 * (caller, field) so the exception is raised at runtime, at the point
 * where the disallowed field access would have happened.
 */
4646 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4647 MonoBasicBlock *bblock, unsigned char *ip)
4649 MonoMethod *thrower = field_access_exception ();
4652 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4653 EMIT_NEW_METHODCONST (cfg, args [1], field);
4654 mono_emit_method_call (cfg, thrower, args, NULL);
4658 * Return the original method if a wrapper is specified. We can only access
4659 * the custom attributes from the original method.
4662 get_original_method (MonoMethod *method)
/* non-wrappers are returned as-is (return on the elided next line) */
4664 if (method->wrapper_type == MONO_WRAPPER_NONE)
4667 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4668 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4671 /* in other cases we need to find the original method */
4672 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security check: if FIELD belongs to a [SecurityCritical]
 * class and CALLER is Transparent, emit code throwing a
 * FieldAccessException; otherwise the access is allowed and nothing is
 * emitted.
 */
4676 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4677 MonoBasicBlock *bblock, unsigned char *ip)
4679 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4680 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4683 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4684 caller = get_original_method (caller);
4688 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4689 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4690 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security check, the method analogue of
 * ensure_method_is_allowed_to_access_field (): if CALLEE is
 * [SecurityCritical] and CALLER is Transparent, emit code throwing a
 * MethodAccessException; otherwise emit nothing.
 */
4694 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4695 MonoBasicBlock *bblock, unsigned char *ip)
4697 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4698 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4701 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4702 caller = get_original_method (caller);
4706 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4707 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4708 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4712 * Check that the IL instructions at ip are the array initialization
4713 * sequence and return the pointer to the data and the size.
4716 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4719 * newarr[System.Int32]
4721 * ldtoken field valuetype ...
4722 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Byte-wise pattern match: dup; ldtoken <field>; call <method>.
 * ip [5] == 0x4 checks the token's table byte — presumably the Field
 * metadata table — TODO confirm against ECMA-335. */
4724 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4725 guint32 token = read32 (ip + 7);
4726 guint32 field_token = read32 (ip + 2);
4727 guint32 field_index = field_token & 0xffffff;
4729 const char *data_ptr;
4731 MonoMethod *cmethod;
4732 MonoClass *dummy_class;
4733 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4739 *out_field_token = field_token;
4741 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the real RuntimeHelpers.InitializeArray from corlib */
4744 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw layout matches the blob can be memcpy'd */
4746 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4747 case MONO_TYPE_BOOLEAN:
4751 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4752 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4753 case MONO_TYPE_CHAR:
4763 return NULL; /* stupid ARM FP swapped format */
/* the declared blob must be at least as large as the requested copy */
4773 if (size > mono_type_size (field->type, &dummy_align))
4776 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4777 if (!method->klass->image->dynamic) {
4778 field_index = read32 (ip + 2) & 0xffffff;
4779 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4780 data_ptr = mono_image_rva_map (method->klass->image, rva);
4781 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4782 /* for aot code we do the lookup on load */
4783 if (aot && data_ptr)
4784 return GUINT_TO_POINTER (rva);
4786 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images carry the data in the field object itself */
4788 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on CFG with a message naming the
 * method and disassembling the offending instruction at IP (or noting
 * an empty body).  The temporary strings are freed here; the message
 * itself is owned by CFG.
 */
4796 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4798 char *method_fname = mono_method_full_name (method, TRUE);
4801 if (mono_method_get_header (method)->code_size == 0)
4802 method_code = g_strdup ("method body is empty.");
4804 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4805 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4806 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4807 g_free (method_fname);
4808 g_free (method_code);
/*
 * set_exception_object:
 * Record a pre-built managed exception object on CFG.  The pointer is
 * registered as a GC root first, since cfg->exception_ptr would
 * otherwise be invisible to the collector.
 */
4812 set_exception_object (MonoCompile *cfg, MonoException *exception)
4814 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4815 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4816 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 * Whether KLASS is a reference type, resolving type variables through
 * the generic-sharing context first when one is active.
 */
4820 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4824 if (cfg->generic_sharing_context)
4825 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4827 type = &klass->byval_arg;
4828 return MONO_TYPE_IS_REFERENCE (type);
4832 * mono_decompose_array_access_opts:
4834 * Decompose array access opcodes.
4835 * This should be in decompose.c, but it emits calls so it has to stay here until
4836 * the old JIT is gone.
4839 mono_decompose_array_access_opts (MonoCompile *cfg)
4841 MonoBasicBlock *bb, *first_bb;
4844 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4845 * can be executed anytime. It should be run before decompose_long
4849 * Create a dummy bblock and emit code into it so we can use the normal
4850 * code generation macros.
4852 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4853 first_bb = cfg->cbb;
4855 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4857 MonoInst *prev = NULL;
4859 MonoInst *iargs [3];
/* skip bblocks without any of the opcodes this pass handles */
4862 if (!bb->has_array_access)
4865 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4867 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4873 for (ins = bb->code; ins; ins = ins->next) {
4874 switch (ins->opcode) {
/* array length: a plain i4 load of MonoArray::max_length */
4876 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4877 G_STRUCT_OFFSET (MonoArray, max_length));
4878 MONO_ADD_INS (cfg->cbb, dest);
4880 case OP_BOUNDS_CHECK:
4881 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: in shared (domain-neutral) code pass the domain explicitly,
 * otherwise use the per-domain vtable and the faster specific icall */
4884 if (cfg->opt & MONO_OPT_SHARED) {
4885 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4886 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4887 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4888 iargs [2]->dreg = ins->sreg1;
4890 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4891 dest->dreg = ins->dreg;
4893 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4896 NEW_VTABLECONST (cfg, iargs [0], vtable);
4897 MONO_ADD_INS (cfg->cbb, iargs [0]);
4898 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4899 iargs [1]->dreg = ins->sreg1;
4901 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4902 dest->dreg = ins->dreg;
/* string length: i4 load of MonoString::length */
4906 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4907 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4908 MONO_ADD_INS (cfg->cbb, dest);
4914 g_assert (cfg->cbb == first_bb);
4916 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4917 /* Replace the original instruction with the new code sequence */
4919 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4920 first_bb->code = first_bb->last_ins = NULL;
4921 first_bb->in_count = first_bb->out_count = 0;
4922 cfg->cbb = first_bb;
4929 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4939 #ifdef MONO_ARCH_SOFT_FLOAT
4942 * mono_decompose_soft_float:
4944 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4945 * similar to long support on 32 bit platforms. 32 bit float values require special
4946 * handling when used as locals, arguments, and in calls.
4947 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4950 mono_decompose_soft_float (MonoCompile *cfg)
4952 MonoBasicBlock *bb, *first_bb;
4955 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4959 * Create a dummy bblock and emit code into it so we can use the normal
4960 * code generation macros.
4962 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4963 first_bb = cfg->cbb;
4965 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4967 MonoInst *prev = NULL;
4970 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4972 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4978 for (ins = bb->code; ins; ins = ins->next) {
4979 const char *spec = INS_INFO (ins->opcode);
4981 /* Most fp operations are handled automatically by opcode emulation */
4983 switch (ins->opcode) {
/* fp constants become 64 bit integer constants holding the bit pattern */
4986 d.vald = *(double*)ins->inst_p0;
4987 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4992 /* We load the r8 value */
4993 d.vald = *(float*)ins->inst_p0;
4994 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long (vreg-pair) moves */
4998 ins->opcode = OP_LMOVE;
/* +1/+2 select the low/high halves of the vreg pair — TODO confirm order */
5001 ins->opcode = OP_MOVE;
5002 ins->sreg1 = ins->sreg1 + 1;
5005 ins->opcode = OP_MOVE;
5006 ins->sreg1 = ins->sreg1 + 2;
5009 int reg = ins->sreg1;
5011 ins->opcode = OP_SETLRET;
5013 ins->sreg1 = reg + 1;
5014 ins->sreg2 = reg + 2;
/* r8 loads/stores are just 64 bit integer loads/stores */
5017 case OP_LOADR8_MEMBASE:
5018 ins->opcode = OP_LOADI8_MEMBASE;
5020 case OP_STORER8_MEMBASE_REG:
5021 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need an icall to narrow r8 -> r4 at the target address */
5023 case OP_STORER4_MEMBASE_REG: {
5024 MonoInst *iargs [2];
5027 /* Arg 1 is the double value */
5028 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5029 iargs [0]->dreg = ins->sreg1;
5031 /* Arg 2 is the address to store to */
5032 addr_reg = mono_alloc_preg (cfg);
5033 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5034 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads widen to r8 through the mono_fload_r4 icall */
5038 case OP_LOADR4_MEMBASE: {
5039 MonoInst *iargs [1];
5043 addr_reg = mono_alloc_preg (cfg);
5044 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5045 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5046 conv->dreg = ins->dreg;
/* fp calls: r4-returning calls become int calls plus an r4->r8 widen;
 * r8-returning calls simply become long calls */
5051 case OP_FCALL_MEMBASE: {
5052 MonoCallInst *call = (MonoCallInst*)ins;
5053 if (call->signature->ret->type == MONO_TYPE_R4) {
5054 MonoCallInst *call2;
5055 MonoInst *iargs [1];
5058 /* Convert the call into a call returning an int */
5059 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5060 memcpy (call2, call, sizeof (MonoCallInst));
5061 switch (ins->opcode) {
5063 call2->inst.opcode = OP_CALL;
5066 call2->inst.opcode = OP_CALL_REG;
5068 case OP_FCALL_MEMBASE:
5069 call2->inst.opcode = OP_CALL_MEMBASE;
5072 g_assert_not_reached ();
5074 call2->inst.dreg = mono_alloc_ireg (cfg);
5075 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5077 /* FIXME: Optimize this */
5079 /* Emit an r4->r8 conversion */
5080 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5081 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5082 conv->dreg = ins->dreg;
5084 switch (ins->opcode) {
5086 ins->opcode = OP_LCALL;
5089 ins->opcode = OP_LCALL_REG;
5091 case OP_FCALL_MEMBASE:
5092 ins->opcode = OP_LCALL_MEMBASE;
5095 g_assert_not_reached ();
/* fp compare + branch pair -> emulation icall + integer compare + branch */
5101 MonoJitICallInfo *info;
5102 MonoInst *iargs [2];
5103 MonoInst *call, *cmp, *br;
5105 /* Convert fcompare+fbcc to icall+icompare+beq */
5107 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5110 /* Create dummy MonoInst's for the arguments */
5111 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5112 iargs [0]->dreg = ins->sreg1;
5113 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5114 iargs [1]->dreg = ins->sreg2;
5116 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5118 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5119 cmp->sreg1 = call->dreg;
5121 MONO_ADD_INS (cfg->cbb, cmp);
5123 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5124 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5125 br->inst_true_bb = ins->next->inst_true_bb;
5126 br->inst_false_bb = ins->next->inst_false_bb;
5127 MONO_ADD_INS (cfg->cbb, br);
5129 /* The call sequence might include fp ins */
5132 /* Skip fbcc or fccc */
5133 NULLIFY_INS (ins->next);
/* fp set-condition-code -> emulation icall + integer compare + iceq */
5141 MonoJitICallInfo *info;
5142 MonoInst *iargs [2];
5145 /* Convert fccc to icall+icompare+iceq */
5147 info = mono_find_jit_opcode_emulation (ins->opcode);
5150 /* Create dummy MonoInst's for the arguments */
5151 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5152 iargs [0]->dreg = ins->sreg1;
5153 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5154 iargs [1]->dreg = ins->sreg2;
5156 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5159 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5161 /* The call sequence might include fp ins */
/* ckfinite: call mono_isfinite, throw ArithmeticException when it is not */
5166 MonoInst *iargs [2];
5167 MonoInst *call, *cmp;
5169 /* Convert to icall+icompare+cond_exc+move */
5171 /* Create dummy MonoInst's for the arguments */
5172 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5173 iargs [0]->dreg = ins->sreg1;
5175 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5177 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5178 cmp->sreg1 = call->dreg;
5180 MONO_ADD_INS (cfg->cbb, cmp);
5182 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5184 /* Do the assignment if the value is finite */
5185 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* any fp operand surviving to this point means a case above is missing */
5191 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5192 mono_print_ins (ins);
5193 g_assert_not_reached ();
5198 g_assert (cfg->cbb == first_bb);
5200 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5201 /* Replace the original instruction with the new code sequence */
5203 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5204 first_bb->code = first_bb->last_ins = NULL;
5205 first_bb->in_count = first_bb->out_count = 0;
5206 cfg->cbb = first_bb;
5213 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes; decompose them now */
5216 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 * Emit a store of the value on the IL stack top (*SP) into local N.
 * When the value is a constant that was emitted as the immediately
 * preceding instruction, retarget that instruction's dreg to the local
 * instead of emitting a separate move.
 */
5222 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5225 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5226 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5227 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5228 /* Optimize reg-reg moves away */
5230 * Can't optimize other opcodes, since sp[0] might point to
5231 * the last ins of a decomposed opcode.
5233 sp [0]->dreg = (cfg)->locals [n]->dreg;
5235 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5240 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes "ldloca N; initobj <type>" and replaces it with a direct
 * store into the local (NULL pointer for reference types, VZERO for
 * structs), avoiding taking the local's address. */
5243 static inline unsigned char *
5244 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5253 local = read16 (ip + 2);
/* ip_in_bb () guards against the initobj starting a new bblock */
5257 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5258 gboolean skip = FALSE;
5260 /* From the INITOBJ case */
5261 token = read32 (ip + 2);
5262 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5263 CHECK_TYPELOAD (klass);
5264 if (generic_class_is_reference_type (cfg, klass)) {
5265 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5266 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5267 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5268 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5269 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 * Walk up the parent chain to determine whether CLASS derives from
 * System.Exception.
 */
5282 is_exception_class (MonoClass *class)
5285 if (class == mono_defaults.exception_class)
5287 class = class->parent;
5293 * mono_method_to_ir:
5295 * Translate the .net IL into linear IR.
5298 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5299 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5300 guint inline_offset, gboolean is_virtual_call)
5302 MonoInst *ins, **sp, **stack_start;
5303 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5304 MonoMethod *cmethod, *method_definition;
5305 MonoInst **arg_array;
5306 MonoMethodHeader *header;
5308 guint32 token, ins_flag;
5310 MonoClass *constrained_call = NULL;
5311 unsigned char *ip, *end, *target, *err_pos;
5312 static double r8_0 = 0.0;
5313 MonoMethodSignature *sig;
5314 MonoGenericContext *generic_context = NULL;
5315 MonoGenericContainer *generic_container = NULL;
5316 MonoType **param_types;
5317 int i, n, start_new_bblock, dreg;
5318 int num_calls = 0, inline_costs = 0;
5319 int breakpoint_id = 0;
5321 MonoBoolean security, pinvoke;
5322 MonoSecurityManager* secman = NULL;
5323 MonoDeclSecurityActions actions;
5324 GSList *class_inits = NULL;
5325 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5327 gboolean init_locals;
5329 /* serialization and xdomain stuff may need access to private fields and methods */
5330 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5331 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5332 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5333 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5334 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5335 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5337 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5339 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5340 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5341 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5342 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5344 image = method->klass->image;
5345 header = mono_method_get_header (method);
5346 generic_container = mono_method_get_generic_container (method);
5347 sig = mono_method_signature (method);
5348 num_args = sig->hasthis + sig->param_count;
5349 ip = (unsigned char*)header->code;
5350 cfg->cil_start = ip;
5351 end = ip + header->code_size;
5352 mono_jit_stats.cil_code_size += header->code_size;
5353 init_locals = header->init_locals;
5356 * Methods without init_locals set could cause asserts in various passes
5361 method_definition = method;
5362 while (method_definition->is_inflated) {
5363 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5364 method_definition = imethod->declaring;
5367 /* SkipVerification is not allowed if core-clr is enabled */
5368 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5370 dont_verify_stloc = TRUE;
5373 if (!dont_verify && mini_method_verify (cfg, method_definition))
5374 goto exception_exit;
5376 if (mono_debug_using_mono_debugger ())
5377 cfg->keep_cil_nops = TRUE;
5379 if (sig->is_inflated)
5380 generic_context = mono_method_get_context (method);
5381 else if (generic_container)
5382 generic_context = &generic_container->context;
5383 cfg->generic_context = generic_context;
5385 if (!cfg->generic_sharing_context)
5386 g_assert (!sig->has_type_parameters);
5388 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5389 g_assert (method->is_inflated);
5390 g_assert (mono_method_get_context (method)->method_inst);
5392 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5393 g_assert (sig->generic_param_count);
5395 if (cfg->method == method) {
5396 cfg->real_offset = 0;
5398 cfg->real_offset = inline_offset;
5401 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5402 cfg->cil_offset_to_bb_len = header->code_size;
5404 cfg->current_method = method;
5406 if (cfg->verbose_level > 2)
5407 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5409 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5411 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5412 for (n = 0; n < sig->param_count; ++n)
5413 param_types [n + sig->hasthis] = sig->params [n];
5414 cfg->arg_types = param_types;
5416 dont_inline = g_list_prepend (dont_inline, method);
5417 if (cfg->method == method) {
5419 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5420 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5423 NEW_BBLOCK (cfg, start_bblock);
5424 cfg->bb_entry = start_bblock;
5425 start_bblock->cil_code = NULL;
5426 start_bblock->cil_length = 0;
5429 NEW_BBLOCK (cfg, end_bblock);
5430 cfg->bb_exit = end_bblock;
5431 end_bblock->cil_code = NULL;
5432 end_bblock->cil_length = 0;
5433 g_assert (cfg->num_bblocks == 2);
5435 arg_array = cfg->args;
5437 if (header->num_clauses) {
5438 cfg->spvars = g_hash_table_new (NULL, NULL);
5439 cfg->exvars = g_hash_table_new (NULL, NULL);
5441 /* handle exception clauses */
5442 for (i = 0; i < header->num_clauses; ++i) {
5443 MonoBasicBlock *try_bb;
5444 MonoExceptionClause *clause = &header->clauses [i];
5445 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5446 try_bb->real_offset = clause->try_offset;
5447 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5448 tblock->real_offset = clause->handler_offset;
5449 tblock->flags |= BB_EXCEPTION_HANDLER;
5451 link_bblock (cfg, try_bb, tblock);
5453 if (*(ip + clause->handler_offset) == CEE_POP)
5454 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5456 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5457 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5458 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5459 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5460 MONO_ADD_INS (tblock, ins);
5462 /* todo: is a fault block unsafe to optimize? */
5463 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5464 tblock->flags |= BB_EXCEPTION_UNSAFE;
5468 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5470 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5472 /* catch and filter blocks get the exception object on the stack */
5473 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5474 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5475 MonoInst *dummy_use;
5477 /* mostly like handle_stack_args (), but just sets the input args */
5478 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5479 tblock->in_scount = 1;
5480 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5481 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5484 * Add a dummy use for the exvar so its liveness info will be
5488 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5490 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5491 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5492 tblock->flags |= BB_EXCEPTION_HANDLER;
5493 tblock->real_offset = clause->data.filter_offset;
5494 tblock->in_scount = 1;
5495 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5496 /* The filter block shares the exvar with the handler block */
5497 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5498 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5499 MONO_ADD_INS (tblock, ins);
5503 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5504 clause->data.catch_class &&
5505 cfg->generic_sharing_context &&
5506 mono_class_check_context_used (clause->data.catch_class)) {
5508 * In shared generic code with catch
5509 * clauses containing type variables
5510 * the exception handling code has to
5511 * be able to get to the rgctx.
5512 * Therefore we have to make sure that
5513 * the vtable/mrgctx argument (for
5514 * static or generic methods) or the
5515 * "this" argument (for non-static
5516 * methods) are live.
5518 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5519 mini_method_get_context (method)->method_inst ||
5520 method->klass->valuetype) {
5521 mono_get_vtable_var (cfg);
5523 MonoInst *dummy_use;
5525 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5530 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5531 cfg->cbb = start_bblock;
5532 cfg->args = arg_array;
5533 mono_save_args (cfg, sig, inline_args);
5536 /* FIRST CODE BLOCK */
5537 NEW_BBLOCK (cfg, bblock);
5538 bblock->cil_code = ip;
5542 ADD_BBLOCK (cfg, bblock);
5544 if (cfg->method == method) {
5545 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5546 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5547 MONO_INST_NEW (cfg, ins, OP_BREAK);
5548 MONO_ADD_INS (bblock, ins);
5552 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5553 secman = mono_security_manager_get_methods ();
5555 security = (secman && mono_method_has_declsec (method));
5556 /* at this point having security doesn't mean we have any code to generate */
5557 if (security && (cfg->method == method)) {
5558 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5559 * And we do not want to enter the next section (with allocation) if we
5560 * have nothing to generate */
5561 security = mono_declsec_get_demands (method, &actions);
5564 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5565 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5567 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5568 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5569 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5571 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5572 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5576 mono_custom_attrs_free (custom);
5579 custom = mono_custom_attrs_from_class (wrapped->klass);
5580 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5584 mono_custom_attrs_free (custom);
5587 /* not a P/Invoke after all */
5592 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5593 /* we use a separate basic block for the initialization code */
5594 NEW_BBLOCK (cfg, init_localsbb);
5595 cfg->bb_init = init_localsbb;
5596 init_localsbb->real_offset = cfg->real_offset;
5597 start_bblock->next_bb = init_localsbb;
5598 init_localsbb->next_bb = bblock;
5599 link_bblock (cfg, start_bblock, init_localsbb);
5600 link_bblock (cfg, init_localsbb, bblock);
5602 cfg->cbb = init_localsbb;
5604 start_bblock->next_bb = bblock;
5605 link_bblock (cfg, start_bblock, bblock);
5608 /* at this point we know, if security is TRUE, that some code needs to be generated */
5609 if (security && (cfg->method == method)) {
5612 mono_jit_stats.cas_demand_generation++;
5614 if (actions.demand.blob) {
5615 /* Add code for SecurityAction.Demand */
5616 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5617 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5618 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5619 mono_emit_method_call (cfg, secman->demand, args, NULL);
5621 if (actions.noncasdemand.blob) {
5622 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5623 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5624 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5625 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5626 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5627 mono_emit_method_call (cfg, secman->demand, args, NULL);
5629 if (actions.demandchoice.blob) {
5630 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5631 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5632 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5633 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5634 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5638 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5640 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5643 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5644 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5645 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5646 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5647 if (!(method->klass && method->klass->image &&
5648 mono_security_core_clr_is_platform_image (method->klass->image))) {
5649 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5655 if (header->code_size == 0)
5658 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5663 if (cfg->method == method)
5664 mono_debug_init_method (cfg, bblock, breakpoint_id);
5666 for (n = 0; n < header->num_locals; ++n) {
5667 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5672 /* We force the vtable variable here for all shared methods
5673 for the possibility that they might show up in a stack
5674 trace where their exact instantiation is needed. */
5675 if (cfg->generic_sharing_context && method == cfg->method) {
5676 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5677 mini_method_get_context (method)->method_inst ||
5678 method->klass->valuetype) {
5679 mono_get_vtable_var (cfg);
5681 /* FIXME: Is there a better way to do this?
5682 We need the variable live for the duration
5683 of the whole method. */
5684 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5688 /* add a check for this != NULL to inlined methods */
5689 if (is_virtual_call) {
5692 NEW_ARGLOAD (cfg, arg_ins, 0);
5693 MONO_ADD_INS (cfg->cbb, arg_ins);
5694 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5695 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5696 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5699 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5700 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5703 start_new_bblock = 0;
5707 if (cfg->method == method)
5708 cfg->real_offset = ip - header->code;
5710 cfg->real_offset = inline_offset;
5715 if (start_new_bblock) {
5716 bblock->cil_length = ip - bblock->cil_code;
5717 if (start_new_bblock == 2) {
5718 g_assert (ip == tblock->cil_code);
5720 GET_BBLOCK (cfg, tblock, ip);
5722 bblock->next_bb = tblock;
5725 start_new_bblock = 0;
5726 for (i = 0; i < bblock->in_scount; ++i) {
5727 if (cfg->verbose_level > 3)
5728 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5729 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5733 g_slist_free (class_inits);
5736 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5737 link_bblock (cfg, bblock, tblock);
5738 if (sp != stack_start) {
5739 handle_stack_args (cfg, stack_start, sp - stack_start);
5741 CHECK_UNVERIFIABLE (cfg);
5743 bblock->next_bb = tblock;
5746 for (i = 0; i < bblock->in_scount; ++i) {
5747 if (cfg->verbose_level > 3)
5748 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5749 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5752 g_slist_free (class_inits);
5757 bblock->real_offset = cfg->real_offset;
5759 if ((cfg->method == method) && cfg->coverage_info) {
5760 guint32 cil_offset = ip - header->code;
5761 cfg->coverage_info->data [cil_offset].cil_code = ip;
5763 /* TODO: Use an increment here */
5764 #if defined(TARGET_X86)
5765 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5766 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5768 MONO_ADD_INS (cfg->cbb, ins);
5770 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5771 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5775 if (cfg->verbose_level > 3)
5776 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5780 if (cfg->keep_cil_nops)
5781 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5783 MONO_INST_NEW (cfg, ins, OP_NOP);
5785 MONO_ADD_INS (bblock, ins);
5788 MONO_INST_NEW (cfg, ins, OP_BREAK);
5790 MONO_ADD_INS (bblock, ins);
5796 CHECK_STACK_OVF (1);
5797 n = (*ip)-CEE_LDARG_0;
5799 EMIT_NEW_ARGLOAD (cfg, ins, n);
5807 CHECK_STACK_OVF (1);
5808 n = (*ip)-CEE_LDLOC_0;
5810 EMIT_NEW_LOCLOAD (cfg, ins, n);
5819 n = (*ip)-CEE_STLOC_0;
5822 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5824 emit_stloc_ir (cfg, sp, header, n);
5831 CHECK_STACK_OVF (1);
5834 EMIT_NEW_ARGLOAD (cfg, ins, n);
5840 CHECK_STACK_OVF (1);
5843 NEW_ARGLOADA (cfg, ins, n);
5844 MONO_ADD_INS (cfg->cbb, ins);
5854 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5856 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5861 CHECK_STACK_OVF (1);
5864 EMIT_NEW_LOCLOAD (cfg, ins, n);
5868 case CEE_LDLOCA_S: {
5869 unsigned char *tmp_ip;
5871 CHECK_STACK_OVF (1);
5872 CHECK_LOCAL (ip [1]);
5874 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5880 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5889 CHECK_LOCAL (ip [1]);
5890 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5892 emit_stloc_ir (cfg, sp, header, ip [1]);
5897 CHECK_STACK_OVF (1);
5898 EMIT_NEW_PCONST (cfg, ins, NULL);
5899 ins->type = STACK_OBJ;
5904 CHECK_STACK_OVF (1);
5905 EMIT_NEW_ICONST (cfg, ins, -1);
5918 CHECK_STACK_OVF (1);
5919 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5925 CHECK_STACK_OVF (1);
5927 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5933 CHECK_STACK_OVF (1);
5934 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5940 CHECK_STACK_OVF (1);
5941 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5942 ins->type = STACK_I8;
5943 ins->dreg = alloc_dreg (cfg, STACK_I8);
5945 ins->inst_l = (gint64)read64 (ip);
5946 MONO_ADD_INS (bblock, ins);
5952 gboolean use_aotconst = FALSE;
5954 #ifdef TARGET_POWERPC
5955 /* FIXME: Clean this up */
5956 if (cfg->compile_aot)
5957 use_aotconst = TRUE;
5960 /* FIXME: we should really allocate this only late in the compilation process */
5961 f = mono_domain_alloc (cfg->domain, sizeof (float));
5963 CHECK_STACK_OVF (1);
5969 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5971 dreg = alloc_freg (cfg);
5972 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5973 ins->type = STACK_R8;
5975 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5976 ins->type = STACK_R8;
5977 ins->dreg = alloc_dreg (cfg, STACK_R8);
5979 MONO_ADD_INS (bblock, ins);
5989 gboolean use_aotconst = FALSE;
5991 #ifdef TARGET_POWERPC
5992 /* FIXME: Clean this up */
5993 if (cfg->compile_aot)
5994 use_aotconst = TRUE;
5997 /* FIXME: we should really allocate this only late in the compilation process */
5998 d = mono_domain_alloc (cfg->domain, sizeof (double));
6000 CHECK_STACK_OVF (1);
6006 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6008 dreg = alloc_freg (cfg);
6009 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6010 ins->type = STACK_R8;
6012 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6013 ins->type = STACK_R8;
6014 ins->dreg = alloc_dreg (cfg, STACK_R8);
6016 MONO_ADD_INS (bblock, ins);
6025 MonoInst *temp, *store;
6027 CHECK_STACK_OVF (1);
6031 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6032 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6034 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6037 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6050 if (sp [0]->type == STACK_R8)
6051 /* we need to pop the value from the x86 FP stack */
6052 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6061 if (stack_start != sp)
6063 token = read32 (ip + 1);
6064 /* FIXME: check the signature matches */
6065 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6070 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6071 GENERIC_SHARING_FAILURE (CEE_JMP);
6073 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6074 CHECK_CFG_EXCEPTION;
6076 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6078 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6081 /* Handle tail calls similarly to calls */
6082 n = fsig->param_count + fsig->hasthis;
6084 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6085 call->method = cmethod;
6086 call->tail_call = TRUE;
6087 call->signature = mono_method_signature (cmethod);
6088 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6089 call->inst.inst_p0 = cmethod;
6090 for (i = 0; i < n; ++i)
6091 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6093 mono_arch_emit_call (cfg, call);
6094 MONO_ADD_INS (bblock, (MonoInst*)call);
6097 for (i = 0; i < num_args; ++i)
6098 /* Prevent arguments from being optimized away */
6099 arg_array [i]->flags |= MONO_INST_VOLATILE;
6101 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6102 ins = (MonoInst*)call;
6103 ins->inst_p0 = cmethod;
6104 MONO_ADD_INS (bblock, ins);
6108 start_new_bblock = 1;
6113 case CEE_CALLVIRT: {
6114 MonoInst *addr = NULL;
6115 MonoMethodSignature *fsig = NULL;
6117 int virtual = *ip == CEE_CALLVIRT;
6118 int calli = *ip == CEE_CALLI;
6119 gboolean pass_imt_from_rgctx = FALSE;
6120 MonoInst *imt_arg = NULL;
6121 gboolean pass_vtable = FALSE;
6122 gboolean pass_mrgctx = FALSE;
6123 MonoInst *vtable_arg = NULL;
6124 gboolean check_this = FALSE;
6125 gboolean supported_tail_call = FALSE;
6128 token = read32 (ip + 1);
6135 if (method->wrapper_type != MONO_WRAPPER_NONE)
6136 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6138 fsig = mono_metadata_parse_signature (image, token);
6140 n = fsig->param_count + fsig->hasthis;
6142 MonoMethod *cil_method;
6144 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6145 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6146 cil_method = cmethod;
6147 } else if (constrained_call) {
6148 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6150 * This is needed since get_method_constrained can't find
6151 * the method in klass representing a type var.
6152 * The type var is guaranteed to be a reference type in this
6155 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6156 cil_method = cmethod;
6157 g_assert (!cmethod->klass->valuetype);
6159 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6162 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6163 cil_method = cmethod;
6168 if (!dont_verify && !cfg->skip_visibility) {
6169 MonoMethod *target_method = cil_method;
6170 if (method->is_inflated) {
6171 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6173 if (!mono_method_can_access_method (method_definition, target_method) &&
6174 !mono_method_can_access_method (method, cil_method))
6175 METHOD_ACCESS_FAILURE;
6178 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6179 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6181 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6182 /* MS.NET seems to silently convert this to a callvirt */
6185 if (!cmethod->klass->inited)
6186 if (!mono_class_init (cmethod->klass))
6189 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6190 mini_class_is_system_array (cmethod->klass)) {
6191 array_rank = cmethod->klass->rank;
6192 fsig = mono_method_signature (cmethod);
6194 if (mono_method_signature (cmethod)->pinvoke) {
6195 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6196 check_for_pending_exc, FALSE);
6197 fsig = mono_method_signature (wrapper);
6198 } else if (constrained_call) {
6199 fsig = mono_method_signature (cmethod);
6201 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6205 mono_save_token_info (cfg, image, token, cil_method);
6207 n = fsig->param_count + fsig->hasthis;
6209 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6210 if (check_linkdemand (cfg, method, cmethod))
6212 CHECK_CFG_EXCEPTION;
6215 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6216 g_assert_not_reached ();
6219 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6222 if (!cfg->generic_sharing_context && cmethod)
6223 g_assert (!mono_method_check_context_used (cmethod));
6227 //g_assert (!virtual || fsig->hasthis);
6231 if (constrained_call) {
6233 * We have the `constrained.' prefix opcode.
6235 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6239 * The type parameter is instantiated as a valuetype,
6240 * but that type doesn't override the method we're
6241 * calling, so we need to box `this'.
6243 dreg = alloc_dreg (cfg, STACK_VTYPE);
6244 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6245 ins->klass = constrained_call;
6246 sp [0] = handle_box (cfg, ins, constrained_call);
6247 } else if (!constrained_call->valuetype) {
6248 int dreg = alloc_preg (cfg);
6251 * The type parameter is instantiated as a reference
6252 * type. We have a managed pointer on the stack, so
6253 * we need to dereference it here.
6255 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6256 ins->type = STACK_OBJ;
6258 } else if (cmethod->klass->valuetype)
6260 constrained_call = NULL;
6263 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6267 * If the callee is a shared method, then its static cctor
6268 * might not get called after the call was patched.
6270 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6271 emit_generic_class_init (cfg, cmethod->klass);
6274 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6275 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6276 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6277 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6278 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6281 * Pass vtable iff target method might
6282 * be shared, which means that sharing
6283 * is enabled for its class and its
6284 * context is sharable (and it's not a
6287 if (sharing_enabled && context_sharable &&
6288 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6292 if (cmethod && mini_method_get_context (cmethod) &&
6293 mini_method_get_context (cmethod)->method_inst) {
6294 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6295 MonoGenericContext *context = mini_method_get_context (cmethod);
6296 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6298 g_assert (!pass_vtable);
6300 if (sharing_enabled && context_sharable)
6304 if (cfg->generic_sharing_context && cmethod) {
6305 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6307 context_used = mono_method_check_context_used (cmethod);
6309 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6310 /* Generic method interface
6311 calls are resolved via a
6312 helper function and don't
6314 if (!cmethod_context || !cmethod_context->method_inst)
6315 pass_imt_from_rgctx = TRUE;
6319 * If a shared method calls another
6320 * shared method then the caller must
6321 * have a generic sharing context
6322 * because the magic trampoline
6323 * requires it. FIXME: We shouldn't
6324 * have to force the vtable/mrgctx
6325 * variable here. Instead there
6326 * should be a flag in the cfg to
6327 * request a generic sharing context.
6330 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6331 mono_get_vtable_var (cfg);
6336 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6338 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6340 CHECK_TYPELOAD (cmethod->klass);
6341 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6346 g_assert (!vtable_arg);
6349 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6351 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6354 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6355 MONO_METHOD_IS_FINAL (cmethod)) {
6362 if (pass_imt_from_rgctx) {
6363 g_assert (!pass_vtable);
6366 imt_arg = emit_get_rgctx_method (cfg, context_used,
6367 cmethod, MONO_RGCTX_INFO_METHOD);
6373 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6374 check->sreg1 = sp [0]->dreg;
6375 MONO_ADD_INS (cfg->cbb, check);
6378 /* Calling virtual generic methods */
6379 if (cmethod && virtual &&
6380 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6381 !(MONO_METHOD_IS_FINAL (cmethod) &&
6382 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6383 mono_method_signature (cmethod)->generic_param_count) {
6384 MonoInst *this_temp, *this_arg_temp, *store;
6385 MonoInst *iargs [4];
6387 g_assert (mono_method_signature (cmethod)->is_inflated);
6389 /* Prevent inlining of methods that contain indirect calls */
6392 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6393 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6394 g_assert (!imt_arg);
6396 imt_arg = emit_get_rgctx_method (cfg, context_used,
6397 cmethod, MONO_RGCTX_INFO_METHOD);
6400 g_assert (cmethod->is_inflated);
6401 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6403 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6407 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6408 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6409 MONO_ADD_INS (bblock, store);
6411 /* FIXME: This should be a managed pointer */
6412 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6414 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6416 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6417 cmethod, MONO_RGCTX_INFO_METHOD);
6418 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6419 addr = mono_emit_jit_icall (cfg,
6420 mono_helper_compile_generic_method, iargs);
6422 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6423 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6424 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6427 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6429 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6432 if (!MONO_TYPE_IS_VOID (fsig->ret))
6433 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6440 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6441 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6443 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6447 /* FIXME: runtime generic context pointer for jumps? */
6448 /* FIXME: handle this for generic sharing eventually */
6449 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6452 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6455 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6456 /* Handle tail calls similarly to calls */
6457 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6459 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6460 call->tail_call = TRUE;
6461 call->method = cmethod;
6462 call->signature = mono_method_signature (cmethod);
6465 * We implement tail calls by storing the actual arguments into the
6466 * argument variables, then emitting a CEE_JMP.
6468 for (i = 0; i < n; ++i) {
6469 /* Prevent argument from being register allocated */
6470 arg_array [i]->flags |= MONO_INST_VOLATILE;
6471 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6475 ins = (MonoInst*)call;
6476 ins->inst_p0 = cmethod;
6477 ins->inst_p1 = arg_array [0];
6478 MONO_ADD_INS (bblock, ins);
6479 link_bblock (cfg, bblock, end_bblock);
6480 start_new_bblock = 1;
6481 /* skip CEE_RET as well */
6487 /* Conversion to a JIT intrinsic */
6488 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6489 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6490 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6501 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6502 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6503 mono_method_check_inlining (cfg, cmethod) &&
6504 !g_list_find (dont_inline, cmethod)) {
6506 gboolean allways = FALSE;
6508 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6509 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6510 /* Prevent inlining of methods that call wrappers */
6512 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6516 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6518 cfg->real_offset += 5;
6521 if (!MONO_TYPE_IS_VOID (fsig->ret))
6522 /* *sp is already set by inline_method */
6525 inline_costs += costs;
6531 inline_costs += 10 * num_calls++;
6533 /* Tail recursion elimination */
6534 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6535 gboolean has_vtargs = FALSE;
6538 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6541 /* keep it simple */
6542 for (i = fsig->param_count - 1; i >= 0; i--) {
6543 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6548 for (i = 0; i < n; ++i)
6549 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6550 MONO_INST_NEW (cfg, ins, OP_BR);
6551 MONO_ADD_INS (bblock, ins);
6552 tblock = start_bblock->out_bb [0];
6553 link_bblock (cfg, bblock, tblock);
6554 ins->inst_target_bb = tblock;
6555 start_new_bblock = 1;
6557 /* skip the CEE_RET, too */
6558 if (ip_in_bb (cfg, bblock, ip + 5))
6568 /* Generic sharing */
6569 /* FIXME: only do this for generic methods if
6570 they are not shared! */
6571 if (context_used && !imt_arg && !array_rank &&
6572 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6573 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6574 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6575 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6578 g_assert (cfg->generic_sharing_context && cmethod);
6582 * We are compiling a call to a
6583 * generic method from shared code,
6584 * which means that we have to look up
6585 * the method in the rgctx and do an
6588 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6591 /* Indirect calls */
6593 g_assert (!imt_arg);
6595 if (*ip == CEE_CALL)
6596 g_assert (context_used);
6597 else if (*ip == CEE_CALLI)
6598 g_assert (!vtable_arg);
6600 /* FIXME: what the hell is this??? */
6601 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6602 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6604 /* Prevent inlining of methods with indirect calls */
6608 #ifdef MONO_ARCH_RGCTX_REG
6610 int rgctx_reg = mono_alloc_preg (cfg);
6612 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6613 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6614 call = (MonoCallInst*)ins;
6615 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6616 cfg->uses_rgctx_reg = TRUE;
6617 call->rgctx_reg = TRUE;
6622 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6624 * Instead of emitting an indirect call, emit a direct call
6625 * with the contents of the aotconst as the patch info.
6627 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6629 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6630 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6633 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6636 if (!MONO_TYPE_IS_VOID (fsig->ret))
6637 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6648 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6649 if (sp [fsig->param_count]->type == STACK_OBJ) {
6650 MonoInst *iargs [2];
6653 iargs [1] = sp [fsig->param_count];
6655 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6658 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6659 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6660 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6661 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6663 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6666 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6667 if (!cmethod->klass->element_class->valuetype && !readonly)
6668 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6671 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6674 g_assert_not_reached ();
6682 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6684 if (!MONO_TYPE_IS_VOID (fsig->ret))
6685 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6695 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6697 } else if (imt_arg) {
6698 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6700 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6703 if (!MONO_TYPE_IS_VOID (fsig->ret))
6704 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6711 if (cfg->method != method) {
6712 /* return from inlined method */
6714 * If in_count == 0, that means the ret is unreachable due to
6715 * being preceeded by a throw. In that case, inline_method () will
6716 * handle setting the return value
6717 * (test case: test_0_inline_throw ()).
6719 if (return_var && cfg->cbb->in_count) {
6723 //g_assert (returnvar != -1);
6724 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6725 cfg->ret_var_set = TRUE;
6729 MonoType *ret_type = mono_method_signature (method)->ret;
6731 g_assert (!return_var);
6734 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6737 if (!cfg->vret_addr) {
6740 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6742 EMIT_NEW_RETLOADA (cfg, ret_addr);
6744 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6745 ins->klass = mono_class_from_mono_type (ret_type);
6748 #ifdef MONO_ARCH_SOFT_FLOAT
6749 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6750 MonoInst *iargs [1];
6754 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6755 mono_arch_emit_setret (cfg, method, conv);
6757 mono_arch_emit_setret (cfg, method, *sp);
6760 mono_arch_emit_setret (cfg, method, *sp);
6765 if (sp != stack_start)
6767 MONO_INST_NEW (cfg, ins, OP_BR);
6769 ins->inst_target_bb = end_bblock;
6770 MONO_ADD_INS (bblock, ins);
6771 link_bblock (cfg, bblock, end_bblock);
6772 start_new_bblock = 1;
6776 MONO_INST_NEW (cfg, ins, OP_BR);
6778 target = ip + 1 + (signed char)(*ip);
6780 GET_BBLOCK (cfg, tblock, target);
6781 link_bblock (cfg, bblock, tblock);
6782 ins->inst_target_bb = tblock;
6783 if (sp != stack_start) {
6784 handle_stack_args (cfg, stack_start, sp - stack_start);
6786 CHECK_UNVERIFIABLE (cfg);
6788 MONO_ADD_INS (bblock, ins);
6789 start_new_bblock = 1;
6790 inline_costs += BRANCH_COST;
6804 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6806 target = ip + 1 + *(signed char*)ip;
6812 inline_costs += BRANCH_COST;
6816 MONO_INST_NEW (cfg, ins, OP_BR);
6819 target = ip + 4 + (gint32)read32(ip);
6821 GET_BBLOCK (cfg, tblock, target);
6822 link_bblock (cfg, bblock, tblock);
6823 ins->inst_target_bb = tblock;
6824 if (sp != stack_start) {
6825 handle_stack_args (cfg, stack_start, sp - stack_start);
6827 CHECK_UNVERIFIABLE (cfg);
6830 MONO_ADD_INS (bblock, ins);
6832 start_new_bblock = 1;
6833 inline_costs += BRANCH_COST;
6840 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6841 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6842 guint32 opsize = is_short ? 1 : 4;
6844 CHECK_OPSIZE (opsize);
6846 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6849 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6854 GET_BBLOCK (cfg, tblock, target);
6855 link_bblock (cfg, bblock, tblock);
6856 GET_BBLOCK (cfg, tblock, ip);
6857 link_bblock (cfg, bblock, tblock);
6859 if (sp != stack_start) {
6860 handle_stack_args (cfg, stack_start, sp - stack_start);
6861 CHECK_UNVERIFIABLE (cfg);
6864 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6865 cmp->sreg1 = sp [0]->dreg;
6866 type_from_op (cmp, sp [0], NULL);
6869 #if SIZEOF_REGISTER == 4
6870 if (cmp->opcode == OP_LCOMPARE_IMM) {
6871 /* Convert it to OP_LCOMPARE */
6872 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6873 ins->type = STACK_I8;
6874 ins->dreg = alloc_dreg (cfg, STACK_I8);
6876 MONO_ADD_INS (bblock, ins);
6877 cmp->opcode = OP_LCOMPARE;
6878 cmp->sreg2 = ins->dreg;
6881 MONO_ADD_INS (bblock, cmp);
6883 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6884 type_from_op (ins, sp [0], NULL);
6885 MONO_ADD_INS (bblock, ins);
6886 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6887 GET_BBLOCK (cfg, tblock, target);
6888 ins->inst_true_bb = tblock;
6889 GET_BBLOCK (cfg, tblock, ip);
6890 ins->inst_false_bb = tblock;
6891 start_new_bblock = 2;
6894 inline_costs += BRANCH_COST;
6909 MONO_INST_NEW (cfg, ins, *ip);
6911 target = ip + 4 + (gint32)read32(ip);
6917 inline_costs += BRANCH_COST;
6921 MonoBasicBlock **targets;
6922 MonoBasicBlock *default_bblock;
6923 MonoJumpInfoBBTable *table;
6924 int offset_reg = alloc_preg (cfg);
6925 int target_reg = alloc_preg (cfg);
6926 int table_reg = alloc_preg (cfg);
6927 int sum_reg = alloc_preg (cfg);
6928 gboolean use_op_switch;
6932 n = read32 (ip + 1);
6935 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6939 CHECK_OPSIZE (n * sizeof (guint32));
6940 target = ip + n * sizeof (guint32);
6942 GET_BBLOCK (cfg, default_bblock, target);
6944 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6945 for (i = 0; i < n; ++i) {
6946 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6947 targets [i] = tblock;
6951 if (sp != stack_start) {
6953 * Link the current bb with the targets as well, so handle_stack_args
6954 * will set their in_stack correctly.
6956 link_bblock (cfg, bblock, default_bblock);
6957 for (i = 0; i < n; ++i)
6958 link_bblock (cfg, bblock, targets [i]);
6960 handle_stack_args (cfg, stack_start, sp - stack_start);
6962 CHECK_UNVERIFIABLE (cfg);
6965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6969 for (i = 0; i < n; ++i)
6970 link_bblock (cfg, bblock, targets [i]);
6972 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6973 table->table = targets;
6974 table->table_size = n;
6976 use_op_switch = FALSE;
6978 /* ARM implements SWITCH statements differently */
6979 /* FIXME: Make it use the generic implementation */
6980 if (!cfg->compile_aot)
6981 use_op_switch = TRUE;
6984 if (COMPILE_LLVM (cfg))
6985 use_op_switch = TRUE;
6987 cfg->cbb->has_jump_table = 1;
6989 if (use_op_switch) {
6990 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6991 ins->sreg1 = src1->dreg;
6992 ins->inst_p0 = table;
6993 ins->inst_many_bb = targets;
6994 ins->klass = GUINT_TO_POINTER (n);
6995 MONO_ADD_INS (cfg->cbb, ins);
6997 if (sizeof (gpointer) == 8)
6998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7002 #if SIZEOF_REGISTER == 8
7003 /* The upper word might not be zero, and we add it to a 64 bit address later */
7004 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7007 if (cfg->compile_aot) {
7008 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7010 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7011 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7012 ins->inst_p0 = table;
7013 ins->dreg = table_reg;
7014 MONO_ADD_INS (cfg->cbb, ins);
7017 /* FIXME: Use load_memindex */
7018 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7020 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7022 start_new_bblock = 1;
7023 inline_costs += (BRANCH_COST * 2);
7043 dreg = alloc_freg (cfg);
7046 dreg = alloc_lreg (cfg);
7049 dreg = alloc_preg (cfg);
7052 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7053 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7054 ins->flags |= ins_flag;
7056 MONO_ADD_INS (bblock, ins);
7071 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7072 ins->flags |= ins_flag;
7074 MONO_ADD_INS (bblock, ins);
7076 #if HAVE_WRITE_BARRIERS
7077 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7078 /* insert call to write barrier */
7079 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7080 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7091 MONO_INST_NEW (cfg, ins, (*ip));
7093 ins->sreg1 = sp [0]->dreg;
7094 ins->sreg2 = sp [1]->dreg;
7095 type_from_op (ins, sp [0], sp [1]);
7097 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7099 /* Use the immediate opcodes if possible */
7100 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7101 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7102 if (imm_opcode != -1) {
7103 ins->opcode = imm_opcode;
7104 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7107 sp [1]->opcode = OP_NOP;
7111 MONO_ADD_INS ((cfg)->cbb, (ins));
7113 *sp++ = mono_decompose_opcode (cfg, ins);
7130 MONO_INST_NEW (cfg, ins, (*ip));
7132 ins->sreg1 = sp [0]->dreg;
7133 ins->sreg2 = sp [1]->dreg;
7134 type_from_op (ins, sp [0], sp [1]);
7136 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7137 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7139 /* FIXME: Pass opcode to is_inst_imm */
7141 /* Use the immediate opcodes if possible */
7142 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7145 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7146 if (imm_opcode != -1) {
7147 ins->opcode = imm_opcode;
7148 if (sp [1]->opcode == OP_I8CONST) {
7149 #if SIZEOF_REGISTER == 8
7150 ins->inst_imm = sp [1]->inst_l;
7152 ins->inst_ls_word = sp [1]->inst_ls_word;
7153 ins->inst_ms_word = sp [1]->inst_ms_word;
7157 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7160 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7161 if (sp [1]->next == NULL)
7162 sp [1]->opcode = OP_NOP;
7165 MONO_ADD_INS ((cfg)->cbb, (ins));
7167 *sp++ = mono_decompose_opcode (cfg, ins);
7180 case CEE_CONV_OVF_I8:
7181 case CEE_CONV_OVF_U8:
7185 /* Special case this earlier so we have long constants in the IR */
7186 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7187 int data = sp [-1]->inst_c0;
7188 sp [-1]->opcode = OP_I8CONST;
7189 sp [-1]->type = STACK_I8;
7190 #if SIZEOF_REGISTER == 8
7191 if ((*ip) == CEE_CONV_U8)
7192 sp [-1]->inst_c0 = (guint32)data;
7194 sp [-1]->inst_c0 = data;
7196 sp [-1]->inst_ls_word = data;
7197 if ((*ip) == CEE_CONV_U8)
7198 sp [-1]->inst_ms_word = 0;
7200 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7202 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7209 case CEE_CONV_OVF_I4:
7210 case CEE_CONV_OVF_I1:
7211 case CEE_CONV_OVF_I2:
7212 case CEE_CONV_OVF_I:
7213 case CEE_CONV_OVF_U:
7216 if (sp [-1]->type == STACK_R8) {
7217 ADD_UNOP (CEE_CONV_OVF_I8);
7224 case CEE_CONV_OVF_U1:
7225 case CEE_CONV_OVF_U2:
7226 case CEE_CONV_OVF_U4:
7229 if (sp [-1]->type == STACK_R8) {
7230 ADD_UNOP (CEE_CONV_OVF_U8);
7237 case CEE_CONV_OVF_I1_UN:
7238 case CEE_CONV_OVF_I2_UN:
7239 case CEE_CONV_OVF_I4_UN:
7240 case CEE_CONV_OVF_I8_UN:
7241 case CEE_CONV_OVF_U1_UN:
7242 case CEE_CONV_OVF_U2_UN:
7243 case CEE_CONV_OVF_U4_UN:
7244 case CEE_CONV_OVF_U8_UN:
7245 case CEE_CONV_OVF_I_UN:
7246 case CEE_CONV_OVF_U_UN:
7256 case CEE_ADD_OVF_UN:
7258 case CEE_MUL_OVF_UN:
7260 case CEE_SUB_OVF_UN:
7268 token = read32 (ip + 1);
7269 klass = mini_get_class (method, token, generic_context);
7270 CHECK_TYPELOAD (klass);
7272 if (generic_class_is_reference_type (cfg, klass)) {
7273 MonoInst *store, *load;
7274 int dreg = alloc_preg (cfg);
7276 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7277 load->flags |= ins_flag;
7278 MONO_ADD_INS (cfg->cbb, load);
7280 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7281 store->flags |= ins_flag;
7282 MONO_ADD_INS (cfg->cbb, store);
7284 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7296 token = read32 (ip + 1);
7297 klass = mini_get_class (method, token, generic_context);
7298 CHECK_TYPELOAD (klass);
7300 /* Optimize the common ldobj+stloc combination */
7310 loc_index = ip [5] - CEE_STLOC_0;
7317 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7318 CHECK_LOCAL (loc_index);
7320 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7321 ins->dreg = cfg->locals [loc_index]->dreg;
7327 /* Optimize the ldobj+stobj combination */
7328 /* The reference case ends up being a load+store anyway */
7329 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7334 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7341 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7350 CHECK_STACK_OVF (1);
7352 n = read32 (ip + 1);
7354 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7355 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7356 ins->type = STACK_OBJ;
7359 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7360 MonoInst *iargs [1];
7362 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7363 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7365 if (cfg->opt & MONO_OPT_SHARED) {
7366 MonoInst *iargs [3];
7368 if (cfg->compile_aot) {
7369 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7371 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7372 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7373 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7374 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7375 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7377 if (bblock->out_of_line) {
7378 MonoInst *iargs [2];
7380 if (image == mono_defaults.corlib) {
7382 * Avoid relocations in AOT and save some space by using a
7383 * version of helper_ldstr specialized to mscorlib.
7385 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7386 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7388 /* Avoid creating the string object */
7389 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7390 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7391 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7395 if (cfg->compile_aot) {
7396 NEW_LDSTRCONST (cfg, ins, image, n);
7398 MONO_ADD_INS (bblock, ins);
7401 NEW_PCONST (cfg, ins, NULL);
7402 ins->type = STACK_OBJ;
7403 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7405 MONO_ADD_INS (bblock, ins);
7414 MonoInst *iargs [2];
7415 MonoMethodSignature *fsig;
7418 MonoInst *vtable_arg = NULL;
7421 token = read32 (ip + 1);
7422 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7425 fsig = mono_method_get_signature (cmethod, image, token);
7427 mono_save_token_info (cfg, image, token, cmethod);
7429 if (!mono_class_init (cmethod->klass))
7432 if (cfg->generic_sharing_context)
7433 context_used = mono_method_check_context_used (cmethod);
7435 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7436 if (check_linkdemand (cfg, method, cmethod))
7438 CHECK_CFG_EXCEPTION;
7439 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7440 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7443 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7444 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7445 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7447 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7448 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7450 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7454 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7455 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7457 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7459 CHECK_TYPELOAD (cmethod->klass);
7460 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7465 n = fsig->param_count;
7469 * Generate smaller code for the common newobj <exception> instruction in
7470 * argument checking code.
7472 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7473 is_exception_class (cmethod->klass) && n <= 2 &&
7474 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7475 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7476 MonoInst *iargs [3];
7478 g_assert (!vtable_arg);
7482 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7485 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7489 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7494 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7497 g_assert_not_reached ();
7505 /* move the args to allow room for 'this' in the first position */
7511 /* check_call_signature () requires sp[0] to be set */
7512 this_ins.type = STACK_OBJ;
7514 if (check_call_signature (cfg, fsig, sp))
7519 if (mini_class_is_system_array (cmethod->klass)) {
7520 g_assert (!vtable_arg);
7523 *sp = emit_get_rgctx_method (cfg, context_used,
7524 cmethod, MONO_RGCTX_INFO_METHOD);
7526 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7529 /* Avoid varargs in the common case */
7530 if (fsig->param_count == 1)
7531 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7532 else if (fsig->param_count == 2)
7533 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7535 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7536 } else if (cmethod->string_ctor) {
7537 g_assert (!context_used);
7538 g_assert (!vtable_arg);
7539 /* we simply pass a null pointer */
7540 EMIT_NEW_PCONST (cfg, *sp, NULL);
7541 /* now call the string ctor */
7542 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7544 MonoInst* callvirt_this_arg = NULL;
7546 if (cmethod->klass->valuetype) {
7547 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7548 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7549 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7554 * The code generated by mini_emit_virtual_call () expects
7555 * iargs [0] to be a boxed instance, but luckily the vcall
7556 * will be transformed into a normal call there.
7558 } else if (context_used) {
7562 if (cfg->opt & MONO_OPT_SHARED)
7563 rgctx_info = MONO_RGCTX_INFO_KLASS;
7565 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7566 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7568 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7571 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7573 CHECK_TYPELOAD (cmethod->klass);
7576 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7577 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7578 * As a workaround, we call class cctors before allocating objects.
7580 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7581 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7582 if (cfg->verbose_level > 2)
7583 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7584 class_inits = g_slist_prepend (class_inits, vtable);
7587 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7592 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7594 /* Now call the actual ctor */
7595 /* Avoid virtual calls to ctors if possible */
7596 if (cmethod->klass->marshalbyref)
7597 callvirt_this_arg = sp [0];
7599 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7600 mono_method_check_inlining (cfg, cmethod) &&
7601 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7602 !g_list_find (dont_inline, cmethod)) {
7605 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7606 cfg->real_offset += 5;
7609 inline_costs += costs - 5;
7612 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7614 } else if (context_used &&
7615 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7616 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7617 MonoInst *cmethod_addr;
7619 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7620 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7622 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7625 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7626 callvirt_this_arg, NULL, vtable_arg);
7630 if (alloc == NULL) {
7632 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7633 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7647 token = read32 (ip + 1);
7648 klass = mini_get_class (method, token, generic_context);
7649 CHECK_TYPELOAD (klass);
7650 if (sp [0]->type != STACK_OBJ)
7653 if (cfg->generic_sharing_context)
7654 context_used = mono_class_check_context_used (klass);
7663 args [1] = emit_get_rgctx_klass (cfg, context_used,
7664 klass, MONO_RGCTX_INFO_KLASS);
7666 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7670 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7671 MonoMethod *mono_castclass;
7672 MonoInst *iargs [1];
7675 mono_castclass = mono_marshal_get_castclass (klass);
7678 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7679 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7680 g_assert (costs > 0);
7683 cfg->real_offset += 5;
7688 inline_costs += costs;
7691 ins = handle_castclass (cfg, klass, *sp);
7701 token = read32 (ip + 1);
7702 klass = mini_get_class (method, token, generic_context);
7703 CHECK_TYPELOAD (klass);
7704 if (sp [0]->type != STACK_OBJ)
7707 if (cfg->generic_sharing_context)
7708 context_used = mono_class_check_context_used (klass);
7717 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7719 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7723 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7724 MonoMethod *mono_isinst;
7725 MonoInst *iargs [1];
7728 mono_isinst = mono_marshal_get_isinst (klass);
7731 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7732 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7733 g_assert (costs > 0);
7736 cfg->real_offset += 5;
7741 inline_costs += costs;
7744 ins = handle_isinst (cfg, klass, *sp);
7751 case CEE_UNBOX_ANY: {
7755 token = read32 (ip + 1);
7756 klass = mini_get_class (method, token, generic_context);
7757 CHECK_TYPELOAD (klass);
7759 mono_save_token_info (cfg, image, token, klass);
7761 if (cfg->generic_sharing_context)
7762 context_used = mono_class_check_context_used (klass);
7764 if (generic_class_is_reference_type (cfg, klass)) {
7767 MonoInst *iargs [2];
7772 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7773 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7777 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7778 MonoMethod *mono_castclass;
7779 MonoInst *iargs [1];
7782 mono_castclass = mono_marshal_get_castclass (klass);
7785 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7786 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7788 g_assert (costs > 0);
7791 cfg->real_offset += 5;
7795 inline_costs += costs;
7797 ins = handle_castclass (cfg, klass, *sp);
7805 if (mono_class_is_nullable (klass)) {
7806 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7813 ins = handle_unbox (cfg, klass, sp, context_used);
7819 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7832 token = read32 (ip + 1);
7833 klass = mini_get_class (method, token, generic_context);
7834 CHECK_TYPELOAD (klass);
7836 mono_save_token_info (cfg, image, token, klass);
7838 if (cfg->generic_sharing_context)
7839 context_used = mono_class_check_context_used (klass);
7841 if (generic_class_is_reference_type (cfg, klass)) {
7847 if (klass == mono_defaults.void_class)
7849 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7851 /* frequent check in generic code: box (struct), brtrue */
7852 if (!mono_class_is_nullable (klass) &&
7853 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7854 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7856 MONO_INST_NEW (cfg, ins, OP_BR);
7857 if (*ip == CEE_BRTRUE_S) {
7860 target = ip + 1 + (signed char)(*ip);
7865 target = ip + 4 + (gint)(read32 (ip));
7868 GET_BBLOCK (cfg, tblock, target);
7869 link_bblock (cfg, bblock, tblock);
7870 ins->inst_target_bb = tblock;
7871 GET_BBLOCK (cfg, tblock, ip);
7873 * This leads to some inconsistency, since the two bblocks are
7874 * not really connected, but it is needed for handling stack
7875 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7876 * FIXME: This should only be needed if sp != stack_start, but that
7877 * doesn't work for some reason (test failure in mcs/tests on x86).
7879 link_bblock (cfg, bblock, tblock);
7880 if (sp != stack_start) {
7881 handle_stack_args (cfg, stack_start, sp - stack_start);
7883 CHECK_UNVERIFIABLE (cfg);
7885 MONO_ADD_INS (bblock, ins);
7886 start_new_bblock = 1;
7894 if (cfg->opt & MONO_OPT_SHARED)
7895 rgctx_info = MONO_RGCTX_INFO_KLASS;
7897 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7898 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7899 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7901 *sp++ = handle_box (cfg, val, klass);
7912 token = read32 (ip + 1);
7913 klass = mini_get_class (method, token, generic_context);
7914 CHECK_TYPELOAD (klass);
7916 mono_save_token_info (cfg, image, token, klass);
7918 if (cfg->generic_sharing_context)
7919 context_used = mono_class_check_context_used (klass);
7921 if (mono_class_is_nullable (klass)) {
7924 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7925 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7929 ins = handle_unbox (cfg, klass, sp, context_used);
7939 MonoClassField *field;
7943 if (*ip == CEE_STFLD) {
7950 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7952 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7955 token = read32 (ip + 1);
7956 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7957 field = mono_method_get_wrapper_data (method, token);
7958 klass = field->parent;
7961 field = mono_field_from_token (image, token, &klass, generic_context);
7965 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7966 FIELD_ACCESS_FAILURE;
7967 mono_class_init (klass);
7969 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7970 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7971 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7972 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7975 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7976 if (*ip == CEE_STFLD) {
7977 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7979 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7980 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7981 MonoInst *iargs [5];
7984 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7985 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7986 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7990 if (cfg->opt & MONO_OPT_INLINE) {
7991 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7992 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7993 g_assert (costs > 0);
7995 cfg->real_offset += 5;
7998 inline_costs += costs;
8000 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8005 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8007 #if HAVE_WRITE_BARRIERS
8008 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8009 /* insert call to write barrier */
8010 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8011 MonoInst *iargs [2];
8014 dreg = alloc_preg (cfg);
8015 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8017 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8021 store->flags |= ins_flag;
8028 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8029 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8030 MonoInst *iargs [4];
8033 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8034 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8035 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8036 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8037 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8038 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8040 g_assert (costs > 0);
8042 cfg->real_offset += 5;
8046 inline_costs += costs;
8048 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8052 if (sp [0]->type == STACK_VTYPE) {
8055 /* Have to compute the address of the variable */
8057 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8059 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8061 g_assert (var->klass == klass);
8063 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8067 if (*ip == CEE_LDFLDA) {
8068 dreg = alloc_preg (cfg);
8070 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8071 ins->klass = mono_class_from_mono_type (field->type);
8072 ins->type = STACK_MP;
8077 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8078 load->flags |= ins_flag;
8089 MonoClassField *field;
8090 gpointer addr = NULL;
8091 gboolean is_special_static;
8094 token = read32 (ip + 1);
8096 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8097 field = mono_method_get_wrapper_data (method, token);
8098 klass = field->parent;
8101 field = mono_field_from_token (image, token, &klass, generic_context);
8104 mono_class_init (klass);
8105 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8106 FIELD_ACCESS_FAILURE;
8108 /* if the class is Critical then transparent code cannot access it's fields */
8109 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8110 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8113 * We can only support shared generic static
8114 * field access on architectures where the
8115 * trampoline code has been extended to handle
8116 * the generic class init.
8118 #ifndef MONO_ARCH_VTABLE_REG
8119 GENERIC_SHARING_FAILURE (*ip);
8122 if (cfg->generic_sharing_context)
8123 context_used = mono_class_check_context_used (klass);
8125 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8127 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8128 * to be called here.
8130 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8131 mono_class_vtable (cfg->domain, klass);
8132 CHECK_TYPELOAD (klass);
8134 mono_domain_lock (cfg->domain);
8135 if (cfg->domain->special_static_fields)
8136 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8137 mono_domain_unlock (cfg->domain);
8139 is_special_static = mono_class_field_is_special_static (field);
8141 /* Generate IR to compute the field address */
8143 if ((cfg->opt & MONO_OPT_SHARED) ||
8144 (cfg->compile_aot && is_special_static) ||
8145 (context_used && is_special_static)) {
8146 MonoInst *iargs [2];
8148 g_assert (field->parent);
8149 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8151 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8152 field, MONO_RGCTX_INFO_CLASS_FIELD);
8154 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8156 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8157 } else if (context_used) {
8158 MonoInst *static_data;
8161 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8162 method->klass->name_space, method->klass->name, method->name,
8163 depth, field->offset);
8166 if (mono_class_needs_cctor_run (klass, method)) {
8170 vtable = emit_get_rgctx_klass (cfg, context_used,
8171 klass, MONO_RGCTX_INFO_VTABLE);
8173 // FIXME: This doesn't work since it tries to pass the argument
8174 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8176 * The vtable pointer is always passed in a register regardless of
8177 * the calling convention, so assign it manually, and make a call
8178 * using a signature without parameters.
8180 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8181 #ifdef MONO_ARCH_VTABLE_REG
8182 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8183 cfg->uses_vtable_reg = TRUE;
8190 * The pointer we're computing here is
8192 * super_info.static_data + field->offset
8194 static_data = emit_get_rgctx_klass (cfg, context_used,
8195 klass, MONO_RGCTX_INFO_STATIC_DATA);
8197 if (field->offset == 0) {
8200 int addr_reg = mono_alloc_preg (cfg);
8201 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8203 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8204 MonoInst *iargs [2];
8206 g_assert (field->parent);
8207 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8208 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8209 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8211 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8213 CHECK_TYPELOAD (klass);
8215 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8216 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8217 if (cfg->verbose_level > 2)
8218 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8219 class_inits = g_slist_prepend (class_inits, vtable);
8221 if (cfg->run_cctors) {
8223 /* This makes so that inline cannot trigger */
8224 /* .cctors: too many apps depend on them */
8225 /* running with a specific order... */
8226 if (! vtable->initialized)
8228 ex = mono_runtime_class_init_full (vtable, FALSE);
8230 set_exception_object (cfg, ex);
8231 goto exception_exit;
8235 addr = (char*)vtable->data + field->offset;
8237 if (cfg->compile_aot)
8238 EMIT_NEW_SFLDACONST (cfg, ins, field);
8240 EMIT_NEW_PCONST (cfg, ins, addr);
8243 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8244 * This could be later optimized to do just a couple of
8245 * memory dereferences with constant offsets.
8247 MonoInst *iargs [1];
8248 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8249 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8253 /* Generate IR to do the actual load/store operation */
8255 if (*ip == CEE_LDSFLDA) {
8256 ins->klass = mono_class_from_mono_type (field->type);
8257 ins->type = STACK_PTR;
8259 } else if (*ip == CEE_STSFLD) {
8264 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8265 store->flags |= ins_flag;
8267 gboolean is_const = FALSE;
8268 MonoVTable *vtable = NULL;
8270 if (!context_used) {
8271 vtable = mono_class_vtable (cfg->domain, klass);
8272 CHECK_TYPELOAD (klass);
8274 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8275 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8276 gpointer addr = (char*)vtable->data + field->offset;
8277 int ro_type = field->type->type;
8278 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8279 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8281 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8284 case MONO_TYPE_BOOLEAN:
8286 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8290 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8293 case MONO_TYPE_CHAR:
8295 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8299 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8304 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8308 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8311 #ifndef HAVE_MOVING_COLLECTOR
8314 case MONO_TYPE_STRING:
8315 case MONO_TYPE_OBJECT:
8316 case MONO_TYPE_CLASS:
8317 case MONO_TYPE_SZARRAY:
8319 case MONO_TYPE_FNPTR:
8320 case MONO_TYPE_ARRAY:
8321 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8322 type_to_eval_stack_type ((cfg), field->type, *sp);
8328 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8333 case MONO_TYPE_VALUETYPE:
8343 CHECK_STACK_OVF (1);
8345 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8346 load->flags |= ins_flag;
8359 token = read32 (ip + 1);
8360 klass = mini_get_class (method, token, generic_context);
8361 CHECK_TYPELOAD (klass);
8362 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8363 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8374 const char *data_ptr;
8376 guint32 field_token;
8382 token = read32 (ip + 1);
8384 klass = mini_get_class (method, token, generic_context);
8385 CHECK_TYPELOAD (klass);
8387 if (cfg->generic_sharing_context)
8388 context_used = mono_class_check_context_used (klass);
8393 /* FIXME: Decompose later to help abcrem */
8396 args [0] = emit_get_rgctx_klass (cfg, context_used,
8397 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8402 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8404 if (cfg->opt & MONO_OPT_SHARED) {
8405 /* Decompose now to avoid problems with references to the domainvar */
8406 MonoInst *iargs [3];
8408 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8409 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8412 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8414 /* Decompose later since it is needed by abcrem */
8415 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8416 ins->dreg = alloc_preg (cfg);
8417 ins->sreg1 = sp [0]->dreg;
8418 ins->inst_newa_class = klass;
8419 ins->type = STACK_OBJ;
8421 MONO_ADD_INS (cfg->cbb, ins);
8422 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8423 cfg->cbb->has_array_access = TRUE;
8425 /* Needed so mono_emit_load_get_addr () gets called */
8426 mono_get_got_var (cfg);
8436 * we inline/optimize the initialization sequence if possible.
8437 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8438 * for small sizes open code the memcpy
8439 * ensure the rva field is big enough
8441 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8442 MonoMethod *memcpy_method = get_memcpy_method ();
8443 MonoInst *iargs [3];
8444 int add_reg = alloc_preg (cfg);
8446 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8447 if (cfg->compile_aot) {
8448 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8450 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8452 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8453 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8462 if (sp [0]->type != STACK_OBJ)
8465 dreg = alloc_preg (cfg);
8466 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8467 ins->dreg = alloc_preg (cfg);
8468 ins->sreg1 = sp [0]->dreg;
8469 ins->type = STACK_I4;
8470 MONO_ADD_INS (cfg->cbb, ins);
8471 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8472 cfg->cbb->has_array_access = TRUE;
8480 if (sp [0]->type != STACK_OBJ)
8483 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8485 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8486 CHECK_TYPELOAD (klass);
8487 /* we need to make sure that this array is exactly the type it needs
8488 * to be for correctness. the wrappers are lax with their usage
8489 * so we need to ignore them here
8491 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8492 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8495 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8510 case CEE_LDELEM_REF: {
8516 if (*ip == CEE_LDELEM) {
8518 token = read32 (ip + 1);
8519 klass = mini_get_class (method, token, generic_context);
8520 CHECK_TYPELOAD (klass);
8521 mono_class_init (klass);
8524 klass = array_access_to_klass (*ip);
8526 if (sp [0]->type != STACK_OBJ)
8529 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8531 if (sp [1]->opcode == OP_ICONST) {
8532 int array_reg = sp [0]->dreg;
8533 int index_reg = sp [1]->dreg;
8534 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8536 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8539 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8540 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8543 if (*ip == CEE_LDELEM)
8556 case CEE_STELEM_REF:
8563 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8565 if (*ip == CEE_STELEM) {
8567 token = read32 (ip + 1);
8568 klass = mini_get_class (method, token, generic_context);
8569 CHECK_TYPELOAD (klass);
8570 mono_class_init (klass);
8573 klass = array_access_to_klass (*ip);
8575 if (sp [0]->type != STACK_OBJ)
8578 /* storing a NULL doesn't need any of the complex checks in stelemref */
8579 if (generic_class_is_reference_type (cfg, klass) &&
8580 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8581 MonoMethod* helper = mono_marshal_get_stelemref ();
8582 MonoInst *iargs [3];
8584 if (sp [0]->type != STACK_OBJ)
8586 if (sp [2]->type != STACK_OBJ)
8593 mono_emit_method_call (cfg, helper, iargs, NULL);
8595 if (sp [1]->opcode == OP_ICONST) {
8596 int array_reg = sp [0]->dreg;
8597 int index_reg = sp [1]->dreg;
8598 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8600 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8601 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8603 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8604 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8608 if (*ip == CEE_STELEM)
8615 case CEE_CKFINITE: {
8619 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8620 ins->sreg1 = sp [0]->dreg;
8621 ins->dreg = alloc_freg (cfg);
8622 ins->type = STACK_R8;
8623 MONO_ADD_INS (bblock, ins);
8625 *sp++ = mono_decompose_opcode (cfg, ins);
8630 case CEE_REFANYVAL: {
8631 MonoInst *src_var, *src;
8633 int klass_reg = alloc_preg (cfg);
8634 int dreg = alloc_preg (cfg);
8637 MONO_INST_NEW (cfg, ins, *ip);
8640 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8641 CHECK_TYPELOAD (klass);
8642 mono_class_init (klass);
8644 if (cfg->generic_sharing_context)
8645 context_used = mono_class_check_context_used (klass);
8648 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8650 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8651 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8655 MonoInst *klass_ins;
8657 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8658 klass, MONO_RGCTX_INFO_KLASS);
8661 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8662 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8664 mini_emit_class_check (cfg, klass_reg, klass);
8666 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8667 ins->type = STACK_MP;
8672 case CEE_MKREFANY: {
8673 MonoInst *loc, *addr;
8676 MONO_INST_NEW (cfg, ins, *ip);
8679 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8680 CHECK_TYPELOAD (klass);
8681 mono_class_init (klass);
8683 if (cfg->generic_sharing_context)
8684 context_used = mono_class_check_context_used (klass);
8686 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8687 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8690 MonoInst *const_ins;
8691 int type_reg = alloc_preg (cfg);
8693 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8697 } else if (cfg->compile_aot) {
8698 int const_reg = alloc_preg (cfg);
8699 int type_reg = alloc_preg (cfg);
8701 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8702 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8706 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8707 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8711 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8712 ins->type = STACK_VTYPE;
8713 ins->klass = mono_defaults.typed_reference_class;
8720 MonoClass *handle_class;
8722 CHECK_STACK_OVF (1);
8725 n = read32 (ip + 1);
8727 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8728 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8729 handle = mono_method_get_wrapper_data (method, n);
8730 handle_class = mono_method_get_wrapper_data (method, n + 1);
8731 if (handle_class == mono_defaults.typehandle_class)
8732 handle = &((MonoClass*)handle)->byval_arg;
8735 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8739 mono_class_init (handle_class);
8740 if (cfg->generic_sharing_context) {
8741 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8742 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8743 /* This case handles ldtoken
8744 of an open type, like for
8747 } else if (handle_class == mono_defaults.typehandle_class) {
8748 /* If we get a MONO_TYPE_CLASS
8749 then we need to provide the
8751 instantiation of it. */
8752 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8755 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8756 } else if (handle_class == mono_defaults.fieldhandle_class)
8757 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8758 else if (handle_class == mono_defaults.methodhandle_class)
8759 context_used = mono_method_check_context_used (handle);
8761 g_assert_not_reached ();
8764 if ((cfg->opt & MONO_OPT_SHARED) &&
8765 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8766 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8767 MonoInst *addr, *vtvar, *iargs [3];
8768 int method_context_used;
8770 if (cfg->generic_sharing_context)
8771 method_context_used = mono_method_check_context_used (method);
8773 method_context_used = 0;
8775 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8777 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8778 EMIT_NEW_ICONST (cfg, iargs [1], n);
8779 if (method_context_used) {
8780 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8781 method, MONO_RGCTX_INFO_METHOD);
8782 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8784 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8785 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8787 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8791 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8793 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8794 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8795 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8796 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8797 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8798 MonoClass *tclass = mono_class_from_mono_type (handle);
8800 mono_class_init (tclass);
8802 ins = emit_get_rgctx_klass (cfg, context_used,
8803 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8804 } else if (cfg->compile_aot) {
8805 if (method->wrapper_type) {
8806 /* FIXME: n is not a normal token */
8807 cfg->disable_aot = TRUE;
8808 EMIT_NEW_PCONST (cfg, ins, NULL);
8810 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8813 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8815 ins->type = STACK_OBJ;
8816 ins->klass = cmethod->klass;
8819 MonoInst *addr, *vtvar;
8821 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8824 if (handle_class == mono_defaults.typehandle_class) {
8825 ins = emit_get_rgctx_klass (cfg, context_used,
8826 mono_class_from_mono_type (handle),
8827 MONO_RGCTX_INFO_TYPE);
8828 } else if (handle_class == mono_defaults.methodhandle_class) {
8829 ins = emit_get_rgctx_method (cfg, context_used,
8830 handle, MONO_RGCTX_INFO_METHOD);
8831 } else if (handle_class == mono_defaults.fieldhandle_class) {
8832 ins = emit_get_rgctx_field (cfg, context_used,
8833 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8835 g_assert_not_reached ();
8837 } else if (cfg->compile_aot) {
8838 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8840 EMIT_NEW_PCONST (cfg, ins, handle);
8842 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8844 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8854 MONO_INST_NEW (cfg, ins, OP_THROW);
8856 ins->sreg1 = sp [0]->dreg;
8858 bblock->out_of_line = TRUE;
8859 MONO_ADD_INS (bblock, ins);
8860 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8861 MONO_ADD_INS (bblock, ins);
8864 link_bblock (cfg, bblock, end_bblock);
8865 start_new_bblock = 1;
8867 case CEE_ENDFINALLY:
8868 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8869 MONO_ADD_INS (bblock, ins);
8871 start_new_bblock = 1;
8874 * Control will leave the method so empty the stack, otherwise
8875 * the next basic block will start with a nonempty stack.
8877 while (sp != stack_start) {
8885 if (*ip == CEE_LEAVE) {
8887 target = ip + 5 + (gint32)read32(ip + 1);
8890 target = ip + 2 + (signed char)(ip [1]);
8893 /* empty the stack */
8894 while (sp != stack_start) {
8899 * If this leave statement is in a catch block, check for a
8900 * pending exception, and rethrow it if necessary.
8902 for (i = 0; i < header->num_clauses; ++i) {
8903 MonoExceptionClause *clause = &header->clauses [i];
8906 * Use <= in the final comparison to handle clauses with multiple
8907 * leave statements, like in bug #78024.
8908 * The ordering of the exception clauses guarantees that we find the
8911 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8913 MonoBasicBlock *dont_throw;
8918 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8921 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8923 NEW_BBLOCK (cfg, dont_throw);
8926 * Currently, we always rethrow the abort exception, despite the
8927 * fact that this is not correct. See thread6.cs for an example.
8928 * But propagating the abort exception is more important than
8929 * getting the semantics right.
8931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8933 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8935 MONO_START_BB (cfg, dont_throw);
8940 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8942 for (tmp = handlers; tmp; tmp = tmp->next) {
8944 link_bblock (cfg, bblock, tblock);
8945 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8946 ins->inst_target_bb = tblock;
8947 MONO_ADD_INS (bblock, ins);
8948 bblock->has_call_handler = 1;
8950 g_list_free (handlers);
8953 MONO_INST_NEW (cfg, ins, OP_BR);
8954 MONO_ADD_INS (bblock, ins);
8955 GET_BBLOCK (cfg, tblock, target);
8956 link_bblock (cfg, bblock, tblock);
8957 ins->inst_target_bb = tblock;
8958 start_new_bblock = 1;
8960 if (*ip == CEE_LEAVE)
8969 * Mono specific opcodes
8971 case MONO_CUSTOM_PREFIX: {
8973 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8977 case CEE_MONO_ICALL: {
8979 MonoJitICallInfo *info;
8981 token = read32 (ip + 2);
8982 func = mono_method_get_wrapper_data (method, token);
8983 info = mono_find_jit_icall_by_addr (func);
8986 CHECK_STACK (info->sig->param_count);
8987 sp -= info->sig->param_count;
8989 ins = mono_emit_jit_icall (cfg, info->func, sp);
8990 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8994 inline_costs += 10 * num_calls++;
8998 case CEE_MONO_LDPTR: {
9001 CHECK_STACK_OVF (1);
9003 token = read32 (ip + 2);
9005 ptr = mono_method_get_wrapper_data (method, token);
9006 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9007 MonoJitICallInfo *callinfo;
9008 const char *icall_name;
9010 icall_name = method->name + strlen ("__icall_wrapper_");
9011 g_assert (icall_name);
9012 callinfo = mono_find_jit_icall_by_name (icall_name);
9013 g_assert (callinfo);
9015 if (ptr == callinfo->func) {
9016 /* Will be transformed into an AOTCONST later */
9017 EMIT_NEW_PCONST (cfg, ins, ptr);
9023 /* FIXME: Generalize this */
9024 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9025 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9030 EMIT_NEW_PCONST (cfg, ins, ptr);
9033 inline_costs += 10 * num_calls++;
9034 /* Can't embed random pointers into AOT code */
9035 cfg->disable_aot = 1;
9038 case CEE_MONO_ICALL_ADDR: {
9039 MonoMethod *cmethod;
9042 CHECK_STACK_OVF (1);
9044 token = read32 (ip + 2);
9046 cmethod = mono_method_get_wrapper_data (method, token);
9048 if (cfg->compile_aot) {
9049 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9051 ptr = mono_lookup_internal_call (cmethod);
9053 EMIT_NEW_PCONST (cfg, ins, ptr);
9059 case CEE_MONO_VTADDR: {
9060 MonoInst *src_var, *src;
9066 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9067 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9072 case CEE_MONO_NEWOBJ: {
9073 MonoInst *iargs [2];
9075 CHECK_STACK_OVF (1);
9077 token = read32 (ip + 2);
9078 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9079 mono_class_init (klass);
9080 NEW_DOMAINCONST (cfg, iargs [0]);
9081 MONO_ADD_INS (cfg->cbb, iargs [0]);
9082 NEW_CLASSCONST (cfg, iargs [1], klass);
9083 MONO_ADD_INS (cfg->cbb, iargs [1]);
9084 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9086 inline_costs += 10 * num_calls++;
9089 case CEE_MONO_OBJADDR:
9092 MONO_INST_NEW (cfg, ins, OP_MOVE);
9093 ins->dreg = alloc_preg (cfg);
9094 ins->sreg1 = sp [0]->dreg;
9095 ins->type = STACK_MP;
9096 MONO_ADD_INS (cfg->cbb, ins);
9100 case CEE_MONO_LDNATIVEOBJ:
9102 * Similar to LDOBJ, but instead load the unmanaged
9103 * representation of the vtype to the stack.
9108 token = read32 (ip + 2);
9109 klass = mono_method_get_wrapper_data (method, token);
9110 g_assert (klass->valuetype);
9111 mono_class_init (klass);
9114 MonoInst *src, *dest, *temp;
9117 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9118 temp->backend.is_pinvoke = 1;
9119 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9120 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9122 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9123 dest->type = STACK_VTYPE;
9124 dest->klass = klass;
9130 case CEE_MONO_RETOBJ: {
9132 * Same as RET, but return the native representation of a vtype
9135 g_assert (cfg->ret);
9136 g_assert (mono_method_signature (method)->pinvoke);
9141 token = read32 (ip + 2);
9142 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9144 if (!cfg->vret_addr) {
9145 g_assert (cfg->ret_var_is_local);
9147 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9149 EMIT_NEW_RETLOADA (cfg, ins);
9151 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9153 if (sp != stack_start)
9156 MONO_INST_NEW (cfg, ins, OP_BR);
9157 ins->inst_target_bb = end_bblock;
9158 MONO_ADD_INS (bblock, ins);
9159 link_bblock (cfg, bblock, end_bblock);
9160 start_new_bblock = 1;
9164 case CEE_MONO_CISINST:
9165 case CEE_MONO_CCASTCLASS: {
9170 token = read32 (ip + 2);
9171 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9172 if (ip [1] == CEE_MONO_CISINST)
9173 ins = handle_cisinst (cfg, klass, sp [0]);
9175 ins = handle_ccastclass (cfg, klass, sp [0]);
9181 case CEE_MONO_SAVE_LMF:
9182 case CEE_MONO_RESTORE_LMF:
9183 #ifdef MONO_ARCH_HAVE_LMF_OPS
9184 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9185 MONO_ADD_INS (bblock, ins);
9186 cfg->need_lmf_area = TRUE;
9190 case CEE_MONO_CLASSCONST:
9191 CHECK_STACK_OVF (1);
9193 token = read32 (ip + 2);
9194 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9197 inline_costs += 10 * num_calls++;
9199 case CEE_MONO_NOT_TAKEN:
9200 bblock->out_of_line = TRUE;
9204 CHECK_STACK_OVF (1);
9206 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9207 ins->dreg = alloc_preg (cfg);
9208 ins->inst_offset = (gint32)read32 (ip + 2);
9209 ins->type = STACK_PTR;
9210 MONO_ADD_INS (bblock, ins);
9215 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9225 /* somewhat similar to LDTOKEN */
9226 MonoInst *addr, *vtvar;
9227 CHECK_STACK_OVF (1);
9228 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9230 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9231 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9233 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9234 ins->type = STACK_VTYPE;
9235 ins->klass = mono_defaults.argumenthandle_class;
9248 * The following transforms:
9249 * CEE_CEQ into OP_CEQ
9250 * CEE_CGT into OP_CGT
9251 * CEE_CGT_UN into OP_CGT_UN
9252 * CEE_CLT into OP_CLT
9253 * CEE_CLT_UN into OP_CLT_UN
9255 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9257 MONO_INST_NEW (cfg, ins, cmp->opcode);
9259 cmp->sreg1 = sp [0]->dreg;
9260 cmp->sreg2 = sp [1]->dreg;
9261 type_from_op (cmp, sp [0], sp [1]);
9263 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9264 cmp->opcode = OP_LCOMPARE;
9265 else if (sp [0]->type == STACK_R8)
9266 cmp->opcode = OP_FCOMPARE;
9268 cmp->opcode = OP_ICOMPARE;
9269 MONO_ADD_INS (bblock, cmp);
9270 ins->type = STACK_I4;
9271 ins->dreg = alloc_dreg (cfg, ins->type);
9272 type_from_op (ins, sp [0], sp [1]);
9274 if (cmp->opcode == OP_FCOMPARE) {
9276 * The backends expect the fceq opcodes to do the
9279 cmp->opcode = OP_NOP;
9280 ins->sreg1 = cmp->sreg1;
9281 ins->sreg2 = cmp->sreg2;
9283 MONO_ADD_INS (bblock, ins);
9290 MonoMethod *cil_method;
9291 gboolean needs_static_rgctx_invoke;
9293 CHECK_STACK_OVF (1);
9295 n = read32 (ip + 2);
9296 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9299 mono_class_init (cmethod->klass);
9301 mono_save_token_info (cfg, image, n, cmethod);
9303 if (cfg->generic_sharing_context)
9304 context_used = mono_method_check_context_used (cmethod);
9306 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9308 cil_method = cmethod;
9309 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9310 METHOD_ACCESS_FAILURE;
9312 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9313 if (check_linkdemand (cfg, method, cmethod))
9315 CHECK_CFG_EXCEPTION;
9316 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9317 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9321 * Optimize the common case of ldftn+delegate creation
9323 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9324 /* FIXME: SGEN support */
9325 /* FIXME: handle shared static generic methods */
9326 /* FIXME: handle this in shared code */
9327 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9328 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9329 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9330 MonoInst *target_ins;
9333 invoke = mono_get_delegate_invoke (ctor_method->klass);
9334 if (!invoke || !mono_method_signature (invoke))
9338 if (cfg->verbose_level > 3)
9339 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9340 target_ins = sp [-1];
9342 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9351 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9353 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9355 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9359 inline_costs += 10 * num_calls++;
9362 case CEE_LDVIRTFTN: {
9367 n = read32 (ip + 2);
9368 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9371 mono_class_init (cmethod->klass);
9373 if (cfg->generic_sharing_context)
9374 context_used = mono_method_check_context_used (cmethod);
9376 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9377 if (check_linkdemand (cfg, method, cmethod))
9379 CHECK_CFG_EXCEPTION;
9380 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9381 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9388 args [1] = emit_get_rgctx_method (cfg, context_used,
9389 cmethod, MONO_RGCTX_INFO_METHOD);
9390 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9392 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9393 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9397 inline_costs += 10 * num_calls++;
9401 CHECK_STACK_OVF (1);
9403 n = read16 (ip + 2);
9405 EMIT_NEW_ARGLOAD (cfg, ins, n);
9410 CHECK_STACK_OVF (1);
9412 n = read16 (ip + 2);
9414 NEW_ARGLOADA (cfg, ins, n);
9415 MONO_ADD_INS (cfg->cbb, ins);
9423 n = read16 (ip + 2);
9425 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9427 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9431 CHECK_STACK_OVF (1);
9433 n = read16 (ip + 2);
9435 EMIT_NEW_LOCLOAD (cfg, ins, n);
9440 unsigned char *tmp_ip;
9441 CHECK_STACK_OVF (1);
9443 n = read16 (ip + 2);
9446 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9452 EMIT_NEW_LOCLOADA (cfg, ins, n);
9461 n = read16 (ip + 2);
9463 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9465 emit_stloc_ir (cfg, sp, header, n);
9472 if (sp != stack_start)
9474 if (cfg->method != method)
9476 * Inlining this into a loop in a parent could lead to
9477 * stack overflows which is different behavior than the
9478 * non-inlined case, thus disable inlining in this case.
9480 goto inline_failure;
9482 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9483 ins->dreg = alloc_preg (cfg);
9484 ins->sreg1 = sp [0]->dreg;
9485 ins->type = STACK_PTR;
9486 MONO_ADD_INS (cfg->cbb, ins);
9488 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9490 ins->flags |= MONO_INST_INIT;
9495 case CEE_ENDFILTER: {
9496 MonoExceptionClause *clause, *nearest;
9497 int cc, nearest_num;
9501 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9503 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9504 ins->sreg1 = (*sp)->dreg;
9505 MONO_ADD_INS (bblock, ins);
9506 start_new_bblock = 1;
9511 for (cc = 0; cc < header->num_clauses; ++cc) {
9512 clause = &header->clauses [cc];
9513 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9514 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9515 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9521 if ((ip - header->code) != nearest->handler_offset)
9526 case CEE_UNALIGNED_:
9527 ins_flag |= MONO_INST_UNALIGNED;
9528 /* FIXME: record alignment? we can assume 1 for now */
9533 ins_flag |= MONO_INST_VOLATILE;
9537 ins_flag |= MONO_INST_TAILCALL;
9538 cfg->flags |= MONO_CFG_HAS_TAIL;
9539 /* Can't inline tail calls at this time */
9540 inline_costs += 100000;
9547 token = read32 (ip + 2);
9548 klass = mini_get_class (method, token, generic_context);
9549 CHECK_TYPELOAD (klass);
9550 if (generic_class_is_reference_type (cfg, klass))
9551 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9553 mini_emit_initobj (cfg, *sp, NULL, klass);
9557 case CEE_CONSTRAINED_:
9559 token = read32 (ip + 2);
9560 if (method->wrapper_type != MONO_WRAPPER_NONE)
9561 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9563 constrained_call = mono_class_get_full (image, token, generic_context);
9564 CHECK_TYPELOAD (constrained_call);
9569 MonoInst *iargs [3];
9573 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9574 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9575 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9576 /* emit_memset only works when val == 0 */
9577 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9582 if (ip [1] == CEE_CPBLK) {
9583 MonoMethod *memcpy_method = get_memcpy_method ();
9584 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9586 MonoMethod *memset_method = get_memset_method ();
9587 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9597 ins_flag |= MONO_INST_NOTYPECHECK;
9599 ins_flag |= MONO_INST_NORANGECHECK;
9600 /* we ignore the no-nullcheck for now since we
9601 * really do it explicitly only when doing callvirt->call
9607 int handler_offset = -1;
9609 for (i = 0; i < header->num_clauses; ++i) {
9610 MonoExceptionClause *clause = &header->clauses [i];
9611 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9612 handler_offset = clause->handler_offset;
9617 bblock->flags |= BB_EXCEPTION_UNSAFE;
9619 g_assert (handler_offset != -1);
9621 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9622 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9623 ins->sreg1 = load->dreg;
9624 MONO_ADD_INS (bblock, ins);
9626 link_bblock (cfg, bblock, end_bblock);
9627 start_new_bblock = 1;
9635 CHECK_STACK_OVF (1);
9637 token = read32 (ip + 2);
9638 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9639 MonoType *type = mono_type_create_from_typespec (image, token);
9640 token = mono_type_size (type, &ialign);
9642 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9643 CHECK_TYPELOAD (klass);
9644 mono_class_init (klass);
9645 token = mono_class_value_size (klass, &align);
9647 EMIT_NEW_ICONST (cfg, ins, token);
9652 case CEE_REFANYTYPE: {
9653 MonoInst *src_var, *src;
9659 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9661 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9662 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9663 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9673 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9678 g_error ("opcode 0x%02x not handled", *ip);
9681 if (start_new_bblock != 1)
9684 bblock->cil_length = ip - bblock->cil_code;
9685 bblock->next_bb = end_bblock;
9687 if (cfg->method == method && cfg->domainvar) {
9689 MonoInst *get_domain;
9691 cfg->cbb = init_localsbb;
9693 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9694 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9697 get_domain->dreg = alloc_preg (cfg);
9698 MONO_ADD_INS (cfg->cbb, get_domain);
9700 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9701 MONO_ADD_INS (cfg->cbb, store);
9704 #ifdef TARGET_POWERPC
9705 if (cfg->compile_aot)
9706 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9707 mono_get_got_var (cfg);
9710 if (cfg->method == method && cfg->got_var)
9711 mono_emit_load_got_addr (cfg);
9716 cfg->cbb = init_localsbb;
9718 for (i = 0; i < header->num_locals; ++i) {
9719 MonoType *ptype = header->locals [i];
9720 int t = ptype->type;
9721 dreg = cfg->locals [i]->dreg;
9723 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9724 t = mono_class_enum_basetype (ptype->data.klass)->type;
9726 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9727 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9728 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9729 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9730 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9731 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9732 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9733 ins->type = STACK_R8;
9734 ins->inst_p0 = (void*)&r8_0;
9735 ins->dreg = alloc_dreg (cfg, STACK_R8);
9736 MONO_ADD_INS (init_localsbb, ins);
9737 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9738 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9739 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9740 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9742 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9749 if (cfg->method == method) {
9751 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9752 bb->region = mono_find_block_region (cfg, bb->real_offset);
9754 mono_create_spvar_for_region (cfg, bb->region);
9755 if (cfg->verbose_level > 2)
9756 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9760 g_slist_free (class_inits);
9761 dont_inline = g_list_remove (dont_inline, method);
9763 if (inline_costs < 0) {
9766 /* Method is too large */
9767 mname = mono_method_full_name (method, TRUE);
9768 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9769 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9774 if ((cfg->verbose_level > 2) && (cfg->method == method))
9775 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9777 return inline_costs;
9780 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9781 g_slist_free (class_inits);
9782 dont_inline = g_list_remove (dont_inline, method);
9786 g_slist_free (class_inits);
9787 dont_inline = g_list_remove (dont_inline, method);
9791 g_slist_free (class_inits);
9792 dont_inline = g_list_remove (dont_inline, method);
9793 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9797 g_slist_free (class_inits);
9798 dont_inline = g_list_remove (dont_inline, method);
9799 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to the corresponding
 * OP_STORE*_MEMBASE_IMM opcode, so that a store of a known constant can embed
 * the constant as an immediate instead of first loading it into a register
 * (used by mono_spill_global_vars () when fusing ICONST/I8CONST stores).
 *   Any opcode without an immediate variant (e.g. FP stores) hits
 * g_assert_not_reached ().
 * NOTE(review): this extract is elided — the signature/switch scaffolding
 * between the lines below is not visible here; code lines kept verbatim.
 */
9804 store_membase_reg_to_store_membase_imm (int opcode)
/* pointer-sized store */
9807 case OP_STORE_MEMBASE_REG:
9808 return OP_STORE_MEMBASE_IMM;
/* 1/2/4/8 byte integer stores */
9809 case OP_STOREI1_MEMBASE_REG:
9810 return OP_STOREI1_MEMBASE_IMM;
9811 case OP_STOREI2_MEMBASE_REG:
9812 return OP_STOREI2_MEMBASE_IMM;
9813 case OP_STOREI4_MEMBASE_REG:
9814 return OP_STOREI4_MEMBASE_IMM;
9815 case OP_STOREI8_MEMBASE_REG:
9816 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for the remaining store opcodes */
9818 g_assert_not_reached ();
9824 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. the *_IMM form of a
 * div/rem/shift/compare/store/call opcode), so a constant second operand can
 * be folded directly into the instruction.
 * NOTE(review): this extract is elided — most 'case' labels matching the
 * returns below, and the fallback for unhandled opcodes (presumably
 * returning -1), are not visible here; confirm against the full source.
 */
9827 mono_op_to_op_imm (int opcode)
/* unsigned 32-bit div/rem and shift */
9837 return OP_IDIV_UN_IMM;
9841 return OP_IREM_UN_IMM;
9855 return OP_ISHR_UN_IMM;
/* unsigned 64-bit shift */
9872 return OP_LSHR_UN_IMM;
/* compares: native, 32-bit, 64-bit */
9875 return OP_COMPARE_IMM;
9877 return OP_ICOMPARE_IMM;
9879 return OP_LCOMPARE_IMM;
/* stores whose source value became a constant */
9881 case OP_STORE_MEMBASE_REG:
9882 return OP_STORE_MEMBASE_IMM;
9883 case OP_STOREI1_MEMBASE_REG:
9884 return OP_STOREI1_MEMBASE_IMM;
9885 case OP_STOREI2_MEMBASE_REG:
9886 return OP_STOREI2_MEMBASE_IMM;
9887 case OP_STOREI4_MEMBASE_REG:
9888 return OP_STOREI4_MEMBASE_IMM;
/* target-specific forms only exist on x86/amd64 */
9890 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9892 return OP_X86_PUSH_IMM;
9893 case OP_X86_COMPARE_MEMBASE_REG:
9894 return OP_X86_COMPARE_MEMBASE_IMM;
9896 #if defined(TARGET_AMD64)
9897 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9898 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* call-through-register and localloc also have immediate forms */
9900 case OP_VOIDCALL_REG:
9909 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode to the corresponding JIT OP_LOAD*_MEMBASE
 * opcode. The two consecutive OP_LOAD_MEMBASE returns presumably correspond
 * to CEE_LDIND_I and CEE_LDIND_REF, both pointer-sized loads — the 'case'
 * labels are elided in this extract, so confirm against the full source.
 *   Unhandled opcodes hit g_assert_not_reached ().
 */
9916 ldind_to_load_membase (int opcode)
9920 return OP_LOADI1_MEMBASE;
9922 return OP_LOADU1_MEMBASE;
9924 return OP_LOADI2_MEMBASE;
9926 return OP_LOADU2_MEMBASE;
9928 return OP_LOADI4_MEMBASE;
9930 return OP_LOADU4_MEMBASE;
/* two pointer-sized variants map to the same opcode */
9932 return OP_LOAD_MEMBASE;
9934 return OP_LOAD_MEMBASE;
9936 return OP_LOADI8_MEMBASE;
9938 return OP_LOADR4_MEMBASE;
9940 return OP_LOADR8_MEMBASE;
9942 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode to the corresponding JIT
 * OP_STORE*_MEMBASE_REG opcode ('case' labels elided in this extract).
 *   Unhandled opcodes hit g_assert_not_reached ().
 */
9949 stind_to_store_membase (int opcode)
9953 return OP_STOREI1_MEMBASE_REG;
9955 return OP_STOREI2_MEMBASE_REG;
9957 return OP_STOREI4_MEMBASE_REG;
/* pointer-sized store (presumably CEE_STIND_I / CEE_STIND_REF) */
9960 return OP_STORE_MEMBASE_REG;
9962 return OP_STOREI8_MEMBASE_REG;
9964 return OP_STORER4_MEMBASE_REG;
9966 return OP_STORER8_MEMBASE_REG;
9968 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base-register + offset) to the
 * absolute-address OP_LOAD*_MEM variant, but only on targets that have such
 * instructions (x86/amd64 here, per the #if below); the fallback return for
 * other targets/opcodes is elided in this extract (presumably -1).
 * OP_LOADI8_MEM only exists on 64-bit registers.
 */
9975 mono_load_membase_to_load_mem (int opcode)
9977 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9978 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9980 case OP_LOAD_MEMBASE:
9982 case OP_LOADU1_MEMBASE:
9983 return OP_LOADU1_MEM;
9984 case OP_LOADU2_MEMBASE:
9985 return OP_LOADU2_MEM;
9986 case OP_LOADI4_MEMBASE:
9987 return OP_LOADI4_MEM;
9988 case OP_LOADU4_MEMBASE:
9989 return OP_LOADU4_MEM;
9990 #if SIZEOF_REGISTER == 8
9991 case OP_LOADI8_MEMBASE:
9992 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given STORE_OPCODE (the store used to spill the destination variable) and
 * an ALU OPCODE, return a read-modify-write *_MEMBASE_REG/_IMM opcode so
 * "var = var OP x" can be performed directly on the variable's stack slot,
 * avoiding a separate load+store. Only x86/amd64 have such instructions;
 * the store opcode must match the operand width checked below.
 * NOTE(review): the fallback return for unsupported combinations is elided
 * in this extract (presumably -1, given how callers test the result).
 */
10001 op_to_op_dest_membase (int store_opcode, int opcode)
10003 #if defined(TARGET_X86)
/* x86: only 32-bit/pointer-sized destinations qualify */
10004 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source forms */
10009 return OP_X86_ADD_MEMBASE_REG;
10011 return OP_X86_SUB_MEMBASE_REG;
10013 return OP_X86_AND_MEMBASE_REG;
10015 return OP_X86_OR_MEMBASE_REG;
10017 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source forms */
10020 return OP_X86_ADD_MEMBASE_IMM;
10023 return OP_X86_SUB_MEMBASE_IMM;
10026 return OP_X86_AND_MEMBASE_IMM;
10029 return OP_X86_OR_MEMBASE_IMM;
10032 return OP_X86_XOR_MEMBASE_IMM;
10038 #if defined(TARGET_AMD64)
/* amd64: 32-bit, 64-bit and pointer-sized destinations qualify */
10039 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit operations reuse the X86_* opcodes ... */
10044 return OP_X86_ADD_MEMBASE_REG;
10046 return OP_X86_SUB_MEMBASE_REG;
10048 return OP_X86_AND_MEMBASE_REG;
10050 return OP_X86_OR_MEMBASE_REG;
10052 return OP_X86_XOR_MEMBASE_REG;
10054 return OP_X86_ADD_MEMBASE_IMM;
10056 return OP_X86_SUB_MEMBASE_IMM;
10058 return OP_X86_AND_MEMBASE_IMM;
10060 return OP_X86_OR_MEMBASE_IMM;
10062 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64-bit operations use the AMD64_* opcodes */
10064 return OP_AMD64_ADD_MEMBASE_REG;
10066 return OP_AMD64_SUB_MEMBASE_REG;
10068 return OP_AMD64_AND_MEMBASE_REG;
10070 return OP_AMD64_OR_MEMBASE_REG;
10072 return OP_AMD64_XOR_MEMBASE_REG;
10075 return OP_AMD64_ADD_MEMBASE_IMM;
10078 return OP_AMD64_SUB_MEMBASE_IMM;
10081 return OP_AMD64_AND_MEMBASE_IMM;
10084 return OP_AMD64_OR_MEMBASE_IMM;
10087 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the store that spills its destination:
 * when the destination is stored with a 1-byte store, the x86/amd64 SETcc
 * instruction can write the flag directly to the variable's stack slot.
 * The 'case' labels for the source opcodes (presumably OP_CEQ-style
 * set-condition opcodes) and the fallback return are elided in this extract.
 */
10097 op_to_op_store_membase (int store_opcode, int opcode)
10099 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10102 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10103 return OP_X86_SETEQ_MEMBASE;
10105 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10106 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load from a spilled variable into an instruction's FIRST source
 * operand: given LOAD_OPCODE (how the variable would be loaded) and OPCODE,
 * return an x86/amd64 *_MEMBASE variant that reads the operand from memory
 * directly, or (presumably) -1 when no such fusion exists — the fallback
 * return is elided in this extract.
 */
10114 op_to_op_src1_membase (int load_opcode, int opcode)
10117 /* FIXME: This has sign extension issues */
/* x86: special-case an 8-bit unsigned compare with immediate */
10119 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10120 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only 32-bit/pointer-sized loads can be fused on x86 */
10123 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10128 return OP_X86_PUSH_MEMBASE;
10129 case OP_COMPARE_IMM:
10130 case OP_ICOMPARE_IMM:
10131 return OP_X86_COMPARE_MEMBASE_IMM;
10134 return OP_X86_COMPARE_MEMBASE_REG;
10138 #ifdef TARGET_AMD64
10139 /* FIXME: This has sign extension issues */
10141 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10142 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: pointer/64-bit loads can feed a push */
10147 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10148 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare fusion below is intentionally disabled upstream
 * (commented out) because it only handles 32 bit immediates */
10150 /* FIXME: This only works for 32 bit immediates
10151 case OP_COMPARE_IMM:
10152 case OP_LCOMPARE_IMM:
10153 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10154 return OP_AMD64_COMPARE_MEMBASE_IMM;
10156 case OP_ICOMPARE_IMM:
10157 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10158 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: width of the load selects 64- vs 32-bit variant */
10162 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10163 return OP_AMD64_COMPARE_MEMBASE_REG;
10166 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10167 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load from a spilled variable into an instruction's SECOND source
 * operand (reg OP [membase] forms on x86/amd64), mirroring
 * op_to_op_src1_membase () above. The fallback return for unsupported
 * combinations is elided in this extract (presumably -1).
 */
10176 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 32-bit/pointer-sized loads can be fused */
10179 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10185 return OP_X86_COMPARE_REG_MEMBASE;
10187 return OP_X86_ADD_REG_MEMBASE;
10189 return OP_X86_SUB_REG_MEMBASE;
10191 return OP_X86_AND_REG_MEMBASE;
10193 return OP_X86_OR_REG_MEMBASE;
10195 return OP_X86_XOR_REG_MEMBASE;
10199 #ifdef TARGET_AMD64
/* amd64: the width of the load selects the 32- vs 64-bit variant */
10202 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10203 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10207 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10208 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit ALU ops reuse the X86_* opcodes on amd64 */
10211 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10212 return OP_X86_ADD_REG_MEMBASE;
10214 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10215 return OP_X86_SUB_REG_MEMBASE;
10217 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10218 return OP_X86_AND_REG_MEMBASE;
10220 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10221 return OP_X86_OR_REG_MEMBASE;
10223 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10224 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit/pointer-sized ALU ops use the AMD64_* opcodes */
10226 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10227 return OP_AMD64_ADD_REG_MEMBASE;
10229 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10230 return OP_AMD64_SUB_REG_MEMBASE;
10232 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10233 return OP_AMD64_AND_REG_MEMBASE;
10235 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10236 return OP_AMD64_OR_REG_MEMBASE;
10238 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10239 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes that the
 * target emulates in software (long shifts on 32-bit registers, mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV/DIV is set) — the emulation helpers need the
 * operand in a register. The 'case' labels guarded by the #ifs below are
 * elided in this extract; unaffected opcodes fall through to
 * mono_op_to_op_imm ().
 */
10247 mono_op_to_op_imm_noemul (int opcode)
10250 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10255 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10263 return mono_op_to_op_imm (opcode);
10267 #ifndef DISABLE_JIT
10270 * mono_handle_global_vregs:
10272 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * for them, so later passes spill/reload them across basic-block boundaries.
 * Conversely, variables touched in only one bblock are demoted to plain local
 * vregs. Finally the varinfo/vars tables are compacted to drop dead entries.
 * NOTE(review): this extract is elided; loop/brace scaffolding between the
 * lines below is not visible here. Code lines are kept verbatim.
 */
10276 mono_handle_global_vregs (MonoCompile *cfg)
10278 gint32 *vreg_to_bb;
10279 MonoBasicBlock *bb;
/* vreg -> (block_num + 1), or -1 once seen in two different blocks.
 * NOTE(review): element size is sizeof (gint32*) for a gint32 array, and the
 * "+ 1" adds one byte rather than one element — over-allocates harmlessly on
 * 64-bit, but looks unintended; confirm against upstream. */
10282 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10284 #ifdef MONO_ARCH_SIMD_INTRINSICS
10285 if (cfg->uses_simd_intrinsics)
10286 mono_simd_simplify_indirection (cfg);
10289 /* Find local vregs used in more than one bb */
10290 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10291 MonoInst *ins = bb->code;
10292 int block_num = bb->block_num;
10294 if (cfg->verbose_level > 2)
10295 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10298 for (; ins; ins = ins->next) {
10299 const char *spec = INS_INFO (ins->opcode);
10300 int regtype, regindex;
10303 if (G_UNLIKELY (cfg->verbose_level > 2))
10304 mono_print_ins (ins);
/* method_to_ir must already have lowered all CIL opcodes */
10306 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dest, src1, src2, src3 of the instruction in turn */
10308 for (regindex = 0; regindex < 4; regindex ++) {
10311 if (regindex == 0) {
10312 regtype = spec [MONO_INST_DEST];
10313 if (regtype == ' ')
10316 } else if (regindex == 1) {
10317 regtype = spec [MONO_INST_SRC1];
10318 if (regtype == ' ')
10321 } else if (regindex == 2) {
10322 regtype = spec [MONO_INST_SRC2];
10323 if (regtype == ' ')
10326 } else if (regindex == 3) {
10327 regtype = spec [MONO_INST_SRC3];
10328 if (regtype == ' ')
10333 #if SIZEOF_REGISTER == 4
10334 /* In the LLVM case, the long opcodes are not decomposed */
10335 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10337 * Since some instructions reference the original long vreg,
10338 * and some reference the two component vregs, it is quite hard
10339 * to determine when it needs to be global. So be conservative.
10341 if (!get_vreg_to_inst (cfg, vreg)) {
10342 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10344 if (cfg->verbose_level > 2)
10345 printf ("LONG VREG R%d made global.\n", vreg);
10349 * Make the component vregs volatile since the optimizations can
10350 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of a long vreg */
10352 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10353 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10357 g_assert (vreg != -1);
10359 prev_bb = vreg_to_bb [vreg];
10360 if (prev_bb == 0) {
10361 /* 0 is a valid block num */
10362 vreg_to_bb [vreg] = block_num + 1;
10363 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are global by construction; skip them */
10364 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10367 if (!get_vreg_to_inst (cfg, vreg)) {
10368 if (G_UNLIKELY (cfg->verbose_level > 2))
10369 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick a variable type matching the register class (the
 * 'case' labels for regtype are elided in this extract) */
10373 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10376 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10379 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10382 g_assert_not_reached ();
10386 /* Flag as having been used in more than one bb */
10387 vreg_to_bb [vreg] = -1;
10393 /* If a variable is used in only one bblock, convert it into a local vreg */
10394 for (i = 0; i < cfg->num_varinfo; i++) {
10395 MonoInst *var = cfg->varinfo [i];
10396 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10398 switch (var->type) {
10404 #if SIZEOF_REGISTER == 8
10407 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10408 /* Enabling this screws up the fp stack on x86 */
10411 /* Arguments are implicitly global */
10412 /* Putting R4 vars into registers doesn't work currently */
10413 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10415 * Make that the variable's liveness interval doesn't contain a call, since
10416 * that would cause the lvreg to be spilled, making the whole optimization
10419 /* This is too slow for JIT compilation */
/* NOTE(review): the section below appears to be disabled upstream (its
 * guard, presumably '#if 0', is elided in this extract) — it indexes
 * vreg_to_bb as if entries were basic-block pointers, which does not
 * match the gint32 encoding used above. Confirm before relying on it. */
10421 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10423 int def_index, call_index, ins_index;
10424 gboolean spilled = FALSE;
10429 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10430 const char *spec = INS_INFO (ins->opcode);
10432 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10433 def_index = ins_index;
10435 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
/* NOTE(review): this clause is a verbatim duplicate of the previous
 * one — it almost certainly was meant to test SRC2/sreg2, so sreg2
 * uses are never detected here. Confirm against upstream. */
10436 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10437 if (call_index > def_index) {
10443 if (MONO_IS_CALL (ins))
10444 call_index = ins_index;
10454 if (G_UNLIKELY (cfg->verbose_level > 2))
10455 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: drop the variable, keep using the raw vreg */
10456 var->flags |= MONO_INST_IS_DEAD;
10457 cfg->vreg_to_inst [var->dreg] = NULL;
10464 * Compress the varinfo and vars tables so the liveness computation is faster and
10465 * takes up less space.
10468 for (i = 0; i < cfg->num_varinfo; ++i) {
10469 MonoInst *var = cfg->varinfo [i];
10470 if (pos < i && cfg->locals_start == i)
10471 cfg->locals_start = pos;
10472 if (!(var->flags & MONO_INST_IS_DEAD)) {
10474 cfg->varinfo [pos] = cfg->varinfo [i];
10475 cfg->varinfo [pos]->inst_c0 = pos;
10476 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10477 cfg->vars [pos].idx = pos;
10478 #if SIZEOF_REGISTER == 4
10479 if (cfg->varinfo [pos]->type == STACK_I8) {
10480 /* Modify the two component vars too */
10483 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10484 var1->inst_c0 = pos;
10485 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10486 var1->inst_c0 = pos;
10493 cfg->num_varinfo = pos;
10494 if (cfg->locals_start > cfg->num_varinfo)
10495 cfg->locals_start = cfg->num_varinfo;
10499 * mono_spill_global_vars:
10501 * Generate spill code for variables which are not allocated to registers,
10502 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10503 * code is generated which could be optimized by the local optimization passes.
/*
 * Additional notes (review): per instruction, the dreg referencing a global
 * variable gets a store to the variable's stack slot inserted after it, and
 * each sreg gets a load inserted before it — unless the access can be fused
 * into the instruction itself via the op_to_op_*_membase () helpers above.
 * A vreg -> lvreg cache avoids reloading a variable repeatedly; it is
 * invalidated at bblock boundaries and across calls. live_range_start/end
 * record the defining/last-using instruction per vreg for debug info.
 * NOTE(review): this extract is elided; loop/brace scaffolding between the
 * lines below is not visible here. Code lines are kept verbatim.
 */
10506 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10508 MonoBasicBlock *bb;
10510 int orig_next_vreg;
10511 guint32 *vreg_to_lvreg;
10513 guint32 i, lvregs_len;
10514 gboolean dest_has_lvreg = FALSE;
10515 guint32 stacktypes [128];
10516 MonoInst **live_range_start, **live_range_end;
10517 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10519 *need_local_opts = FALSE;
10521 memset (spec2, 0, sizeof (spec2));
10523 /* FIXME: Move this function to mini.c */
/* map INS_INFO regtype characters to stack types for alloc_dreg () */
10524 stacktypes ['i'] = STACK_PTR;
10525 stacktypes ['l'] = STACK_I8;
10526 stacktypes ['f'] = STACK_R8;
10527 #ifdef MONO_ARCH_SIMD_INTRINSICS
10528 stacktypes ['x'] = STACK_VTYPE;
10531 #if SIZEOF_REGISTER == 4
10532 /* Create MonoInsts for longs */
10533 for (i = 0; i < cfg->num_varinfo; i++) {
10534 MonoInst *ins = cfg->varinfo [i];
10536 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10537 switch (ins->type) {
10538 #ifdef MONO_ARCH_SOFT_FLOAT
/* give the two 32-bit halves (dreg+1 = low, dreg+2 = high)
 * REGOFFSET entries pointing into the 64-bit slot */
10544 g_assert (ins->opcode == OP_REGOFFSET);
10546 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10548 tree->opcode = OP_REGOFFSET;
10549 tree->inst_basereg = ins->inst_basereg;
10550 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10552 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10554 tree->opcode = OP_REGOFFSET;
10555 tree->inst_basereg = ins->inst_basereg;
10556 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10566 /* FIXME: widening and truncation */
10569 * As an optimization, when a variable allocated to the stack is first loaded into
10570 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10571 * the variable again.
10573 orig_next_vreg = cfg->next_vreg;
10574 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10575 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10579 * These arrays contain the first and last instructions accessing a given
10581 * Since we emit bblocks in the same order we process them here, and we
10582 * don't split live ranges, these will precisely describe the live range of
10583 * the variable, i.e. the instruction range where a valid value can be found
10584 * in the variables location.
10586 /* FIXME: Only do this if debugging info is requested */
10587 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10588 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10589 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10590 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10592 /* Add spill loads/stores */
10593 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10596 if (cfg->verbose_level > 2)
10597 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* the lvreg cache is only valid within one bblock */
10599 /* Clear vreg_to_lvreg array */
10600 for (i = 0; i < lvregs_len; i++)
10601 vreg_to_lvreg [lvregs [i]] = 0;
10605 MONO_BB_FOR_EACH_INS (bb, ins) {
10606 const char *spec = INS_INFO (ins->opcode);
10607 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10608 gboolean store, no_lvreg;
10609 int sregs [MONO_MAX_SRC_REGS];
10611 if (G_UNLIKELY (cfg->verbose_level > 2))
10612 mono_print_ins (ins);
10614 if (ins->opcode == OP_NOP)
10618 * We handle LDADDR here as well, since it can only be decomposed
10619 * when variable addresses are known.
10621 if (ins->opcode == OP_LDADDR) {
10622 MonoInst *var = ins->inst_p0;
10624 if (var->opcode == OP_VTARG_ADDR) {
10625 /* Happens on SPARC/S390 where vtypes are passed by reference */
10626 MonoInst *vtaddr = var->inst_left;
10627 if (vtaddr->opcode == OP_REGVAR) {
10628 ins->opcode = OP_MOVE;
10629 ins->sreg1 = vtaddr->dreg;
10631 else if (var->inst_left->opcode == OP_REGOFFSET) {
10632 ins->opcode = OP_LOAD_MEMBASE;
10633 ins->inst_basereg = vtaddr->inst_basereg;
10634 ins->inst_offset = vtaddr->inst_offset;
10638 g_assert (var->opcode == OP_REGOFFSET);
/* address of a stack slot = frame base + offset */
10640 ins->opcode = OP_ADD_IMM;
10641 ins->sreg1 = var->inst_basereg;
10642 ins->inst_imm = var->inst_offset;
10645 *need_local_opts = TRUE;
10646 spec = INS_INFO (ins->opcode);
/* no CIL-level opcodes may survive to this point */
10649 if (ins->opcode < MONO_CEE_LAST) {
10650 mono_print_ins (ins);
10651 g_assert_not_reached ();
10655 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... extra source: temporarily swap dreg/sreg2 and use a fixed-up
 * spec (spec2) so the generic dreg/sreg handling below works */
10659 if (MONO_IS_STORE_MEMBASE (ins)) {
10660 tmp_reg = ins->dreg;
10661 ins->dreg = ins->sreg2;
10662 ins->sreg2 = tmp_reg;
10665 spec2 [MONO_INST_DEST] = ' ';
10666 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10667 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10668 spec2 [MONO_INST_SRC3] = ' ';
10670 } else if (MONO_IS_STORE_MEMINDEX (ins))
10671 g_assert_not_reached ();
10676 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10677 printf ("\t %.3s %d", spec, ins->dreg);
10678 num_sregs = mono_inst_get_src_registers (ins, sregs);
10679 for (srcindex = 0; srcindex < 3; ++srcindex)
10680 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10687 regtype = spec [MONO_INST_DEST];
10688 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* the dreg references a global variable -> spill it after the ins */
10691 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10692 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10693 MonoInst *store_ins;
10695 MonoInst *def_ins = ins;
10696 int dreg = ins->dreg; /* The original vreg */
10698 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10700 if (var->opcode == OP_REGVAR) {
/* variable lives in a hard register: just substitute it */
10701 ins->dreg = var->dreg;
10702 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10704 * Instead of emitting a load+store, use a _membase opcode.
10706 g_assert (var->opcode == OP_REGOFFSET);
10707 if (ins->opcode == OP_MOVE) {
10711 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10712 ins->inst_basereg = var->inst_basereg;
10713 ins->inst_offset = var->inst_offset;
10716 spec = INS_INFO (ins->opcode);
10720 g_assert (var->opcode == OP_REGOFFSET);
10722 prev_dreg = ins->dreg;
10724 /* Invalidate any previous lvreg for this vreg */
10725 vreg_to_lvreg [ins->dreg] = 0;
10729 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft float: doubles are stored as 64-bit integers */
10730 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10732 store_opcode = OP_STOREI8_MEMBASE_REG;
/* write the result to a fresh lvreg, then store it to the slot */
10736 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10738 if (regtype == 'l') {
/* 32-bit longs: store the two halves separately */
10739 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10740 mono_bblock_insert_after_ins (bb, ins, store_ins);
10741 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10742 mono_bblock_insert_after_ins (bb, ins, store_ins);
10743 def_ins = store_ins;
10746 g_assert (store_opcode != OP_STOREV_MEMBASE);
10748 /* Try to fuse the store into the instruction itself */
10749 /* FIXME: Add more instructions */
10750 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* constant result: store the immediate directly */
10751 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10752 ins->inst_imm = ins->inst_c0;
10753 ins->inst_destbasereg = var->inst_basereg;
10754 ins->inst_offset = var->inst_offset;
10755 spec = INS_INFO (ins->opcode);
10756 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* plain move: turn it into the store itself */
10757 ins->opcode = store_opcode;
10758 ins->inst_destbasereg = var->inst_basereg;
10759 ins->inst_offset = var->inst_offset;
/* the ins is a store now, redo the dreg/sreg2 swap + spec2 */
10763 tmp_reg = ins->dreg;
10764 ins->dreg = ins->sreg2;
10765 ins->sreg2 = tmp_reg;
10768 spec2 [MONO_INST_DEST] = ' ';
10769 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10770 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10771 spec2 [MONO_INST_SRC3] = ' ';
10773 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10774 // FIXME: The backends expect the base reg to be in inst_basereg
10775 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10777 ins->inst_basereg = var->inst_basereg;
10778 ins->inst_offset = var->inst_offset;
10779 spec = INS_INFO (ins->opcode);
10781 /* printf ("INS: "); mono_print_ins (ins); */
10782 /* Create a store instruction */
10783 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10785 /* Insert it after the instruction */
10786 mono_bblock_insert_after_ins (bb, ins, store_ins);
10788 def_ins = store_ins;
10791 * We can't assign ins->dreg to var->dreg here, since the
10792 * sregs could use it. So set a flag, and do it after
/* fp-stack targets can't cache fp values in lvregs */
10795 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10796 dest_has_lvreg = TRUE;
10801 if (def_ins && !live_range_start [dreg]) {
10802 live_range_start [dreg] = def_ins;
10803 live_range_start_bb [dreg] = bb;
/***************/
/*    SREGS    */
/***************/
10810 num_sregs = mono_inst_get_src_registers (ins, sregs);
10811 for (srcindex = 0; srcindex < 3; ++srcindex) {
10812 regtype = spec [MONO_INST_SRC1 + srcindex];
10813 sreg = sregs [srcindex];
10815 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10816 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10817 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10818 MonoInst *use_ins = ins;
10819 MonoInst *load_ins;
10820 guint32 load_opcode;
10822 if (var->opcode == OP_REGVAR) {
/* variable lives in a hard register: just substitute it */
10823 sregs [srcindex] = var->dreg;
10824 //mono_inst_set_src_registers (ins, sregs);
10825 live_range_end [sreg] = use_ins;
10826 live_range_end_bb [sreg] = bb;
10830 g_assert (var->opcode == OP_REGOFFSET);
10832 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10834 g_assert (load_opcode != OP_LOADV_MEMBASE);
10836 if (vreg_to_lvreg [sreg]) {
10837 g_assert (vreg_to_lvreg [sreg] != -1);
10839 /* The variable is already loaded to an lvreg */
10840 if (G_UNLIKELY (cfg->verbose_level > 2))
10841 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10842 sregs [srcindex] = vreg_to_lvreg [sreg];
10843 //mono_inst_set_src_registers (ins, sregs);
10847 /* Try to fuse the load into the instruction */
10848 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10849 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10850 sregs [0] = var->inst_basereg;
10851 //mono_inst_set_src_registers (ins, sregs);
10852 ins->inst_offset = var->inst_offset;
10853 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10854 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10855 sregs [1] = var->inst_basereg;
10856 //mono_inst_set_src_registers (ins, sregs);
10857 ins->inst_offset = var->inst_offset;
10859 if (MONO_IS_REAL_MOVE (ins)) {
/* load + move folds to just the load */
10860 ins->opcode = OP_NOP;
10863 //printf ("%d ", srcindex); mono_print_ins (ins);
10865 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* remember the lvreg for later uses of this variable */
10867 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10868 if (var->dreg == prev_dreg) {
10870 * sreg refers to the value loaded by the load
10871 * emitted below, but we need to use ins->dreg
10872 * since it refers to the store emitted earlier.
10876 g_assert (sreg != -1);
10877 vreg_to_lvreg [var->dreg] = sreg;
10878 g_assert (lvregs_len < 1024);
10879 lvregs [lvregs_len ++] = var->dreg;
10883 sregs [srcindex] = sreg;
10884 //mono_inst_set_src_registers (ins, sregs);
10886 if (regtype == 'l') {
/* 32-bit longs: load high then low half before the ins */
10887 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10888 mono_bblock_insert_before_ins (bb, ins, load_ins);
10889 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10890 mono_bblock_insert_before_ins (bb, ins, load_ins);
10891 use_ins = load_ins;
10894 #if SIZEOF_REGISTER == 4
10895 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10897 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10898 mono_bblock_insert_before_ins (bb, ins, load_ins);
10899 use_ins = load_ins;
/* only track live ranges for vregs that existed before this pass */
10903 if (var->dreg < orig_next_vreg) {
10904 live_range_end [var->dreg] = use_ins;
10905 live_range_end_bb [var->dreg] = bb;
10909 mono_inst_set_src_registers (ins, sregs);
/* deferred from the dreg handling above: now that the sregs are
 * processed, the dreg's lvreg can safely be recorded */
10911 if (dest_has_lvreg) {
10912 g_assert (ins->dreg != -1);
10913 vreg_to_lvreg [prev_dreg] = ins->dreg;
10914 g_assert (lvregs_len < 1024);
10915 lvregs [lvregs_len ++] = prev_dreg;
10916 dest_has_lvreg = FALSE;
/* undo the dreg/sreg2 swap performed for store opcodes */
10920 tmp_reg = ins->dreg;
10921 ins->dreg = ins->sreg2;
10922 ins->sreg2 = tmp_reg;
10925 if (MONO_IS_CALL (ins)) {
/* a call clobbers the lvregs */
10926 /* Clear vreg_to_lvreg array */
10927 for (i = 0; i < lvregs_len; i++)
10928 vreg_to_lvreg [lvregs [i]] = 0;
10930 } else if (ins->opcode == OP_NOP) {
10932 MONO_INST_NULLIFY_SREGS (ins);
10935 if (cfg->verbose_level > 2)
10936 mono_print_ins_index (1, ins);
10940 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10942 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10943 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10945 for (i = 0; i < cfg->num_varinfo; ++i) {
10946 int vreg = MONO_VARINFO (cfg, i)->vreg;
10949 if (live_range_start [vreg]) {
10950 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10952 ins->inst_c1 = vreg;
10953 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10955 if (live_range_end [vreg]) {
10956 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10958 ins->inst_c1 = vreg;
10959 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10964 g_free (live_range_start);
10965 g_free (live_range_end);
10966 g_free (live_range_start_bb);
10967 g_free (live_range_end_bb);
10972 * - use 'iadd' instead of 'int_add'
10973 * - handling ovf opcodes: decompose in method_to_ir.
10974 * - unify iregs/fregs
10975 * -> partly done, the missing parts are:
10976 * - a more complete unification would involve unifying the hregs as well, so
10977 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10978 * would no longer map to the machine hregs, so the code generators would need to
10979 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10980 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10981 * fp/non-fp branches speeds it up by about 15%.
10982 * - use sext/zext opcodes instead of shifts
10984 * - get rid of TEMPLOADs if possible and use vregs instead
10985 * - clean up usage of OP_P/OP_ opcodes
10986 * - cleanup usage of DUMMY_USE
10987 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10989 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10990 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10991 * - make sure handle_stack_args () is called before the branch is emitted
10992 * - when the new IR is done, get rid of all unused stuff
10993 * - COMPARE/BEQ as separate instructions or unify them ?
10994 * - keeping them separate allows specialized compare instructions like
10995 * compare_imm, compare_membase
10996 * - most back ends unify fp compare+branch, fp compare+ceq
10997 * - integrate mono_save_args into inline_method
10998 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10999 * - handle long shift opts on 32 bit platforms somehow: they require
11000 * 3 sregs (2 for arg1 and 1 for arg2)
11001 * - make byref a 'normal' type.
11002 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11003 * variable if needed.
11004 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11005 * like inline_method.
11006 * - remove inlining restrictions
11007 * - fix LNEG and enable cfold of INEG
11008 * - generalize x86 optimizations like ldelema as a peephole optimization
11009 * - add store_mem_imm for amd64
11010 * - optimize the loading of the interruption flag in the managed->native wrappers
11011 * - avoid special handling of OP_NOP in passes
11012 * - move code inserting instructions into one function/macro.
11013 * - try a coalescing phase after liveness analysis
11014 * - add float -> vreg conversion + local optimizations on !x86
11015 * - figure out how to handle decomposed branches during optimizations, ie.
11016 * compare+branch, op_jump_table+op_br etc.
11017 * - promote RuntimeXHandles to vregs
11018 * - vtype cleanups:
11019 * - add a NEW_VARLOADA_VREG macro
11020 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11021 * accessing vtype fields.
11022 * - get rid of I8CONST on 64 bit platforms
11023 * - dealing with the increase in code size due to branches created during opcode
11025 * - use extended basic blocks
11026 * - all parts of the JIT
11027 * - handle_global_vregs () && local regalloc
11028 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11029 * - sources of increase in code size:
11032 * - isinst and castclass
11033 * - lvregs not allocated to global registers even if used multiple times
11034 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11036 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11037 * - add all micro optimizations from the old JIT
11038 * - put tree optimizations into the deadce pass
11039 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11040 * specific function.
11041 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11042 * fcompare + branchCC.
11043 * - create a helper function for allocating a stack slot, taking into account
11044 * MONO_CFG_HAS_SPILLUP.
11046 * - merge the ia64 switch changes.
11047 * - optimize mono_regstate2_alloc_int/float.
11048 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11049 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11050 * parts of the tree could be separated by other instructions, killing the tree
11051 * arguments, or stores killing loads etc. Also, should we fold loads into other
11052 * instructions if the result of the load is used multiple times ?
11053 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11054 * - LAST MERGE: 108395.
11055 * - when returning vtypes in registers, generate IR and append it to the end of the
11056 * last bb instead of doing it in the epilog.
11057 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11065 - When to decompose opcodes:
11066 - earlier: this makes some optimizations hard to implement, since the low level IR
11067 no longer contains the necessary information. But it is easier to do.
11068 - later: harder to implement, enables more optimizations.
11069 - Branches inside bblocks:
11070 - created when decomposing complex opcodes.
11071 - branches to another bblock: harmless, but not tracked by the branch
11072 optimizations, so need to branch to a label at the start of the bblock.
11073 - branches to inside the same bblock: very problematic, trips up the local
11074 reg allocator. Can be fixed by splitting the current bblock, but that is a
11075 complex operation, since some local vregs can become global vregs etc.
11076 - Local/global vregs:
11077 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11078 local register allocator.
11079 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11080 structure, created by mono_create_var (). Assigned to hregs or the stack by
11081 the global register allocator.
11082 - When to do optimizations like alu->alu_imm:
11083 - earlier -> saves work later on since the IR will be smaller/simpler
11084 - later -> can work on more instructions
11085 - Handling of valuetypes:
11086 - When a vtype is pushed on the stack, a new temporary is created, an
11087 instruction computing its address (LDADDR) is emitted and pushed on
11088 the stack. Need to optimize cases when the vtype is used immediately as in
11089 argument passing, stloc etc.
11090 - Instead of the to_end stuff in the old JIT, simply call the function handling
11091 the values on the stack before emitting the last instruction of the bb.
11094 #endif /* DISABLE_JIT */