2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
/* Heuristic tuning constants for the CIL-to-IR pass: branch cost used by
 * inlining decisions, and the maximum IL length of an inline candidate. */
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
/* Error-exit macros. Each records a failure on the MonoCompile and jumps to
 * exception_exit. NOTE(review): their bodies are truncated in this view —
 * the continuation lines after the condition are not visible here. */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
/* Immediate-operand opcode mappings; non-static, shared with other mini files. */
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
/* Call/value-type emission helpers used across the IR builder. */
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
/* Signatures for runtime trampolines, defined elsewhere in the JIT. */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
/* First expansion of mini-ops.h: builds the dest/src register-spec table.
 * NOTE(review): the corresponding array declaration and #undef lines are
 * not visible in this chunk. */
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
/* Second expansion: per-opcode source-register counts (0..3). */
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
/* Initialize a MonoMethodVar's liveness-range bookkeeping (body truncated). */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
/* Thin public wrappers over the vreg allocators: integer vregs... */
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
/* ...floating-point vregs... */
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
/* ...pointer-sized vregs... */
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
/* ...and a destination vreg whose kind is chosen from the eval-stack type. */
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove: map a MonoType to the IR move opcode used for it.
 * NOTE(review): the return-type line, OP_* return statements and several
 * cases were elided from this chunk — only the dispatch skeleton is visible.
 */
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
206 switch (type->type) {
209 case MONO_TYPE_BOOLEAN:
221 case MONO_TYPE_FNPTR:
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
231 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying basetype; the switch is re-entered. */
240 case MONO_TYPE_VALUETYPE:
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 case MONO_TYPE_TYPEDBYREF:
/* Generic instances decay to their container class's byval type. */
250 case MONO_TYPE_GENERICINST:
251 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR presumably land here and require generic sharing — TODO confirm. */
255 g_assert (cfg->generic_sharing_context);
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb: debug dump of a basic block — its in/out edges
 * (block number and dfn) followed by every instruction in the block.
 */
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
281 * Can't put this at the beginning, since other files reference stuff from this
/* UNVERIFIED: bail out of IL verification; optionally trap in the debugger. */
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* GET_BBLOCK: look up (or lazily create and register) the basic block that
 * starts at IL offset 'ip'. Out-of-range ip is unverifiable code. */
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit a LEA-style address computation sr1 + sr2<<shift + imm. */
298 #if defined(__i386__) || defined(__x86_64__)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, sign-extend an I4 operand mixed with a PTR operand so
 * both sources of a binop are full-width; no-op on 32-bit targets. */
310 #if SIZEOF_REGISTER == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP: pop two eval-stack values, type the result via type_from_op,
 * emit the instruction and push the (possibly decomposed) result. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for single-operand opcodes. */
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
/* ADD_BINCOND: emit compare + conditional branch, wiring both the taken
 * (target) and fall-through edges into the CFG; spills the eval stack at the
 * block boundary before the branch. */
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit blocks print specially). */
394 if (from->cil_code) {
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 printf ("edge from entry to exit\n");
/* Skip the out-edge if it already exists. */
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot in the compile mempool (no freeing needed). */
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
/* Mirror the same dedup-then-grow logic for the to->in_bb edge list. */
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
/* Public wrapper over the static link_bblock. */
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
/* A filter block lives between filter_offset and the handler start. */
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not the handler. */
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block: collect the handler blocks of the given clause type
 * whose protected range covers 'ip' but not 'target' — i.e. the handlers
 * that must run when control leaves via this branch. Returns a GList.
 */
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* The branch exits this clause: source inside, destination outside. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region: get-or-create the stack-pointer local used by
 * the handler region; cached in cfg->spvars keyed by region token.
 */
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object local for a handler at 'offset', if any. */
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object local for a handler at 'offset'. */
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack — TODO confirm,
 * the byref check itself is not visible in this chunk. */
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
/* Small integer types all widen to I4 on the eval stack. */
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
/* Enums are treated as their underlying type; the switch is re-entered. */
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of a numeric binop given the two operand stack types;
 * STACK_INV marks combinations rejected by the verifier. Rows/columns are
 * indexed by MonoStackType (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE). */
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type (neg_table). */
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor); floats are invalid. */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, nonzero = allowed (values >1 appear to
 * encode restricted/unverifiable comparisons — TODO confirm consumers). */
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Result type of shifts: value type by shift-amount type. */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is an opcode *delta* added to the generic CEE_/OP_ opcode to
 * obtain the type-specific variant, indexed by the result stack type. */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
/* Arithmetic binops: derive result type from the operand-type table, then
 * specialize the generic opcode with the matching delta map. */
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
/* Bitwise binops (and/or/xor) use the integer-only table. */
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type comes from the shifted value's row. */
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant based on the first operand's width. */
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
/* Immediate compare: only src1 exists, so the table is indexed twice by it. */
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and set-on-compare (ceq family). */
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare variants: only table entries with bit 0 set are valid. */
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not). */
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/U1/I2/U2/I4/U4 always yield an I4. */
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: convert from an unsigned integer to R8. */
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; on 64-bit, PTR-like sources are a move. */
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
/* Conversions producing I8, with and without overflow checks. */
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions producing R4/R8. */
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: invalid for floating point. */
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
/* Load opcodes: result type is fixed by the opcode itself. */
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative klass (object). */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
/* Mapping from MONO_TYPE_* ordering to eval-stack types (truncated table). */
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: sanity-check argument stack types against a
 * method signature. NOTE(review): heavily truncated here — return statements
 * and most case bodies are not visible in this chunk.
 */
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
/* Managed-pointer args must match byref parameters, and vice versa. */
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
/* R8 stack entries are only compatible with R4/R8 parameters. */
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created; only meaningful on targets that define MONO_ARCH_NEED_GOT_VAR
 * and when compiling AOT. */
1050 inline static MonoInst *
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used by generic sharing; it must
 * be stack-allocated, hence MONO_INST_INDIRECT. */
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
/* Map an eval-stack entry back to a MonoType (inverse of the loading path). */
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack category (STACK_* constant).
 * NOTE(review): the return statements between cases were elided here. */
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 switch (mono_type_get_underlying_type (t)->type) {
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
/*
 * array_access_to_klass: map a CEE_LDELEM_*/CEE_STELEM_* opcode to the
 * MonoClass of the element it accesses. Unsigned and signed variants map to
 * distinct default classes; unknown opcodes assert.
 */
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
/*
 * mono_compile_get_interface_var: return the local used to carry the value at
 * eval-stack slot 'slot' across a basic-block boundary, reusing a cached one
 * (cfg->intvars, keyed by slot and stack type) when available.
 */
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
/* Cache hit: an interface variable for this (slot, type) already exists. */
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types presumably fall through to here — TODO confirm. */
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info: when compiling AOT, remember the (image, token) pair
 * behind 'key' so the AOT compiler can re-resolve it later.
 */
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or inherit) the set of spill locals. */
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
/* Reuse the successor's in_stack if it already has one, so both sides
 * of the join agree on the spill variables. */
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpouse, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * a mismatched in_scount at a join makes the method unverifiable. */
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
/* Spill each live stack entry into its temp and replace it on the stack. */
1312 locals = bb->out_stack;
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
/* NOTE(review): this extract is elided (the non-AOT "else" line and closing
 * braces are not visible); comments describe only the visible statements. */
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1363 if (cfg->compile_aot) {
/* AOT: the interface id is not known until load time, so materialize it as an
 * ADJUSTED_IID patch-info constant and add it to the vtable pointer. */
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant; load directly at a negative
 * offset from the vtable (the offsets array precedes the vtable in memory). */
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1378 * stored in "klass_reg" implements the interface "klass".
/* The implemented-interface test reads one byte of the class's
 * interface_bitmap: byte index = iid >> 3, bit mask = 1 << (iid & 7). */
1381 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 int ibitmap_byte_reg = alloc_preg (cfg);
1386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1388 if (cfg->compile_aot) {
/* AOT: the iid is a patch-info constant, so compute byte index and bit mask
 * at run time with explicit shift/mask instructions. */
1389 int iid_reg = alloc_preg (cfg);
1390 int shifted_iid_reg = alloc_preg (cfg);
1391 int ibitmap_byte_address_reg = alloc_preg (cfg);
1392 int masked_iid_reg = alloc_preg (cfg);
1393 int iid_one_bit_reg = alloc_preg (cfg);
1394 int iid_bit_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1400 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask fold to constants.  Sign-extension from the I1
 * load is harmless here since the AND keeps only a single bit. */
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1410 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1411 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap probe as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from the MonoVTable instead of the MonoClass. */
1414 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1416 int ibitmap_reg = alloc_preg (cfg);
1417 int ibitmap_byte_reg = alloc_preg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1421 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at run
 * time from the patch-info IID constant. */
1422 int iid_reg = alloc_preg (cfg);
1423 int shifted_iid_reg = alloc_preg (cfg);
1424 int ibitmap_byte_address_reg = alloc_preg (cfg);
1425 int masked_iid_reg = alloc_preg (cfg);
1426 int iid_one_bit_reg = alloc_preg (cfg);
1427 int iid_bit_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1433 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* NOTE(review): the class variant uses OP_SHR_IMM/OP_IAND_IMM here while this
 * one uses OP_ISHR_IMM/OP_AND_IMM — presumably equivalent for these operand
 * sizes, but worth confirming against the opcode definitions. */
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 * Emit code which checks whether the interface id of @klass is not greater
1444 * than the value given by max_iid_reg.
1447 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 if (cfg->compile_aot) {
/* AOT: the iid must come from a patch-info constant. */
1451 int iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure (max_iid < iid): branch to false_target when given, otherwise
 * throw InvalidCastException.  NOTE(review): the guarding if/else lines are
 * elided in this extract. */
1458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1463 /* Same as above, but obtains max_iid from a vtable */
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoVTable. */
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoClass. */
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an "is instance" test against a non-interface class: walk is avoided
 * by probing the supertypes table of the class in klass_reg at the depth of
 * @klass; equality means klass_reg's class derives from (or is) @klass.
 * Branches to true_target on success, false_target on provable failure. */
1486 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1488 int idepth_reg = alloc_preg (cfg);
1489 int stypes_reg = alloc_preg (cfg);
1490 int stype = alloc_preg (cfg);
1492 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: first make sure the supertypes table is long enough,
 * otherwise indexing at (idepth - 1) below would read out of bounds. */
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1499 if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass* pointer can be embedded directly as an immediate. */
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: range-check
 * the interface id, then test the interface bitmap bit.  With a true_target
 * this acts as an isinst branch; without one a failed test throws. */
1510 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int intf_reg = alloc_preg (cfg);
1514 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1515 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* NOTE(review): the if/else around the next two lines is elided here. */
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 * Variant of the above that takes a register to the class, not the vtable.
1527 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1529 int intf_bit_reg = alloc_preg (cfg);
1531 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1532 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* NOTE(review): the if/else around the next two lines is elided here. */
1535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit code which throws InvalidCastException unless the class pointer in
 * klass_reg is exactly @klass (no hierarchy walk, pure equality). */
1541 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1543 if (cfg->compile_aot) {
/* AOT: compare against a patchable class constant loaded into a register. */
1544 int const_reg = alloc_preg (cfg);
1545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Like mini_emit_class_check, but instead of throwing, branch to @target
 * using the caller-supplied condition @branch_op after the comparison. */
1554 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1556 if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Emit a castclass check for the object in obj_reg whose class pointer is in
 * klass_reg; throws InvalidCastException on failure, branches to
 * object_is_null for the accepted-null cases.  obj_reg == -1 skips the
 * object-shape (vector) check, used for the recursive array-of-array case.
 * NOTE(review): several lines (enclosing if/else, closing braces) are elided
 * in this extract; comments cover only the visible statements. */
1567 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 int rank_reg = alloc_preg (cfg);
1571 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly, then the element classes are checked. */
1573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1575 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1576 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1578 if (klass->cast_class == mono_defaults.object_class) {
/* Target element type is object: a reference-element array always matches;
 * otherwise only enum-typed elements are accepted. */
1579 int parent_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1581 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1582 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class) {
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1589 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1592 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1595 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1596 /* Check that the object is a vector too */
/* An SZARRAY must have a NULL bounds pointer; a multi-dim array of the same
 * element type would otherwise pass the rank/element checks above. */
1597 int bounds_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1600 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: probe the supertypes table as in mini_emit_isninst_cast,
 * but throw instead of branching on failure. */
1603 int idepth_reg = alloc_preg (cfg);
1604 int stypes_reg = alloc_preg (cfg);
1605 int stype = alloc_preg (cfg);
1607 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1608 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1610 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1614 mini_emit_class_check (cfg, stype, klass);
/* Emit inline IR that fills [destreg + offset, +size) with byte value @val
 * (only 0 is supported, see the assert).  Small aligned sizes use a single
 * store-immediate; larger sizes store from a zeroed register in descending
 * power-of-two chunks.  NOTE(review): the switch/case labels and loop
 * conditions around the stores are elided in this extract. */
1619 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1623 g_assert (val == 0);
1628 if ((size <= 4) && (size <= align)) {
/* Single aligned store-immediate, picked by size (1/2/4/8 bytes). */
1631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 #if SIZEOF_REGISTER == 8
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then emit stores. */
1647 val_reg = alloc_preg (cfg);
1649 if (SIZEOF_REGISTER == 8)
1650 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1655 /* This could be optimized further if neccesary */
1657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1664 #if !NO_UNALIGNED_ACCESS
1665 if (SIZEOF_REGISTER == 8) {
/* 64-bit: align up to 8 with a 4-byte store, then use 8-byte stores. */
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1696 #endif /* DISABLE_JIT */
/* Emit inline IR copying @size bytes from [srcreg + soffset] to
 * [destreg + doffset], using the widest loads/stores the alignment allows.
 * NOTE(review): the loop/branch structure between the visible load/store
 * pairs is elided in this extract. */
1699 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: byte-by-byte copy. */
1707 /* This could be optimized further if neccesary */
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1718 #if !NO_UNALIGNED_ACCESS
1719 if (SIZEOF_REGISTER == 8) {
/* 64-bit registers available: copy in 8-byte chunks first. */
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1740 cur_reg = alloc_preg (cfg);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1748 cur_reg = alloc_preg (cfg);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a return type to the call IR opcode: the opcode family is chosen by the
 * return kind (void/int/long/float/vtype) and the calli/virt flags select the
 * REG / VIRT / plain variant.  NOTE(review): many case labels between the
 * visible ones are elided in this extract. */
1760 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, handled before the switch. */
1763 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve generic-sharing type variables to their basic type first. */
1766 type = mini_get_basic_type_from_generic (gsctx, type);
1767 switch (type->type) {
1768 case MONO_TYPE_VOID:
1769 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1772 case MONO_TYPE_BOOLEAN:
1775 case MONO_TYPE_CHAR:
1778 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 case MONO_TYPE_FNPTR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_CLASS:
1785 case MONO_TYPE_STRING:
1786 case MONO_TYPE_OBJECT:
1787 case MONO_TYPE_SZARRAY:
1788 case MONO_TYPE_ARRAY:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1795 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1796 case MONO_TYPE_VALUETYPE:
/* Enums retry the switch with their underlying integral type. */
1797 if (type->data.klass->enumtype) {
1798 type = mono_class_enum_basetype (type->data.klass);
1801 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 case MONO_TYPE_TYPEDBYREF:
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_GENERICINST:
/* Generic instances retry with the generic type definition. */
1805 type = &type->data.generic_class->container_class->byval_arg;
1808 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1814 * target_type_is_incompatible:
1815 * @cfg: MonoCompile context
1817 * Check that the item @arg on the evaluation stack can be stored
1818 * in the target type (can be a local, or field, etc).
1819 * The cfg arg can be used to check if we need verification or just
1822 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): several case labels and return statements are elided in this
 * extract; each visible check compares the eval-stack kind (STACK_*) of @arg
 * against what the target MonoType requires. */
1825 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1827 MonoType *simple_type;
1830 if (target->byref) {
1831 /* FIXME: check that the pointed to types match */
1832 if (arg->type == STACK_MP)
1833 return arg->klass != mono_class_from_mono_type (target);
1834 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before the big switch. */
1839 simple_type = mono_type_get_underlying_type (target);
1840 switch (simple_type->type) {
1841 case MONO_TYPE_VOID:
1845 case MONO_TYPE_BOOLEAN:
1848 case MONO_TYPE_CHAR:
1851 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 /* STACK_MP is needed when setting pinned locals */
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1861 case MONO_TYPE_FNPTR:
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1872 /* FIXME: check type compatibility */
1876 if (arg->type != STACK_I8)
1881 if (arg->type != STACK_R8)
1884 case MONO_TYPE_VALUETYPE:
/* Value types must match the exact class, not just the stack kind. */
1885 if (arg->type != STACK_VTYPE)
1887 klass = mono_class_from_mono_type (simple_type);
1888 if (klass != arg->klass)
1891 case MONO_TYPE_TYPEDBYREF:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_GENERICINST:
1899 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1900 if (arg->type != STACK_VTYPE)
1902 klass = mono_class_from_mono_type (simple_type);
1903 if (klass != arg->klass)
1907 if (arg->type != STACK_OBJ)
1909 /* FIXME: check type compatibility */
1913 case MONO_TYPE_MVAR:
1914 /* FIXME: all the arguments must be references for now,
1915 * later look inside cfg and see if the arg num is
1916 * really a reference
1918 g_assert (cfg->generic_sharing_context);
1919 if (arg->type != STACK_OBJ)
1923 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): case labels and return statements between the visible lines
 * are elided in this extract. */
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
/* The implicit "this" argument (args [0]) must be a reference or pointer. */
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1954 simple_type = sig->params [i];
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1963 case MONO_TYPE_BOOLEAN:
1966 case MONO_TYPE_CHAR:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1989 if (args [i]->type != STACK_I8)
1994 if (args [i]->type != STACK_R8)
1997 case MONO_TYPE_VALUETYPE:
/* Enums retry the switch with their underlying integral type. */
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_GENERICINST:
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2014 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call equivalent (used when a
 * virtual call can be statically dispatched).  NOTE(review): most case/return
 * pairs are elided in this extract; only the skeleton is visible. */
2022 callvirt_to_call (int opcode)
2027 case OP_VOIDCALLVIRT:
/* Unknown opcode: caller passed something that is not a CALLVIRT variant. */
2036 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE equivalent, used when the
 * call target address is loaded from memory (vtable/IMT slot).
 * NOTE(review): the surrounding switch/case lines are partly elided. */
2043 callvirt_to_call_membase (int opcode)
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2051 return OP_FCALL_MEMBASE;
2053 return OP_LCALL_MEMBASE;
2055 return OP_VCALL_MEMBASE;
2057 g_assert_not_reached ();
2063 #ifdef MONO_ARCH_HAVE_IMT
/* Pass the identifying method (or the supplied imt_arg) to an interface call:
 * either through the architecture's dedicated IMT register, or via the
 * arch-specific fallback when MONO_ARCH_IMT_REG is not defined. */
2065 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2067 #ifdef MONO_ARCH_IMT_REG
2068 int method_reg = alloc_preg (cfg);
/* Caller already computed the IMT value: just move it. */
2071 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2072 } else if (cfg->compile_aot) {
2073 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2076 MONO_INST_NEW (cfg, ins, OP_PCONST);
2077 ins->inst_p0 = call->method;
2078 ins->dreg = method_reg;
2079 MONO_ADD_INS (cfg->cbb, ins);
2082 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2084 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from @mp and fill in its target.
 * NOTE(review): the lines setting the ip/type fields are elided here. */
2089 static MonoJumpInfo *
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2096 ji->data.target = target;
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/* Build a MonoCallInst for a call with signature @sig and arguments @args:
 * picks the opcode from the return type, arranges the value-type return
 * buffer, converts soft-float R4 arguments, and lets the backend lower the
 * argument passing.  The instruction is NOT added to a bblock here.
 * NOTE(review): some lines (closing braces, a few assignments) are elided. */
2104 inline static MonoCallInst *
2105 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2106 MonoInst **args, int calli, int virtual)
2109 #ifdef MONO_ARCH_SOFT_FLOAT
2113 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2116 call->signature = sig;
2118 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2120 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Value-type return: allocate a local to receive the result. */
2121 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2124 temp->backend.is_pinvoke = sig->pinvoke;
2127 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2128 * address of return value to increase optimization opportunities.
2129 * Before vtype decomposition, the dreg of the call ins itself represents the
2130 * fact the call modifies the return value. After decomposition, the call will
2131 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2132 * will be transformed into an LDADDR.
2134 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2135 loada->dreg = alloc_preg (cfg);
2136 loada->inst_p0 = temp;
2137 /* We reference the call too since call->dreg could change during optimization */
2138 loada->inst_p1 = call;
2139 MONO_ADD_INS (cfg->cbb, loada);
2141 call->inst.dreg = temp->dreg;
2143 call->vret_var = loada;
2144 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2145 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2147 #ifdef MONO_ARCH_SOFT_FLOAT
2149 * If the call has a float argument, we would need to do an r8->r4 conversion using
2150 * an icall, but that cannot be done during the call sequence since it would clobber
2151 * the call registers + the stack. So we do it before emitting the call.
2153 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2155 MonoInst *in = call->args [i];
2157 if (i >= sig->hasthis)
2158 t = sig->params [i - sig->hasthis];
2160 t = &mono_defaults.int_class->byval_arg;
2161 t = mono_type_get_underlying_type (t);
2163 if (!t->byref && t->type == MONO_TYPE_R4) {
2164 MonoInst *iargs [1];
2168 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2170 /* The result will be in an int vreg */
2171 call->args [i] = conv;
/* Backend lowers argument placement and records stack usage. */
2176 mono_arch_emit_call (cfg, call);
2178 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2179 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the function pointer in @addr. */
2184 inline static MonoInst*
2185 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2187 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* The *_REG call opcodes take the target address in sreg1. */
2189 call->inst.sreg1 = addr->dreg;
2191 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2193 return (MonoInst*)call;
/* Emit an indirect call that also passes an rgctx (runtime generic context)
 * value in the architecture's dedicated RGCTX register.  On architectures
 * without MONO_ARCH_RGCTX_REG this is unreachable. */
2196 inline static MonoInst*
2197 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2199 #ifdef MONO_ARCH_RGCTX_REG
2204 rgctx_reg = mono_alloc_preg (cfg);
2205 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2207 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2209 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2210 cfg->uses_rgctx_reg = TRUE;
2211 call->rgctx_reg = TRUE;
2213 return (MonoInst*)call;
2215 g_assert_not_reached ();
/* Emit a managed call to @method.  Chooses between direct calls, statically
 * dispatched virtual calls, delegate-invoke fast paths, interface (IMT)
 * dispatch, and plain vtable dispatch.  @this being non-NULL selects the
 * virtual calling convention; @imt_arg optionally overrides the IMT value.
 * NOTE(review): some lines (closing braces, else keywords, a slot_reg
 * initialization) are elided in this extract. */
2221 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2222 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2224 gboolean virtual = this != NULL;
2225 gboolean enable_for_aot = TRUE;
2228 if (method->string_ctor) {
2229 /* Create the real signature */
2230 /* FIXME: Cache these */
2231 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* String ctors actually return the string, despite the metadata signature. */
2232 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2237 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2239 if (this && sig->hasthis &&
2240 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2241 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
/* Possible remoting proxy: route through a wrapper that checks at run time. */
2242 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2244 call->method = method;
2246 call->inst.flags |= MONO_INST_HAS_METHOD;
2247 call->inst.inst_left = this;
2250 int vtable_reg, slot_reg, this_reg;
2252 this_reg = this->dreg;
2254 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2255 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2256 /* Make a call to delegate->invoke_impl */
2257 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2258 call->inst.inst_basereg = this_reg;
2259 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2260 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2262 return (MonoInst*)call;
2266 if ((!cfg->compile_aot || enable_for_aot) &&
2267 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2268 (MONO_METHOD_IS_FINAL (method) &&
2269 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2271 * the method is not virtual, we just need to ensure this is not null
2272 * and then we can call the method directly.
2274 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2275 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2278 if (!method->string_ctor) {
2279 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2280 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2281 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2284 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
2291 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2293 * the method is virtual, but we can statically dispatch since either
2294 * it's class or the method itself are sealed.
2295 * But first we need to ensure it's not a null reference.
2297 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2298 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2299 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2301 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2302 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2304 return (MonoInst*)call;
/* True virtual dispatch: call through a slot loaded from the vtable. */
2307 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2309 vtable_reg = alloc_preg (cfg);
2310 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2311 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2313 #ifdef MONO_ARCH_HAVE_IMT
/* Interface with IMT: slots live at negative offsets before the vtable. */
2315 guint32 imt_slot = mono_method_get_imt_slot (method);
2316 emit_imt_argument (cfg, call, imt_arg);
2317 slot_reg = vtable_reg;
2318 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2321 if (slot_reg == -1) {
/* Non-IMT interface dispatch: index into the per-interface offset table. */
2322 slot_reg = alloc_preg (cfg);
2323 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2324 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual dispatch: slot at vtable base + method's vtable index. */
2327 slot_reg = vtable_reg;
2328 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2329 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2330 #ifdef MONO_ARCH_HAVE_IMT
2332 g_assert (mono_method_signature (method)->generic_param_count);
2333 emit_imt_argument (cfg, call, imt_arg);
2338 call->inst.sreg1 = slot_reg;
2339 call->virtual = TRUE;
2342 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2344 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes the vtable-derived
 * rgctx value in the architecture's RGCTX register when one exists. */
2348 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2349 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2356 #ifdef MONO_ARCH_RGCTX_REG
2357 rgctx_reg = mono_alloc_preg (cfg);
2358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2363 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2365 call = (MonoCallInst*)ins;
2367 #ifdef MONO_ARCH_RGCTX_REG
2368 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2369 cfg->uses_rgctx_reg = TRUE;
2370 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own metadata signature and no
 * explicit IMT argument. */
2379 static inline MonoInst*
2380 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2382 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to a native function address @func with signature @sig.
 * NOTE(review): the lines storing @func into the call instruction are elided
 * in this extract. */
2386 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2393 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2396 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2398 return (MonoInst*)call;
/* Emit a call to a registered JIT icall, looked up by its C function address;
 * the call goes through the icall's wrapper with the icall's signature. */
2401 inline static MonoInst*
2402 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2404 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2408 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2412 * mono_emit_abs_call:
2414 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2416 inline static MonoInst*
2417 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2418 MonoMethodSignature *sig, MonoInst **args)
2420 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2424 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch record is registered in cfg->abs_patches so the ABS resolver can
 * map the fake "address" back to the real target at patch time. */
2427 if (cfg->abs_patches == NULL)
2428 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2429 g_hash_table_insert (cfg->abs_patches, ji, ji);
2430 ins = mono_emit_native_call (cfg, ji, sig, args);
2431 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* After a call whose result may come back as a narrow integer (pinvoke or
 * LLVM), emit an explicit sign/zero-extension of the result register.
 * NOTE(review): the switch's break/default lines and the final return are
 * elided in this extract. */
2436 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2438 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2439 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2443 * Native code might return non register sized integers
2444 * without initializing the upper bits.
2446 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2447 case OP_LOADI1_MEMBASE:
2448 widen_op = OP_ICONV_TO_I1;
2450 case OP_LOADU1_MEMBASE:
2451 widen_op = OP_ICONV_TO_U1;
2453 case OP_LOADI2_MEMBASE:
2454 widen_op = OP_ICONV_TO_I2;
2456 case OP_LOADU2_MEMBASE:
2457 widen_op = OP_ICONV_TO_U2;
2463 if (widen_op != -1) {
2464 int dreg = alloc_preg (cfg);
2467 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2468 widen->type = ins->type;
/* Lazily look up (and cache in a static) the managed String.memcpy helper
 * used as the fallback for large value-type copies. */
2478 get_memcpy_method (void)
2480 static MonoMethod *memcpy_method = NULL;
2481 if (!memcpy_method) {
2482 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
/* The method is expected to exist in any supported corlib. */
2484 g_error ("Old corlib found. Install a new one");
2486 return memcpy_method;
2490 * Emit code to copy a valuetype of type @klass whose address is stored in
2491 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native selects the unmanaged (marshalled) size of the type.
 * NOTE(review): some argument-setup lines and braces are elided here. */
2494 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2496 MonoInst *iargs [3];
2499 MonoMethod *memcpy_method;
2503 * This check breaks with spilled vars... need to handle it during verification anyway.
2504 * g_assert (klass && klass == src->klass && klass == dest->klass);
2508 n = mono_class_native_size (klass, &align);
2510 n = mono_class_value_size (klass, &align);
2512 #if HAVE_WRITE_BARRIERS
2513 /* if native is true there should be no references in the struct */
2514 if (klass->has_references && !native) {
2515 /* Avoid barriers when storing to the stack */
2516 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2517 (dest->opcode == OP_LDADDR))) {
/* GC-visible destination: go through mono_value_copy so the collector's
 * write barriers are honored. */
2520 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2522 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined; larger ones call the managed memcpy helper. */
2527 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2528 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2529 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2533 EMIT_NEW_ICONST (cfg, iargs [2], n);
2535 memcpy_method = get_memcpy_method ();
2536 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the managed "memset" helper with 3 arguments,
 * which corlib defines on String; aborts if the installed corlib lacks it.
 */
2541 get_memset_method (void)
2543 static MonoMethod *memset_method = NULL;
2544 if (!memset_method) {
2545 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2547 g_error ("Old corlib found. Install a new one");
2549 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR zero-initializing a valuetype of type @klass at address
 * dest->dreg (CIL 'initobj'). Small types are cleared with an inline memset;
 * larger ones call the managed memset helper with value 0.
 */
2553 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2555 MonoInst *iargs [3];
2558 MonoMethod *memset_method;
2560 /* FIXME: Optimize this for the case when dest is an LDADDR */
2562 mono_class_init (klass);
2563 n = mono_class_value_size (klass, &align);
/* Small structs: clear inline instead of calling the helper. */
2565 if (n <= sizeof (gpointer) * 5) {
2566 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2569 memset_method = get_memset_method ();
2571 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2572 EMIT_NEW_ICONST (cfg, iargs [2], n);
2573 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for shared generic code.
 * The source depends on how @method is shared (see @context_used):
 *  - generic methods read the MRGCTX from the dedicated vtable variable;
 *  - static/valuetype methods read the vtable variable, dereferencing
 *    MonoMethodRuntimeGenericContext.class_vtable when an MRGCTX is stored;
 *  - otherwise the vtable is loaded from the 'this' argument.
 */
2578 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2580 MonoInst *this = NULL;
/* Only meaningful when compiling shared generic code. */
2582 g_assert (cfg->generic_sharing_context);
2584 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2585 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2586 !method->klass->valuetype)
2587 EMIT_NEW_ARGLOAD (cfg, this, 0);
2589 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2590 MonoInst *mrgctx_loc, *mrgctx_var;
2593 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2595 mrgctx_loc = mono_get_vtable_var (cfg);
2596 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2599 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2600 MonoInst *vtable_loc, *vtable_var;
2604 vtable_loc = mono_get_vtable_var (cfg);
2605 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2607 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; extract the class vtable from it. */
2608 MonoInst *mrgctx_var = vtable_var;
2611 vtable_reg = alloc_preg (cfg);
2612 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2613 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: load the vtable from 'this'. */
2619 int vtable_reg, res_reg;
2621 vtable_reg = alloc_preg (cfg);
2622 res_reg = alloc_preg (cfg);
2623 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool MP) an RGCTX-entry patch descriptor for @method:
 * @in_mrgctx says whether the lookup goes through an MRGCTX, @patch_type /
 * @patch_data describe the wrapped patch target, @info_type the RGCTX slot kind.
 */
2628 static MonoJumpInfoRgctxEntry *
2629 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2631 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2632 res->method = method;
2633 res->in_mrgctx = in_mrgctx;
2634 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2635 res->data->type = patch_type;
2636 res->data->data.target = patch_data;
2637 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX fetch trampoline which resolves @entry
 * starting from the runtime generic context @rgctx.
 */
2642 static inline MonoInst*
2643 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2645 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX slot of kind @rgctx_type for @klass,
 * going through the runtime generic context of the current method.
 */
2649 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2650 MonoClass *klass, int rgctx_type)
2652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR fetching the RGCTX slot of kind @rgctx_type for @cmethod,
 * going through the runtime generic context of the current method.
 */
2659 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2660 MonoMethod *cmethod, int rgctx_type)
2662 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2663 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX slot of kind @rgctx_type for @field,
 * going through the runtime generic context of the current method.
 */
2669 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2670 MonoClassField *field, int rgctx_type)
2672 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2673 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2675 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that object @obj is an instance of exactly
 * @array_class, throwing ArrayTypeMismatchException otherwise (used for
 * array covariance checks on stelem). The comparison strategy depends on
 * the compilation mode: class pointers under MONO_OPT_SHARED, an
 * RGCTX-fetched vtable under generic sharing, or a (possibly AOT-patched)
 * vtable constant otherwise.
 */
2679 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2681 int vtable_reg = alloc_preg (cfg);
2682 int context_used = 0;
2684 if (cfg->generic_sharing_context)
2685 context_used = mono_class_check_context_used (array_class);
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2689 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code cannot burn a vtable pointer in: compare MonoClass pointers. */
2690 int class_reg = alloc_preg (cfg);
2691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2692 if (cfg->compile_aot) {
2693 int klass_reg = alloc_preg (cfg);
2694 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2695 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2699 } else if (context_used) {
/* Generic sharing: the expected vtable comes from the RGCTX at run time. */
2700 MonoInst *vtable_ins;
2702 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2703 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2705 if (cfg->compile_aot) {
2706 int vt_reg = alloc_preg (cfg);
2707 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2708 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit IR recording the source class (read
 * from the object in @obj_reg) and target class @klass into the JIT TLS
 * fields class_cast_from/class_cast_to, so a failing cast can report both
 * types. No-op when better_cast_details is off.
 */
2718 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2720 if (mini_get_debug_options ()->better_cast_details) {
2721 int to_klass_reg = alloc_preg (cfg);
2722 int vtable_reg = alloc_preg (cfg);
2723 int klass_reg = alloc_preg (cfg);
2724 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is arch-specific; without it the feature is unavailable. */
2727 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2731 MONO_ADD_INS (cfg->cbb, tls_get);
2732 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2736 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Emit IR clearing the TLS cast-details state recorded by
 * save_cast_details() once the cast has succeeded. No-op unless
 * --debug=casts is active.
 */
2742 reset_cast_details (MonoCompile *cfg)
2744 /* Reset the variables holding the cast details */
2745 if (mini_get_debug_options ()->better_cast_details) {
2746 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2748 MONO_ADD_INS (cfg->cbb, tls_get);
2749 /* It is enough to reset the from field */
2750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_unbox_nullable:
 *
 *   Emit IR unboxing VAL as a Nullable<T> by calling the type's managed
 * "Unbox" helper. Under generic sharing (@context_used != 0) the helper's
 * address and the RGCTX argument are fetched at run time and an indirect
 * rgctx call is emitted instead of a direct call.
 */
2755 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2756 * generic code is generated.
2759 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2761 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2764 MonoInst *rgctx, *addr;
2766 /* FIXME: What if the class is shared? We might not
2767 have to get the address of the method from the
2769 addr = emit_get_rgctx_method (cfg, context_used, method,
2770 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2772 rgctx = emit_get_rgctx (cfg, method, context_used);
2774 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2776 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for CIL 'unbox': verify that the object on *sp is a boxed
 * instance whose element class matches @klass (rank must be 0, then the
 * vtable's class/element_class is checked, via the RGCTX under generic
 * sharing), throwing InvalidCastException on mismatch; the result is the
 * address of the value payload, i.e. obj + sizeof (MonoObject).
 */
2781 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2785 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2786 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2787 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2788 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2790 obj_reg = sp [0]->dreg;
2791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2794 /* FIXME: generics */
2795 g_assert (klass->rank == 0);
/* A boxed valuetype can never be an array: rank must be 0. */
2798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2799 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2801 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2805 MonoInst *element_class;
2807 /* This assertion is from the unboxcast insn */
2808 g_assert (klass->rank == 0);
/* Shared code: fetch the expected element class from the RGCTX. */
2810 element_class = emit_get_rgctx_klass (cfg, context_used,
2811 klass->element_class, MONO_RGCTX_INFO_KLASS);
2813 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2814 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2816 save_cast_details (cfg, klass->element_class, obj_reg);
2817 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2818 reset_cast_details (cfg);
/* Result: address of the unboxed payload right after the object header. */
2821 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2822 MONO_ADD_INS (cfg->cbb, add);
2823 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit IR allocating an object of type @klass (@for_box indicates a box
 * allocation). Picks the cheapest available path: mono_object_new under
 * MONO_OPT_SHARED, a corlib-token helper for out-of-line AOT code, a managed
 * GC allocator when one exists, or a class-specific allocation function
 * (which may take the instance size in words when pass_lw is set).
 */
2830 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2832 MonoInst *iargs [2];
2835 if (cfg->opt & MONO_OPT_SHARED) {
2836 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2837 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2839 alloc_ftn = mono_object_new;
2840 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2841 /* This happens often in argument checking code, eg. throw new FooException... */
2842 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2843 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2844 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2846 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2847 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2850 if (managed_alloc) {
/* GC provides an inline managed allocator: call it with the vtable. */
2851 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2852 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2854 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2856 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to whole pointer words for the allocator. */
2857 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2858 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2859 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2862 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2866 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Shared-code variant of handle_alloc (): the vtable is not known at
 * compile time but is supplied at run time in @data_inst, so the allocation
 * goes through mono_object_new (under MONO_OPT_SHARED) or
 * mono_object_new_specific. Managed allocators are currently unusable here
 * (see FIXME: the vtable of an open class cannot be obtained).
 */
2870 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2873 MonoInst *iargs [2];
2874 MonoMethod *managed_alloc = NULL;
2878 FIXME: we cannot get managed_alloc here because we can't get
2879 the class's vtable (because it's not a closed class)
2881 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2882 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2885 if (cfg->opt & MONO_OPT_SHARED) {
2886 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2887 iargs [1] = data_inst;
2888 alloc_ftn = mono_object_new;
2890 if (managed_alloc) {
2891 iargs [0] = data_inst;
2892 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2895 iargs [0] = data_inst;
2896 alloc_ftn = mono_object_new_specific;
2899 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing value VAL of type @klass: Nullable<T> is boxed via the
 * type's managed "Box" helper; otherwise a new object is allocated and the
 * value is stored into its payload at offset sizeof (MonoObject).
 */
2903 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2905 MonoInst *alloc, *ins;
2907 if (mono_class_is_nullable (klass)) {
2908 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2909 return mono_emit_method_call (cfg, method, &val, NULL);
2912 alloc = handle_alloc (cfg, klass, TRUE);
2914 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-code variant of handle_box (): the vtable comes from @data_inst
 * at run time. For Nullable<T>, the "Box" helper's address and the RGCTX
 * argument are fetched via the RGCTX and an indirect call is emitted.
 */
2920 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2922 MonoInst *alloc, *ins;
2924 if (mono_class_is_nullable (klass)) {
2925 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2926 /* FIXME: What if the class is shared? We might not
2927 have to get the method address from the RGCTX. */
2928 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2929 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2930 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2932 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2934 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2936 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit IR for CIL 'castclass': null passes straight through (via
 * is_null_bb); otherwise the object's vtable is checked against @klass —
 * an interface check for interfaces, a direct vtable/class-pointer compare
 * for sealed non-array classes, or the generic mini_emit_castclass walk.
 * Throws InvalidCastException on mismatch.
 */
2943 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2945 MonoBasicBlock *is_null_bb;
2946 int obj_reg = src->dreg;
2947 int vtable_reg = alloc_preg (cfg);
2949 NEW_BBLOCK (cfg, is_null_bb);
/* Null references always pass a castclass. */
2951 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2954 save_cast_details (cfg, klass, obj_reg);
2956 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2957 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2958 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2960 int klass_reg = alloc_preg (cfg);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: type identity reduces to a single pointer compare. */
2964 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2965 /* the remoting code is broken, access the class for now */
2967 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2970 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2973 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2976 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2980 MONO_START_BB (cfg, is_null_bb);
2982 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for CIL 'isinst': the result register gets the object itself
 * when it is an instance of @klass (or null), and NULL otherwise — no
 * exception is thrown. The check strategy mirrors handle_castclass
 * (interface bitmap, array rank + element class, Nullable, sealed-class
 * vtable compare, or the generic subclass walk), branching to false_bb on
 * failure and is_null_bb on success.
 */
2988 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2991 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2992 int obj_reg = src->dreg;
2993 int vtable_reg = alloc_preg (cfg);
2994 int res_reg = alloc_preg (cfg);
2996 NEW_BBLOCK (cfg, is_null_bb);
2997 NEW_BBLOCK (cfg, false_bb);
2998 NEW_BBLOCK (cfg, end_bb);
3000 /* Do the assignment at the beginning, so the other assignment can be if converted */
3001 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3002 ins->type = STACK_OBJ;
/* isinst on null yields null: jump straight to the success path. */
3005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3008 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3009 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3010 /* the is_null_bb target simply copies the input register to the output */
3011 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3013 int klass_reg = alloc_preg (cfg);
3015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then the element (cast) class is compared. */
3018 int rank_reg = alloc_preg (cfg);
3019 int eclass_reg = alloc_preg (cfg);
3021 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3022 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3023 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3025 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases encode the array covariance rules for
 * object[], Enum's base class, and Enum itself. */
3026 if (klass->cast_class == mono_defaults.object_class) {
3027 int parent_reg = alloc_preg (cfg);
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3029 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3030 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3031 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3032 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3033 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3034 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3035 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3036 } else if (klass->cast_class == mono_defaults.enum_class) {
3037 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3039 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3040 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3042 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3043 /* Check that the object is a vector too */
3044 int bounds_reg = alloc_preg (cfg);
3045 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3046 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3047 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3050 /* the is_null_bb target simply copies the input register to the output */
3051 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3053 } else if (mono_class_is_nullable (klass)) {
3054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3055 /* the is_null_bb target simply copies the input register to the output */
3056 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3058 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3059 /* the remoting code is broken, access the class for now */
3061 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3068 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3071 /* the is_null_bb target simply copies the input register to the output */
3072 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: result is NULL. */
3077 MONO_START_BB (cfg, false_bb);
3079 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3082 MONO_START_BB (cfg, is_null_bb);
3084 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant: instead of an object
 * result it produces an integer — 0 (is instance), 1 (is not), or 2 (is a
 * transparent proxy whose type cannot be decided at JIT time, i.e. it has
 * custom_type_info), leaving the managed side to finish the check.
 */
3090 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3092 /* This opcode takes as input an object reference and a class, and returns:
3093 0) if the object is an instance of the class,
3094 1) if the object is not instance of the class,
3095 2) if the object is a proxy whose type cannot be determined */
3098 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3099 int obj_reg = src->dreg;
3100 int dreg = alloc_ireg (cfg);
3102 int klass_reg = alloc_preg (cfg);
3104 NEW_BBLOCK (cfg, true_bb);
3105 NEW_BBLOCK (cfg, false_bb);
3106 NEW_BBLOCK (cfg, false2_bb);
3107 NEW_BBLOCK (cfg, end_bb);
3108 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3110 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3111 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3113 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3114 NEW_BBLOCK (cfg, interface_fail_bb);
3116 tmp_reg = alloc_preg (cfg);
3117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3118 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: a transparent proxy with custom type info
 * still yields "undecidable" (2) rather than plain failure. */
3119 MONO_START_BB (cfg, interface_fail_bb);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3122 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3124 tmp_reg = alloc_preg (cfg);
3125 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3126 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3127 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3129 tmp_reg = alloc_preg (cfg);
3130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: proxies are tested against their remote proxy_class. */
3133 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3134 tmp_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3138 tmp_reg = alloc_preg (cfg);
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3143 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3146 MONO_START_BB (cfg, no_proxy_bb);
3148 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge points: materialize the 0/1/2 result in dreg. */
3151 MONO_START_BB (cfg, false_bb);
3153 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3156 MONO_START_BB (cfg, false2_bb);
3158 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3161 MONO_START_BB (cfg, true_bb);
3163 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3165 MONO_START_BB (cfg, end_bb);
3168 MONO_INST_NEW (cfg, ins, OP_ICONST);
3170 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant: returns 0 when the
 * object is an instance of @klass (null included), 1 when it is a
 * transparent proxy whose type cannot be decided at JIT time, and throws
 * InvalidCastException otherwise; the managed side completes the check for
 * the result-1 case.
 */
3176 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3178 /* This opcode takes as input an object reference and a class, and returns:
3179 0) if the object is an instance of the class,
3180 1) if the object is a proxy whose type cannot be determined
3181 an InvalidCastException exception is thrown otherwhise*/
3184 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3185 int obj_reg = src->dreg;
3186 int dreg = alloc_ireg (cfg);
3187 int tmp_reg = alloc_preg (cfg);
3188 int klass_reg = alloc_preg (cfg);
3190 NEW_BBLOCK (cfg, end_bb);
3191 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always passes a castclass (result 0). */
3193 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3194 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3196 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3197 NEW_BBLOCK (cfg, interface_fail_bb);
3199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3200 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info avoids the exception, yielding "undecidable" (1). */
3201 MONO_START_BB (cfg, interface_fail_bb);
3202 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3204 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3206 tmp_reg = alloc_preg (cfg);
3207 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3209 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3211 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3212 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3215 NEW_BBLOCK (cfg, no_proxy_bb);
3217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: proxies are tested against their remote proxy_class. */
3219 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3221 tmp_reg = alloc_preg (cfg);
3222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3225 tmp_reg = alloc_preg (cfg);
3226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3230 NEW_BBLOCK (cfg, fail_1_bb);
3232 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3234 MONO_START_BB (cfg, fail_1_bb);
3236 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3239 MONO_START_BB (cfg, no_proxy_bb);
3241 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3244 MONO_START_BB (cfg, ok_result_bb);
3246 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3248 MONO_START_BB (cfg, end_bb);
3251 MONO_INST_NEW (cfg, ins, OP_ICONST);
3253 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that inlines a delegate constructor: allocate the delegate of
 * type @klass, store its target object and @method, optionally reserve a
 * per-domain code slot so the delegate trampoline can cache the compiled
 * target's address, and install the delegate invoke trampoline (or an AOT
 * patch for it) into invoke_impl.
 */
3258 static G_GNUC_UNUSED MonoInst*
3259 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3261 gpointer *trampoline;
3262 MonoInst *obj, *method_ins, *tramp_ins;
3266 obj = handle_alloc (cfg, klass, FALSE);
3268 /* Inline the contents of mono_delegate_ctor */
3270 /* Set target field */
3271 /* Optimize away setting of NULL target */
3272 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3275 /* Set method field */
3276 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3280 * To avoid looking up the compiled code belonging to the target method
3281 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3282 * store it, and we fill it after the method has been compiled.
3284 if (!cfg->compile_aot && !method->dynamic) {
3285 MonoInst *code_slot_ins;
/* The code slot is shared per (domain, method); look it up under the
 * domain lock and create it on first use. */
3287 domain = mono_domain_get ();
3288 mono_domain_lock (domain);
3289 if (!domain_jit_info (domain)->method_code_hash)
3290 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3291 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3293 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3294 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3296 mono_domain_unlock (domain);
3298 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3302 /* Set invoke_impl field */
3303 if (cfg->compile_aot) {
3304 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3306 trampoline = mono_create_delegate_trampoline (klass);
3307 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3311 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR for 'newobj' on a multi-dimensional array constructor: register
 * the rank-specific varargs array-new icall so a wrapper exists, mark the
 * method as containing varargs, and emit the native call with the array
 * bounds taken from the evaluation stack @sp.
 */
3317 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3319 MonoJitICallInfo *info;
3321 /* Need to register the icall so it gets an icall wrapper */
3322 info = mono_get_array_new_va_icall (rank);
3324 cfg->flags |= MONO_CFG_HAS_VARARGS;
3326 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3327 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If a GOT variable exists and has not yet been materialized, emit the
 * OP_LOAD_GOTADDR instruction at the very start of the entry basic block
 * and a dummy use in the exit block so the variable stays live for the
 * whole method even though only back ends reference it.
 */
3331 mono_emit_load_got_addr (MonoCompile *cfg)
3333 MonoInst *getaddr, *dummy_use;
3335 if (!cfg->got_var || cfg->got_var_allocated)
3338 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3339 getaddr->dreg = cfg->got_var->dreg;
3341 /* Add it to the start of the first bblock */
3342 if (cfg->bb_entry->code) {
3343 getaddr->next = cfg->bb_entry->code;
3344 cfg->bb_entry->code = getaddr;
3347 MONO_ADD_INS (cfg->bb_entry, getaddr);
3349 cfg->got_var_allocated = TRUE;
3352 * Add a dummy use to keep the got_var alive, since real uses might
3353 * only be generated by the back ends.
3354 * Add it to end_bblock, so the variable's lifetime covers the whole
3356 * It would be better to make the usage of the got var explicit in all
3357 * cases when the backend needs it (i.e. calls, throw etc.), so this
3358 * wouldn't be needed.
3360 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3361 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit: initialized once from MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT. */
3364 static int inline_limit;
3365 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being compiled.
 * Rejects: shared generic compilation, runtime/icall/noinline/synchronized
 * implementations, pinvokes, MarshalByRef classes, methods with exception
 * clauses or missing headers, bodies at/over the inline size limit, classes
 * whose cctor cannot be proven to have run (or be safely runnable) at this
 * point, methods with declarative security, and — under soft float —
 * signatures involving R4 values.
 */
3368 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3370 MonoMethodHeader *header;
3372 #ifdef MONO_ARCH_SOFT_FLOAT
3373 MonoMethodSignature *sig = mono_method_signature (method);
3377 if (cfg->generic_sharing_context)
3380 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke bodies can be inlined cheaply. */
3381 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3382 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3383 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3387 if (method->is_inflated)
3388 /* Avoid inflating the header */
3389 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3391 header = mono_method_get_header (method);
3393 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3394 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3395 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3396 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3397 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3398 (method->klass->marshalbyref) ||
3399 !header || header->num_clauses)
3402 /* also consider num_locals? */
3403 /* Do the size check early to avoid creating vtables */
3404 if (!inline_limit_inited) {
3405 if (getenv ("MONO_INLINELIMIT"))
3406 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3408 inline_limit = INLINE_LENGTH_LIMIT;
3409 inline_limit_inited = TRUE;
3411 if (header->code_size >= inline_limit)
3415 * if we can initialize the class of the method right away, we do,
3416 * otherwise we don't allow inlining if the class needs initialization,
3417 * since it would mean inserting a call to mono_runtime_class_init()
3418 * inside the inlined code
3420 if (!(cfg->opt & MONO_OPT_SHARED)) {
3421 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3422 if (cfg->run_cctors && method->klass->has_cctor) {
3423 if (!method->klass->runtime_info)
3424 /* No vtable created yet */
3426 vtable = mono_class_vtable (cfg->domain, method->klass);
3429 /* This makes so that inline cannot trigger */
3430 /* .cctors: too many apps depend on them */
3431 /* running with a specific order... */
3432 if (! vtable->initialized)
3434 mono_runtime_class_init (vtable);
3436 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3437 if (!method->klass->runtime_info)
3438 /* No vtable created yet */
3440 vtable = mono_class_vtable (cfg->domain, method->klass);
3443 if (!vtable->initialized)
3448 * If we're compiling for shared code
3449 * the cctor will need to be run at aot method load time, for example,
3450 * or at the end of the compilation of the inlining method.
3452 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3457 * CAS - do not inline methods with declarative security
3458 * Note: this has to be before any possible return TRUE;
3460 if (mono_method_has_declsec (method))
3463 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: R4 arguments/returns need special handling, so skip. */
3465 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3467 for (i = 0; i < sig->param_count; ++i)
3468 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access requires emitting a class-init
 * check for VTABLE's class.  Initialized (non-AOT) classes, BeforeFieldInit
 * classes, classes with no pending cctor, and accesses from an instance
 * method of the same class are exempt.  (Return statements are elided in
 * this extract.)
 */
3476 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3478 if (vtable->initialized && !cfg->compile_aot)
3481 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3484 if (!mono_class_needs_cctor_run (vtable->klass, method))
3487 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3488 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of one-dimensional
 * array ARR with element class KLASS: bounds check, then
 * &arr->vector [index * element_size].  Uses an x86 LEA fast path for
 * power-of-two element sizes.  Returns the address-producing MonoInst.
 */
3495 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3499 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3501 mono_class_init (klass);
3502 size = mono_class_array_element_size (klass);
3504 mult_reg = alloc_preg (cfg);
3505 array_reg = arr->dreg;
3506 index_reg = index->dreg;
3508 #if SIZEOF_REGISTER == 8
3509 /* The array reg is 64 bits but the index reg is only 32 */
3510 index2_reg = alloc_preg (cfg);
3511 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index, otherwise use it directly. */
3513 if (index->type == STACK_I8) {
3514 index2_reg = alloc_preg (cfg);
3515 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3517 index2_reg = index_reg;
3521 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3523 #if defined(__i386__) || defined(__x86_64__)
/* Power-of-two element sizes map directly to an LEA scale factor. */
3524 if (size == 1 || size == 2 || size == 4 || size == 8) {
3525 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3527 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3528 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3534 add_reg = alloc_preg (cfg);
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3537 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3538 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3539 ins->type = STACK_PTR;
3540 MONO_ADD_INS (cfg->cbb, ins);
3545 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of a
 * two-dimensional array: per-dimension lower-bound adjustment and range
 * check against the MonoArrayBounds pair, then
 * addr = arr + ((realidx1 * length2 + realidx2) * size) + offsetof (vector).
 * Only compiled where the target has native mul/div (depends on OP_LMUL).
 */
3547 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3549 int bounds_reg = alloc_preg (cfg);
3550 int add_reg = alloc_preg (cfg);
3551 int mult_reg = alloc_preg (cfg);
3552 int mult2_reg = alloc_preg (cfg);
3553 int low1_reg = alloc_preg (cfg);
3554 int low2_reg = alloc_preg (cfg);
3555 int high1_reg = alloc_preg (cfg);
3556 int high2_reg = alloc_preg (cfg);
3557 int realidx1_reg = alloc_preg (cfg);
3558 int realidx2_reg = alloc_preg (cfg);
3559 int sum_reg = alloc_preg (cfg);
3564 mono_class_init (klass);
3565 size = mono_class_array_element_size (klass);
3567 index1 = index_ins1->dreg;
3568 index2 = index_ins2->dreg;
3570 /* range checking */
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3572 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1; throw if >= length1
 * (unsigned compare makes a negative realidx fail too). */
3574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3575 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3576 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3577 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3578 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3580 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry lives sizeof (MonoArrayBounds)
 * past the first. */
3582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3583 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3584 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3585 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3586 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3587 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3588 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major flattening of the two indexes into a byte offset. */
3590 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3591 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3593 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3594 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3596 ins->type = STACK_MP;
3598 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatcher for the Get/Set/Address helpers of multi-dimensional arrays:
 * rank 1 and (with MONO_OPT_INTRINS) rank 2 get inline address code, any
 * other rank falls back to a call to the marshalled Address wrapper.
 * For setters the trailing value argument is excluded from the rank count.
 */
3605 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3609 MonoMethod *addr_method;
3612 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3615 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3617 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3618 /* emit_ldelema_2 depends on OP_LMUL */
3619 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3620 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic fallback: call the runtime-generated Address wrapper. */
3624 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3625 addr_method = mono_marshal_get_array_address (rank, element_size);
3626 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: if CMETHOD is one of a known set of corlib
 * methods (String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger, Environment, Math, System.Array generic helpers),
 * emit equivalent inline IR and return the result instruction; otherwise
 * defer to SIMD and arch-specific intrinsics.  A NULL return (elided in
 * this extract) means "no intrinsic, emit a normal call".
 */
3632 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3634 MonoInst *ins = NULL;
3636 static MonoClass *runtime_helpers_class = NULL;
3637 if (! runtime_helpers_class)
3638 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3639 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3641 if (cmethod->klass == mono_defaults.string_class) {
3642 if (strcmp (cmethod->name, "get_Chars") == 0) {
3643 int dreg = alloc_ireg (cfg);
3644 int index_reg = alloc_preg (cfg);
3645 int mult_reg = alloc_preg (cfg);
3646 int add_reg = alloc_preg (cfg);
3648 #if SIZEOF_REGISTER == 8
3649 /* The array reg is 64 bits but the index reg is only 32 */
3650 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3652 index_reg = args [1]->dreg;
3654 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3656 #if defined(__i386__) || defined(__x86_64__)
/* x86 fast path: one LEA with scale 2 (UTF-16 chars). */
3657 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3658 add_reg = ins->dreg;
3659 /* Avoid a warning */
3661 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3665 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3666 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3667 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3669 type_from_op (ins, NULL, NULL);
3671 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3672 int dreg = alloc_ireg (cfg);
3673 /* Decompose later to allow more optimizations */
3674 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3675 ins->type = STACK_I4;
3676 cfg->cbb->has_array_access = TRUE;
3677 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3680 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3681 int mult_reg = alloc_preg (cfg);
3682 int add_reg = alloc_preg (cfg);
3684 /* The corlib functions check for oob already. */
3685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3686 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3690 } else if (cmethod->klass == mono_defaults.object_class) {
3692 if (strcmp (cmethod->name, "GetType") == 0) {
3693 int dreg = alloc_preg (cfg);
3694 int vt_reg = alloc_preg (cfg);
3695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3696 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3697 type_from_op (ins, NULL, NULL);
3700 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the (non-moving) object address; 2654435761 is the Knuth
 * multiplicative-hash constant.  Invalid with a moving GC, hence the guard. */
3701 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3702 int dreg = alloc_ireg (cfg);
3703 int t1 = alloc_ireg (cfg);
3705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3706 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3707 ins->type = STACK_I4;
3711 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3712 MONO_INST_NEW (cfg, ins, OP_NOP);
3713 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
3717 } else if (cmethod->klass == mono_defaults.array_class) {
/* Quick reject: only get_Rank / get_Length start with 'g'. */
3718 if (cmethod->name [0] != 'g')
3721 if (strcmp (cmethod->name, "get_Rank") == 0) {
3722 int dreg = alloc_ireg (cfg);
3723 int vtable_reg = alloc_preg (cfg);
3724 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3725 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3726 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3727 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3728 type_from_op (ins, NULL, NULL);
3731 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3732 int dreg = alloc_ireg (cfg);
3734 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3735 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3736 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3741 } else if (cmethod->klass == runtime_helpers_class) {
3743 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3744 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3748 } else if (cmethod->klass == mono_defaults.thread_class) {
3749 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3750 ins->dreg = alloc_preg (cfg);
3751 ins->type = STACK_OBJ;
3752 MONO_ADD_INS (cfg->cbb, ins);
3754 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3755 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3756 MONO_ADD_INS (cfg->cbb, ins);
3758 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3759 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3760 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: fast-path Enter/Exit --- */
3763 } else if (cmethod->klass == mono_defaults.monitor_class) {
3764 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Arch trampoline variant: pass the object in a fixed register. */
3765 if (strcmp (cmethod->name, "Enter") == 0) {
3768 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3769 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3770 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3771 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3773 return (MonoInst*)call;
3774 } else if (strcmp (cmethod->name, "Exit") == 0) {
3777 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3778 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3779 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3780 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3782 return (MonoInst*)call;
3784 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
/* IL fast-path variant: call a managed FastMonitorEnter/Exit wrapper. */
3785 MonoMethod *fast_method = NULL;
3787 /* Avoid infinite recursion */
3788 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3789 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3790 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3793 if (strcmp (cmethod->name, "Enter") == 0 ||
3794 strcmp (cmethod->name, "Exit") == 0)
3795 fast_method = mono_monitor_get_fast_path (cmethod);
3799 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Array GetGenericValueImpl --- */
3801 } else if (mini_class_is_system_array (cmethod->klass) &&
3802 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3803 MonoInst *addr, *store, *load;
3804 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3806 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3807 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3808 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
3810 } else if (cmethod->klass->image == mono_defaults.corlib &&
3811 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3812 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3815 #if SIZEOF_REGISTER == 8
3816 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3817 /* 64 bit reads are already atomic */
3818 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3819 ins->dreg = mono_alloc_preg (cfg);
3820 ins->inst_basereg = args [0]->dreg;
3821 ins->inst_offset = 0;
3822 MONO_ADD_INS (cfg->cbb, ins);
3826 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of a +/-1 constant. */
3827 if (strcmp (cmethod->name, "Increment") == 0) {
3828 MonoInst *ins_iconst;
3831 if (fsig->params [0]->type == MONO_TYPE_I4)
3832 opcode = OP_ATOMIC_ADD_NEW_I4;
3833 #if SIZEOF_REGISTER == 8
3834 else if (fsig->params [0]->type == MONO_TYPE_I8)
3835 opcode = OP_ATOMIC_ADD_NEW_I8;
3838 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3839 ins_iconst->inst_c0 = 1;
3840 ins_iconst->dreg = mono_alloc_ireg (cfg);
3841 MONO_ADD_INS (cfg->cbb, ins_iconst);
3843 MONO_INST_NEW (cfg, ins, opcode);
3844 ins->dreg = mono_alloc_ireg (cfg);
3845 ins->inst_basereg = args [0]->dreg;
3846 ins->inst_offset = 0;
3847 ins->sreg2 = ins_iconst->dreg;
3848 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3849 MONO_ADD_INS (cfg->cbb, ins);
3851 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3852 MonoInst *ins_iconst;
3855 if (fsig->params [0]->type == MONO_TYPE_I4)
3856 opcode = OP_ATOMIC_ADD_NEW_I4;
3857 #if SIZEOF_REGISTER == 8
3858 else if (fsig->params [0]->type == MONO_TYPE_I8)
3859 opcode = OP_ATOMIC_ADD_NEW_I8;
3862 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3863 ins_iconst->inst_c0 = -1;
3864 ins_iconst->dreg = mono_alloc_ireg (cfg);
3865 MONO_ADD_INS (cfg->cbb, ins_iconst);
3867 MONO_INST_NEW (cfg, ins, opcode);
3868 ins->dreg = mono_alloc_ireg (cfg);
3869 ins->inst_basereg = args [0]->dreg;
3870 ins->inst_offset = 0;
3871 ins->sreg2 = ins_iconst->dreg;
3872 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3873 MONO_ADD_INS (cfg->cbb, ins);
3875 } else if (strcmp (cmethod->name, "Add") == 0) {
3878 if (fsig->params [0]->type == MONO_TYPE_I4)
3879 opcode = OP_ATOMIC_ADD_NEW_I4;
3880 #if SIZEOF_REGISTER == 8
3881 else if (fsig->params [0]->type == MONO_TYPE_I8)
3882 opcode = OP_ATOMIC_ADD_NEW_I8;
3886 MONO_INST_NEW (cfg, ins, opcode);
3887 ins->dreg = mono_alloc_ireg (cfg);
3888 ins->inst_basereg = args [0]->dreg;
3889 ins->inst_offset = 0;
3890 ins->sreg2 = args [1]->dreg;
3891 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3892 MONO_ADD_INS (cfg->cbb, ins);
3895 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3897 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3898 if (strcmp (cmethod->name, "Exchange") == 0) {
3901 if (fsig->params [0]->type == MONO_TYPE_I4)
3902 opcode = OP_ATOMIC_EXCHANGE_I4;
3903 #if SIZEOF_REGISTER == 8
3904 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3905 (fsig->params [0]->type == MONO_TYPE_I) ||
3906 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3907 opcode = OP_ATOMIC_EXCHANGE_I8;
3909 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3910 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3911 opcode = OP_ATOMIC_EXCHANGE_I4;
3916 MONO_INST_NEW (cfg, ins, opcode);
3917 ins->dreg = mono_alloc_ireg (cfg);
3918 ins->inst_basereg = args [0]->dreg;
3919 ins->inst_offset = 0;
3920 ins->sreg2 = args [1]->dreg;
3921 MONO_ADD_INS (cfg->cbb, ins);
/* Stack type of the result tracks the first parameter's type. */
3923 switch (fsig->params [0]->type) {
3925 ins->type = STACK_I4;
3929 ins->type = STACK_I8;
3931 case MONO_TYPE_OBJECT:
3932 ins->type = STACK_OBJ;
3935 g_assert_not_reached ();
3938 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3940 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3941 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3943 if (fsig->params [1]->type == MONO_TYPE_I4)
3945 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3946 size = sizeof (gpointer);
/* NOTE(review): this third branch re-tests MONO_TYPE_I4, already handled
 * by the first branch above — confirm against the elided assignments. */
3947 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
3950 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3951 ins->dreg = alloc_ireg (cfg);
3952 ins->sreg1 = args [0]->dreg;
3953 ins->sreg2 = args [1]->dreg;
3954 ins->sreg3 = args [2]->dreg;
3955 ins->type = STACK_I4;
3956 MONO_ADD_INS (cfg->cbb, ins);
3957 } else if (size == 8) {
3958 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3959 ins->dreg = alloc_ireg (cfg);
3960 ins->sreg1 = args [0]->dreg;
3961 ins->sreg2 = args [1]->dreg;
3962 ins->sreg3 = args [2]->dreg;
3963 ins->type = STACK_I8;
3964 MONO_ADD_INS (cfg->cbb, ins);
3966 /* g_assert_not_reached (); */
3969 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
3973 } else if (cmethod->klass->image == mono_defaults.corlib) {
3974 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3975 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3976 MONO_INST_NEW (cfg, ins, OP_BREAK);
3977 MONO_ADD_INS (cfg->cbb, ins);
3980 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3981 && strcmp (cmethod->klass->name, "Environment") == 0) {
3982 #ifdef PLATFORM_WIN32
3983 EMIT_NEW_ICONST (cfg, ins, 1);
3985 EMIT_NEW_ICONST (cfg, ins, 0);
3989 } else if (cmethod->klass == mono_defaults.math_class) {
3991 * There is general branches code for Min/Max, but it does not work for
3993 * http://everything2.com/?node_id=1051618
3997 #ifdef MONO_ARCH_SIMD_INTRINSICS
3998 if (cfg->opt & MONO_OPT_SIMD) {
3999 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Last resort: let the architecture backend try. */
4005 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4009 * This entry point could be used later for arbitrary method
/* mini_redirect_call:
 *   Redirect selected calls to alternate implementations.  Currently only
 * String.InternalAllocateStr, which is rerouted to the GC's managed
 * allocator when one is available; otherwise (elided) falls through to a
 * normal call. */
4012 inline static MonoInst*
4013 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4014 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4016 if (method->klass == mono_defaults.string_class) {
4017 /* managed string allocation support */
4018 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4019 MonoInst *iargs [2];
4020 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4021 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4024 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4025 iargs [1] = args [0];
4026 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable per incoming argument
 * (including the implicit this) and emit stores of the evaluation-stack
 * values SP into them, wiring the new vars into cfg->args.
 */
4033 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4035 MonoInst *store, *temp;
4038 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For `this` the static signature has no entry; derive the type from the
 * stack value instead. */
4039 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4042 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4043 * would be different than the MonoInst's used to represent arguments, and
4044 * the ldelema implementation can't deal with that.
4045 * Solution: When ldelema is used on an inline argument, create a var for
4046 * it, emit ldelema on that var, and emit the saving code below in
4047 * inline_method () if needed.
4049 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4050 cfg->args [i] = temp;
4051 /* This uses cfg->args [i] which is set by the preceeding line */
4052 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4053 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when set, inlining is restricted to callee/caller methods
 * whose full name matches the MONO_INLINE_*_METHOD_NAME_LIMIT env vars. */
4058 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4059 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4061 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/* check_inline_called_method_name_limit:
 *   Return whether CALLED_METHOD's full name starts with the prefix from
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT; an empty/unset limit allows all
 * (the TRUE return for that case is elided in this extract). */
4063 check_inline_called_method_name_limit (MonoMethod *called_method)
4066 static char *limit = NULL;
4068 if (limit == NULL) {
4069 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4071 if (limit_string != NULL)
4072 limit = limit_string;
4074 limit = (char *) "";
4077 if (limit [0] != '\0') {
4078 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* NOTE(review): strncmp with strlen (limit) is a prefix match, not an
 * ordering — the commented-out `<= 0` alternative suggests both were tried. */
4080 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4081 g_free (called_method_name);
4083 //return (strncmp_result <= 0);
4084 return (strncmp_result == 0);
4091 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/* check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit () for the CALLER side,
 * driven by MONO_INLINE_CALLER_METHOD_NAME_LIMIT. */
4093 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4096 static char *limit = NULL;
4098 if (limit == NULL) {
4099 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4100 if (limit_string != NULL) {
4101 limit = limit_string;
4103 limit = (char *) "";
4107 if (limit [0] != '\0') {
4108 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4110 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4111 g_free (caller_method_name);
4113 //return (strncmp_result <= 0);
4114 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the pieces
 * of CFG state that mono_method_to_ir () will clobber, recursively converts
 * the callee between fresh start/end bblocks, restores the state, and — if
 * the conversion came back cheap enough (or INLINE_ALLWAYS is set) — links
 * and merges the new bblocks into the caller's graph.  On abort, resets the
 * exception state and restores cfg->cbb.  (Return statements are elided in
 * this extract.)
 */
4122 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4123 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4125 MonoInst *ins, *rvar = NULL;
4126 MonoMethodHeader *cheader;
4127 MonoBasicBlock *ebblock, *sbblock;
4129 MonoMethod *prev_inlined_method;
4130 MonoInst **prev_locals, **prev_args;
4131 MonoType **prev_arg_types;
4132 guint prev_real_offset;
4133 GHashTable *prev_cbb_hash;
4134 MonoBasicBlock **prev_cil_offset_to_bb;
4135 MonoBasicBlock *prev_cbb;
4136 unsigned char* prev_cil_start;
4137 guint32 prev_cil_offset_to_bb_len;
4138 MonoMethod *prev_current_method;
4139 MonoGenericContext *prev_generic_context;
4140 gboolean ret_var_set, prev_ret_var_set;
4142 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4144 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4145 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4148 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4149 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4153 if (cfg->verbose_level > 2)
4154 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4156 if (!cmethod->inline_info) {
4157 mono_jit_stats.inlineable_methods++;
4158 cmethod->inline_info = 1;
4160 /* allocate space to store the return value */
4161 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4162 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4165 /* allocate local variables */
4166 cheader = mono_method_get_header (cmethod);
4167 prev_locals = cfg->locals;
4168 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4169 for (i = 0; i < cheader->num_locals; ++i)
4170 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4172 /* allocate start and end blocks */
4173 /* This is needed so if the inline is aborted, we can clean up */
4174 NEW_BBLOCK (cfg, sbblock);
4175 sbblock->real_offset = real_offset;
4177 NEW_BBLOCK (cfg, ebblock);
4178 ebblock->block_num = cfg->num_bblocks++;
4179 ebblock->real_offset = real_offset;
/* Save all CFG state that the recursive IR conversion overwrites. */
4181 prev_args = cfg->args;
4182 prev_arg_types = cfg->arg_types;
4183 prev_inlined_method = cfg->inlined_method;
4184 cfg->inlined_method = cmethod;
4185 cfg->ret_var_set = FALSE;
4186 prev_real_offset = cfg->real_offset;
4187 prev_cbb_hash = cfg->cbb_hash;
4188 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4189 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4190 prev_cil_start = cfg->cil_start;
4191 prev_cbb = cfg->cbb;
4192 prev_current_method = cfg->current_method;
4193 prev_generic_context = cfg->generic_context;
4194 prev_ret_var_set = cfg->ret_var_set;
/* Recursively convert the callee; costs < 0 means the conversion failed. */
4196 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4198 ret_var_set = cfg->ret_var_set;
4200 cfg->inlined_method = prev_inlined_method;
4201 cfg->real_offset = prev_real_offset;
4202 cfg->cbb_hash = prev_cbb_hash;
4203 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4204 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4205 cfg->cil_start = prev_cil_start;
4206 cfg->locals = prev_locals;
4207 cfg->args = prev_args;
4208 cfg->arg_types = prev_arg_types;
4209 cfg->current_method = prev_current_method;
4210 cfg->generic_context = prev_generic_context;
4211 cfg->ret_var_set = prev_ret_var_set;
/* NOTE(review): the acceptance threshold 60 is a magic constant distinct
 * from INLINE_LENGTH_LIMIT — presumably a cost in IR terms; confirm. */
4213 if ((costs >= 0 && costs < 60) || inline_allways) {
4214 if (cfg->verbose_level > 2)
4215 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4217 mono_jit_stats.inlined_methods++;
4219 /* always add some code to avoid block split failures */
4220 MONO_INST_NEW (cfg, ins, OP_NOP);
4221 MONO_ADD_INS (prev_cbb, ins);
4223 prev_cbb->next_bb = sbblock;
4224 link_bblock (cfg, prev_cbb, sbblock);
4227 * Get rid of the begin and end bblocks if possible to aid local
4230 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4232 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4233 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4235 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4236 MonoBasicBlock *prev = ebblock->in_bb [0];
4237 mono_merge_basic_blocks (cfg, prev, ebblock);
4239 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4240 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4241 cfg->cbb = prev_cbb;
4249 * If the inlined method contains only a throw, then the ret var is not
4250 * set, so set it to a dummy value.
4253 static double r8_0 = 0.0;
4255 switch (rvar->type) {
4257 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4260 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4265 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4268 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4269 ins->type = STACK_R8;
4270 ins->inst_p0 = (void*)&r8_0;
4271 ins->dreg = rvar->dreg;
4272 MONO_ADD_INS (cfg->cbb, ins);
4275 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4278 g_assert_not_reached ();
/* Push the inlined return value back onto the caller's eval stack. */
4282 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: conversion too costly or failed. */
4287 if (cfg->verbose_level > 2)
4288 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4289 cfg->exception_type = MONO_EXCEPTION_NONE;
4290 mono_loader_clear_error ();
4292 /* This gets rid of the newly added bblocks */
4293 cfg->cbb = prev_cbb;
4299 * Some of these comments may well be out-of-date.
4300 * Design decisions: we do a single pass over the IL code (and we do bblock
4301 * splitting/merging in the few cases when it's required: a back jump to an IL
4302 * address that was not already seen as bblock starting point).
4303 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4304 * Complex operations are decomposed in simpler ones right away. We need to let the
4305 * arch-specific code peek and poke inside this process somehow (except when the
4306 * optimizations can take advantage of the full semantic info of coarse opcodes).
4307 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4308 * MonoInst->opcode initially is the IL opcode or some simplification of that
4309 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4310 * opcode with value bigger than OP_LAST.
4311 * At this point the IR can be handed over to an interpreter, a dumb code generator
4312 * or to the optimizing code generator that will translate it to SSA form.
4314 * Profiling directed optimizations.
4315 * We may compile by default with few or no optimizations and instrument the code
4316 * or the user may indicate what methods to optimize the most either in a config file
4317 * or through repeated runs where the compiler applies offline the optimizations to
4318 * each method and then decides if it was worth it.
/* Verification helpers used throughout mono_method_to_ir (): each macro
 * bails out via UNVERIFIED (or load_error) when the IL being converted
 * violates a structural constraint. */
4321 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4322 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4323 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4324 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4325 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4326 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4327 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4328 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4330 /* offset from br.s -> br like opcodes */
4331 #define BIG_BRANCH_OFFSET 13
/* ip_in_bb:
 *   Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock starts at that offset in cfg->cil_offset_to_bb. */
4334 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4336 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4338 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode's argument
 * kind and create basic blocks (GET_BBLOCK) at every branch target and at
 * the instruction following each branch/switch.  Bblocks whose only content
 * leads to a `throw` are flagged out_of_line so later code layout can move
 * them off the hot path.
 */
4342 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4344 unsigned char *ip = start;
4345 unsigned char *target;
4348 MonoBasicBlock *bblock;
4349 const MonoOpcode *opcode;
4352 cli_addr = ip - start;
4353 i = mono_opcode_value ((const guint8 **)&ip, end);
4356 opcode = &mono_opcodes [i];
/* Advance IP by the size of the operand; only branch-like operands
 * create new bblocks. */
4357 switch (opcode->argument) {
4358 case MonoInlineNone:
4361 case MonoInlineString:
4362 case MonoInlineType:
4363 case MonoInlineField:
4364 case MonoInlineMethod:
4367 case MonoShortInlineR:
4374 case MonoShortInlineVar:
4375 case MonoShortInlineI:
4378 case MonoShortInlineBrTarget:
4379 target = start + cli_addr + 2 + (signed char)ip [1];
4380 GET_BBLOCK (cfg, bblock, target);
4383 GET_BBLOCK (cfg, bblock, ip);
4385 case MonoInlineBrTarget:
4386 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4387 GET_BBLOCK (cfg, bblock, target);
4390 GET_BBLOCK (cfg, bblock, ip);
4392 case MonoInlineSwitch: {
4393 guint32 n = read32 (ip + 1);
/* Switch operand: count word plus N 32-bit relative targets. */
4396 cli_addr += 5 + 4 * n;
4397 target = start + cli_addr;
4398 GET_BBLOCK (cfg, bblock, target);
4400 for (j = 0; j < n; ++j) {
4401 target = start + cli_addr + (gint32)read32 (ip);
4402 GET_BBLOCK (cfg, bblock, target);
4412 g_assert_not_reached ();
4415 if (i == CEE_THROW) {
4416 unsigned char *bb_start = ip - 1;
4418 /* Find the start of the bblock containing the throw */
4420 while ((bb_start >= start) && !bblock) {
4421 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4425 bblock->out_of_line = 1;
/* mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod.  Wrapper methods store resolved data
 * directly; otherwise the token is looked up in M's image under CONTEXT.
 * "allow_open": open constructed types are NOT rejected here (contrast
 * mini_get_method () below). */
4434 static inline MonoMethod *
4435 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4439 if (m->wrapper_type != MONO_WRAPPER_NONE)
4440 return mono_method_get_wrapper_data (m, token);
4442 method = mono_get_method_full (m->klass->image, token, klass, context);
/* mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the NULL-ing
 * line is elided in this extract). */
4447 static inline MonoMethod *
4448 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4450 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4452 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/* mini_get_class:
 *   Resolve TOKEN to a MonoClass, honoring wrapper data, and run class
 * initialization metadata setup on the result. */
4458 static inline MonoClass*
4459 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4463 if (method->wrapper_type != MONO_WRAPPER_NONE)
4464 klass = mono_method_get_wrapper_data (method, token);
4466 klass = mono_class_get_full (method->klass->image, token, context);
4468 mono_class_init (klass);
4473 * Returns TRUE if the JIT should abort inlining because "callee"
4474 * is influenced by security attributes.
/* check_linkdemand:
 *   Evaluate CAS LinkDemand checks for CALLER invoking CALLEE.  An ECMA
 * link demand emits inline code throwing a SecurityException; any other
 * failure records MONO_EXCEPTION_SECURITY_LINKDEMAND on the CFG (unless an
 * earlier exception is already pending). */
4477 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining: the caller's own demand was checked when
 * it was compiled. */
4481 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4485 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4486 if (result == MONO_JIT_SECURITY_OK)
4489 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4490 /* Generate code to throw a SecurityException before the actual call/link */
4491 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4494 NEW_ICONST (cfg, args [0], 4);
4495 NEW_METHODCONST (cfg, args [1], caller);
4496 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4497 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4498 /* don't hide previous results */
4499 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4500 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return the managed SecurityManager helper used to raise a
 * MethodAccessException; the lookup result is kept in a function-level
 * static so it is resolved lazily.
 */
method_access_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	/* 2-argument overload: (caller, callee) — see the emit helper below */
	method = mono_class_get_method_from_name (secman->securitymanager,
						  "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit IR that calls the managed thrower with (caller, callee) so a
 * MethodAccessException is raised at runtime instead of performing the call.
 */
emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
				    MonoBasicBlock *bblock, unsigned char *ip)
	MonoMethod *thrower = method_access_exception ();
	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	EMIT_NEW_METHODCONST (cfg, args [1], callee);
	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * field_access_exception:
 *
 *   Return the managed SecurityManager helper used to raise a
 * FieldAccessException; cached in a function-level static.
 */
field_access_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	/* 2-argument overload: (caller, field) — see the emit helper below */
	method = mono_class_get_method_from_name (secman->securitymanager,
						  "FieldAccessException", 2);
/*
 * emit_throw_field_access_exception:
 *
 *   Emit IR that calls the managed thrower with (caller, field) so a
 * FieldAccessException is raised at runtime instead of accessing the field.
 */
emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
				   MonoBasicBlock *bblock, unsigned char *ip)
	MonoMethod *thrower = field_access_exception ();
	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	EMIT_NEW_METHODCONST (cfg, args [1], field);
	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
get_original_method (MonoMethod *method)
	/* not a wrapper: the method itself carries the attributes */
	if (method->wrapper_type == MONO_WRAPPER_NONE)
	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: emit code throwing a FieldAccessException when a
 * Transparent caller tries to access a [SecurityCritical] field.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	/* the field is Critical! only SafeCritical and Critical callers can access it, so we throw if caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: emit code throwing a MethodAccessException when a
 * Transparent caller tries to call a [SecurityCritical] method.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	/* the callee is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size, so the JIT can
 * replace the RuntimeHelpers::InitializeArray () call with a direct copy.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/* The pattern being matched:
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] == 0x4: high byte of the ldtoken operand, i.e. the metadata
	 * token must come from the Field table (0x04xxxxxx) */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		*out_field_token = field_token;
		/* cfg == NULL is deliberate: only resolving, not compiling */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* only optimize the real corlib RuntimeHelpers::InitializeArray */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		/* the initializer blob must be large enough for the array */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			/* static image: locate the blob through the field's RVA */
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			/* dynamic (SRE) image: take the data directly from the field */
			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message describing the
 * invalid IL at IP (or noting an empty method body).
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
	char *method_fname = mono_method_full_name (method, TRUE);
	if (mono_method_get_header (method)->code_size == 0)
		method_code = g_strdup ("method body is empty.");
		/* otherwise disassemble the single offending instruction */
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built managed exception object on CFG to be thrown instead
 * of finishing compilation. The slot is registered as a GC root so the
 * object stays alive until it is used.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving type variables
 * through the generic sharing context when one is active.
 */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	if (cfg->generic_sharing_context)
		/* map shared type variables to their representative basic type */
		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
		type = &klass->byval_arg;
	return MONO_TYPE_IS_REFERENCE (type);
/*
 * mono_decompose_array_access_opts:
 *
 * Decompose array access opcodes.
 * This should be in decompose.c, but it emits calls so it has to stay here until
 * the old JIT is gone.
 */
mono_decompose_array_access_opts (MonoCompile *cfg)
	MonoBasicBlock *bb, *first_bb;
	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
	 * can be executed anytime. It should be run before decompose_long
	 */
	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *prev = NULL;
		MonoInst *iargs [3];
		/* skip bblocks with no array access opcodes */
		if (!bb->has_array_access)
		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		for (ins = bb->code; ins; ins = ins->next) {
			switch (ins->opcode) {
				/* array length: load MonoArray::max_length */
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
						  G_STRUCT_OFFSET (MonoArray, max_length));
				MONO_ADD_INS (cfg->cbb, dest);
			case OP_BOUNDS_CHECK:
				MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
				if (cfg->opt & MONO_OPT_SHARED) {
					/* shared code: allocate through the generic icall */
					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
					MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
					iargs [2]->dreg = ins->sreg1;
					dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
					dest->dreg = ins->dreg;
					/* non-shared: use the faster vtable-specific icall */
					MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
					NEW_VTABLECONST (cfg, iargs [0], vtable);
					MONO_ADD_INS (cfg->cbb, iargs [0]);
					MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
					iargs [1]->dreg = ins->sreg1;
					dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
					dest->dreg = ins->dreg;
				/* string length: load MonoString::length */
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
						  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
				MONO_ADD_INS (cfg->cbb, dest);
		g_assert (cfg->cbb == first_bb);
		if (cfg->cbb->code || (cfg->cbb != first_bb)) {
			/* Replace the original instruction with the new code sequence */
			mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
			/* reset the scratch bblock so it can be reused */
			first_bb->code = first_bb->last_ins = NULL;
			first_bb->in_count = first_bb->out_count = 0;
			cfg->cbb = first_bb;
		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
#ifdef MONO_ARCH_SOFT_FLOAT

/*
 * mono_decompose_soft_float:
 *
 * Soft float support on ARM. We store each double value in a pair of integer vregs,
 * similar to long support on 32 bit platforms. 32 bit float values require special
 * handling when used as locals, arguments, and in calls.
 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 */
mono_decompose_soft_float (MonoCompile *cfg)
	MonoBasicBlock *bb, *first_bb;
	/*
	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
	 */
	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *prev = NULL;
		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		for (ins = bb->code; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			/* Most fp operations are handled automatically by opcode emulation */
			switch (ins->opcode) {
				/* r8 constant: reinterpret the bits as an i8 constant */
				d.vald = *(double*)ins->inst_p0;
				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
				/* We load the r8 value */
				d.vald = *(float*)ins->inst_p0;
				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
				/* fp moves become moves of the integer vreg pair */
				ins->opcode = OP_LMOVE;
				ins->opcode = OP_MOVE;
				ins->sreg1 = ins->sreg1 + 1;
				ins->opcode = OP_MOVE;
				ins->sreg1 = ins->sreg1 + 2;
				/* fp return value: set the long return register pair */
				int reg = ins->sreg1;
				ins->opcode = OP_SETLRET;
				ins->sreg1 = reg + 1;
				ins->sreg2 = reg + 2;
			case OP_LOADR8_MEMBASE:
				ins->opcode = OP_LOADI8_MEMBASE;
			case OP_STORER8_MEMBASE_REG:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
			case OP_STORER4_MEMBASE_REG: {
				/* r4 store: call the mono_fstore_r4 icall */
				MonoInst *iargs [2];
				/* Arg 1 is the double value */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				/* Arg 2 is the address to store to */
				addr_reg = mono_alloc_preg (cfg);
				EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
				mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
			case OP_LOADR4_MEMBASE: {
				/* r4 load: call the mono_fload_r4 icall, which widens to r8 */
				MonoInst *iargs [1];
				addr_reg = mono_alloc_preg (cfg);
				EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
				conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
				conv->dreg = ins->dreg;
			case OP_FCALL_MEMBASE: {
				MonoCallInst *call = (MonoCallInst*)ins;
				if (call->signature->ret->type == MONO_TYPE_R4) {
					MonoCallInst *call2;
					MonoInst *iargs [1];
					/* Convert the call into a call returning an int */
					MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
					memcpy (call2, call, sizeof (MonoCallInst));
					switch (ins->opcode) {
						call2->inst.opcode = OP_CALL;
						call2->inst.opcode = OP_CALL_REG;
					case OP_FCALL_MEMBASE:
						call2->inst.opcode = OP_CALL_MEMBASE;
						g_assert_not_reached ();
					call2->inst.dreg = mono_alloc_ireg (cfg);
					MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
					/* FIXME: Optimize this */
					/* Emit an r4->r8 conversion */
					EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					/* r8-returning call: treat it as a long-returning call */
					switch (ins->opcode) {
						ins->opcode = OP_LCALL;
						ins->opcode = OP_LCALL_REG;
					case OP_FCALL_MEMBASE:
						ins->opcode = OP_LCALL_MEMBASE;
						g_assert_not_reached ();
				MonoJitICallInfo *info;
				MonoInst *iargs [2];
				MonoInst *call, *cmp, *br;
				/* Convert fcompare+fbcc to icall+icompare+beq */
				/* the emulation icall is selected from the *branch* opcode */
				info = mono_find_jit_opcode_emulation (ins->next->opcode);
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
				iargs [1]->dreg = ins->sreg2;
				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
				MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
				cmp->sreg1 = call->dreg;
				MONO_ADD_INS (cfg->cbb, cmp);
				MONO_INST_NEW (cfg, br, OP_IBNE_UN);
				br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
				br->inst_true_bb = ins->next->inst_true_bb;
				br->inst_false_bb = ins->next->inst_false_bb;
				MONO_ADD_INS (cfg->cbb, br);
				/* The call sequence might include fp ins */
				/* Skip fbcc or fccc */
				NULLIFY_INS (ins->next);
				MonoJitICallInfo *info;
				MonoInst *iargs [2];
				/* Convert fccc to icall+icompare+iceq */
				info = mono_find_jit_opcode_emulation (ins->opcode);
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
				iargs [1]->dreg = ins->sreg2;
				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
				MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
				/* The call sequence might include fp ins */
				MonoInst *iargs [2];
				MonoInst *call, *cmp;
				/* Convert to icall+icompare+cond_exc+move */
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
				MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
				cmp->sreg1 = call->dreg;
				MONO_ADD_INS (cfg->cbb, cmp);
				MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
				/* Do the assignment if the value is finite */
				MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
			/* sanity: no fp vregs may survive this pass */
			if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
				mono_print_ins (ins);
				g_assert_not_reached ();
		g_assert (cfg->cbb == first_bb);
		if (cfg->cbb->code || (cfg->cbb != first_bb)) {
			/* Replace the original instruction with the new code sequence */
			mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
			/* reset the scratch bblock so it can be reused */
			first_bb->code = first_bb->last_ins = NULL;
			first_bb->in_count = first_bb->out_count = 0;
			cfg->cbb = first_bb;
		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	/* this pass created long opcodes, so decompose them now */
	mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the stack (*sp) into local N,
 * eliding the move entirely when the value is a freshly emitted constant.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
		/* general case: emit an explicit local store */
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations so try to get rid of it in common
 * cases. Looks ahead for a ldloca+initobj sequence and replaces it with a
 * direct NULL store / vtype zeroing of the local.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	local = read16 (ip + 2);
	/* only fold when the following initobj is inside the same bblock */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			/* reference type: initobj is just a NULL store */
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			/* value type: zero the whole local */
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by
 * checking the class and its parent chain.
 */
is_exception_class (MonoClass *class)
	if (class == mono_defaults.exception_class)
	class = class->parent;
5195 * mono_method_to_ir:
5197 * Translate the .net IL into linear IR.
5200 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5201 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5202 guint inline_offset, gboolean is_virtual_call)
5204 MonoInst *ins, **sp, **stack_start;
5205 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5206 MonoMethod *cmethod, *method_definition;
5207 MonoInst **arg_array;
5208 MonoMethodHeader *header;
5210 guint32 token, ins_flag;
5212 MonoClass *constrained_call = NULL;
5213 unsigned char *ip, *end, *target, *err_pos;
5214 static double r8_0 = 0.0;
5215 MonoMethodSignature *sig;
5216 MonoGenericContext *generic_context = NULL;
5217 MonoGenericContainer *generic_container = NULL;
5218 MonoType **param_types;
5219 int i, n, start_new_bblock, dreg;
5220 int num_calls = 0, inline_costs = 0;
5221 int breakpoint_id = 0;
5223 MonoBoolean security, pinvoke;
5224 MonoSecurityManager* secman = NULL;
5225 MonoDeclSecurityActions actions;
5226 GSList *class_inits = NULL;
5227 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5230 /* serialization and xdomain stuff may need access to private fields and methods */
5231 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5232 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5233 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5234 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5235 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5236 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5238 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5240 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5241 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5242 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5243 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5245 image = method->klass->image;
5246 header = mono_method_get_header (method);
5247 generic_container = mono_method_get_generic_container (method);
5248 sig = mono_method_signature (method);
5249 num_args = sig->hasthis + sig->param_count;
5250 ip = (unsigned char*)header->code;
5251 cfg->cil_start = ip;
5252 end = ip + header->code_size;
5253 mono_jit_stats.cil_code_size += header->code_size;
5255 method_definition = method;
5256 while (method_definition->is_inflated) {
5257 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5258 method_definition = imethod->declaring;
5261 /* SkipVerification is not allowed if core-clr is enabled */
5262 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5264 dont_verify_stloc = TRUE;
5267 if (!dont_verify && mini_method_verify (cfg, method_definition))
5268 goto exception_exit;
5270 if (mono_debug_using_mono_debugger ())
5271 cfg->keep_cil_nops = TRUE;
5273 if (sig->is_inflated)
5274 generic_context = mono_method_get_context (method);
5275 else if (generic_container)
5276 generic_context = &generic_container->context;
5277 cfg->generic_context = generic_context;
5279 if (!cfg->generic_sharing_context)
5280 g_assert (!sig->has_type_parameters);
5282 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5283 g_assert (method->is_inflated);
5284 g_assert (mono_method_get_context (method)->method_inst);
5286 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5287 g_assert (sig->generic_param_count);
5289 if (cfg->method == method) {
5290 cfg->real_offset = 0;
5292 cfg->real_offset = inline_offset;
5295 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5296 cfg->cil_offset_to_bb_len = header->code_size;
5298 cfg->current_method = method;
5300 if (cfg->verbose_level > 2)
5301 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5303 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5305 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5306 for (n = 0; n < sig->param_count; ++n)
5307 param_types [n + sig->hasthis] = sig->params [n];
5308 cfg->arg_types = param_types;
5310 dont_inline = g_list_prepend (dont_inline, method);
5311 if (cfg->method == method) {
5313 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5314 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5317 NEW_BBLOCK (cfg, start_bblock);
5318 cfg->bb_entry = start_bblock;
5319 start_bblock->cil_code = NULL;
5320 start_bblock->cil_length = 0;
5323 NEW_BBLOCK (cfg, end_bblock);
5324 cfg->bb_exit = end_bblock;
5325 end_bblock->cil_code = NULL;
5326 end_bblock->cil_length = 0;
5327 g_assert (cfg->num_bblocks == 2);
5329 arg_array = cfg->args;
5331 if (header->num_clauses) {
5332 cfg->spvars = g_hash_table_new (NULL, NULL);
5333 cfg->exvars = g_hash_table_new (NULL, NULL);
5335 /* handle exception clauses */
5336 for (i = 0; i < header->num_clauses; ++i) {
5337 MonoBasicBlock *try_bb;
5338 MonoExceptionClause *clause = &header->clauses [i];
5339 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5340 try_bb->real_offset = clause->try_offset;
5341 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5342 tblock->real_offset = clause->handler_offset;
5343 tblock->flags |= BB_EXCEPTION_HANDLER;
5345 link_bblock (cfg, try_bb, tblock);
5347 if (*(ip + clause->handler_offset) == CEE_POP)
5348 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5350 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5351 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5352 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5353 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5354 MONO_ADD_INS (tblock, ins);
5356 /* todo: is a fault block unsafe to optimize? */
5357 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5358 tblock->flags |= BB_EXCEPTION_UNSAFE;
5362 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5364 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5366 /* catch and filter blocks get the exception object on the stack */
5367 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5368 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5369 MonoInst *dummy_use;
5371 /* mostly like handle_stack_args (), but just sets the input args */
5372 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5373 tblock->in_scount = 1;
5374 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5375 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5378 * Add a dummy use for the exvar so its liveness info will be
5382 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5384 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5385 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5386 tblock->flags |= BB_EXCEPTION_HANDLER;
5387 tblock->real_offset = clause->data.filter_offset;
5388 tblock->in_scount = 1;
5389 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5390 /* The filter block shares the exvar with the handler block */
5391 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5392 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5393 MONO_ADD_INS (tblock, ins);
5397 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5398 clause->data.catch_class &&
5399 cfg->generic_sharing_context &&
5400 mono_class_check_context_used (clause->data.catch_class)) {
5402 * In shared generic code with catch
5403 * clauses containing type variables
5404 * the exception handling code has to
5405 * be able to get to the rgctx.
5406 * Therefore we have to make sure that
5407 * the vtable/mrgctx argument (for
5408 * static or generic methods) or the
5409 * "this" argument (for non-static
5410 * methods) are live.
5412 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5413 mini_method_get_context (method)->method_inst ||
5414 method->klass->valuetype) {
5415 mono_get_vtable_var (cfg);
5417 MonoInst *dummy_use;
5419 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5424 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5425 cfg->cbb = start_bblock;
5426 cfg->args = arg_array;
5427 mono_save_args (cfg, sig, inline_args);
5430 /* FIRST CODE BLOCK */
5431 NEW_BBLOCK (cfg, bblock);
5432 bblock->cil_code = ip;
5436 ADD_BBLOCK (cfg, bblock);
5438 if (cfg->method == method) {
5439 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5440 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5441 MONO_INST_NEW (cfg, ins, OP_BREAK);
5442 MONO_ADD_INS (bblock, ins);
5446 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5447 secman = mono_security_manager_get_methods ();
5449 security = (secman && mono_method_has_declsec (method));
5450 /* at this point having security doesn't mean we have any code to generate */
5451 if (security && (cfg->method == method)) {
5452 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5453 * And we do not want to enter the next section (with allocation) if we
5454 * have nothing to generate */
5455 security = mono_declsec_get_demands (method, &actions);
5458 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5459 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5461 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5462 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5463 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5465 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5466 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5470 mono_custom_attrs_free (custom);
5473 custom = mono_custom_attrs_from_class (wrapped->klass);
5474 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5478 mono_custom_attrs_free (custom);
5481 /* not a P/Invoke after all */
5486 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5487 /* we use a separate basic block for the initialization code */
5488 NEW_BBLOCK (cfg, init_localsbb);
5489 cfg->bb_init = init_localsbb;
5490 init_localsbb->real_offset = cfg->real_offset;
5491 start_bblock->next_bb = init_localsbb;
5492 init_localsbb->next_bb = bblock;
5493 link_bblock (cfg, start_bblock, init_localsbb);
5494 link_bblock (cfg, init_localsbb, bblock);
5496 cfg->cbb = init_localsbb;
5498 start_bblock->next_bb = bblock;
5499 link_bblock (cfg, start_bblock, bblock);
5502 /* at this point we know, if security is TRUE, that some code needs to be generated */
5503 if (security && (cfg->method == method)) {
5506 mono_jit_stats.cas_demand_generation++;
5508 if (actions.demand.blob) {
5509 /* Add code for SecurityAction.Demand */
5510 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5511 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5512 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5513 mono_emit_method_call (cfg, secman->demand, args, NULL);
5515 if (actions.noncasdemand.blob) {
5516 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5517 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5518 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5519 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5520 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5521 mono_emit_method_call (cfg, secman->demand, args, NULL);
5523 if (actions.demandchoice.blob) {
5524 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5525 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5526 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5527 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5528 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5532 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5534 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5537 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5538 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5539 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5540 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5541 if (!(method->klass && method->klass->image &&
5542 mono_security_core_clr_is_platform_image (method->klass->image))) {
5543 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5549 if (header->code_size == 0)
5552 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5557 if (cfg->method == method)
5558 mono_debug_init_method (cfg, bblock, breakpoint_id);
5560 for (n = 0; n < header->num_locals; ++n) {
5561 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5566 /* We force the vtable variable here for all shared methods
5567 for the possibility that they might show up in a stack
5568 trace where their exact instantiation is needed. */
5569 if (cfg->generic_sharing_context && method == cfg->method) {
5570 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5571 mini_method_get_context (method)->method_inst ||
5572 method->klass->valuetype) {
5573 mono_get_vtable_var (cfg);
5575 /* FIXME: Is there a better way to do this?
5576 We need the variable live for the duration
5577 of the whole method. */
5578 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5582 /* add a check for this != NULL to inlined methods */
5583 if (is_virtual_call) {
5586 NEW_ARGLOAD (cfg, arg_ins, 0);
5587 MONO_ADD_INS (cfg->cbb, arg_ins);
5588 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5589 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5590 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5593 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5594 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5597 start_new_bblock = 0;
5601 if (cfg->method == method)
5602 cfg->real_offset = ip - header->code;
5604 cfg->real_offset = inline_offset;
5609 if (start_new_bblock) {
5610 bblock->cil_length = ip - bblock->cil_code;
5611 if (start_new_bblock == 2) {
5612 g_assert (ip == tblock->cil_code);
5614 GET_BBLOCK (cfg, tblock, ip);
5616 bblock->next_bb = tblock;
5619 start_new_bblock = 0;
5620 for (i = 0; i < bblock->in_scount; ++i) {
5621 if (cfg->verbose_level > 3)
5622 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5623 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5627 g_slist_free (class_inits);
5630 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5631 link_bblock (cfg, bblock, tblock);
5632 if (sp != stack_start) {
5633 handle_stack_args (cfg, stack_start, sp - stack_start);
5635 CHECK_UNVERIFIABLE (cfg);
5637 bblock->next_bb = tblock;
5640 for (i = 0; i < bblock->in_scount; ++i) {
5641 if (cfg->verbose_level > 3)
5642 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5643 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5646 g_slist_free (class_inits);
5651 bblock->real_offset = cfg->real_offset;
5653 if ((cfg->method == method) && cfg->coverage_info) {
5654 guint32 cil_offset = ip - header->code;
5655 cfg->coverage_info->data [cil_offset].cil_code = ip;
5657 /* TODO: Use an increment here */
5658 #if defined(__i386__)
5659 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5660 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5662 MONO_ADD_INS (cfg->cbb, ins);
5664 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5665 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5669 if (cfg->verbose_level > 3)
5670 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5674 if (cfg->keep_cil_nops)
5675 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5677 MONO_INST_NEW (cfg, ins, OP_NOP);
5679 MONO_ADD_INS (bblock, ins);
5682 MONO_INST_NEW (cfg, ins, OP_BREAK);
5684 MONO_ADD_INS (bblock, ins);
5690 CHECK_STACK_OVF (1);
5691 n = (*ip)-CEE_LDARG_0;
5693 EMIT_NEW_ARGLOAD (cfg, ins, n);
5701 CHECK_STACK_OVF (1);
5702 n = (*ip)-CEE_LDLOC_0;
5704 EMIT_NEW_LOCLOAD (cfg, ins, n);
5713 n = (*ip)-CEE_STLOC_0;
5716 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5718 emit_stloc_ir (cfg, sp, header, n);
5725 CHECK_STACK_OVF (1);
5728 EMIT_NEW_ARGLOAD (cfg, ins, n);
5734 CHECK_STACK_OVF (1);
5737 NEW_ARGLOADA (cfg, ins, n);
5738 MONO_ADD_INS (cfg->cbb, ins);
5748 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5750 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5755 CHECK_STACK_OVF (1);
5758 EMIT_NEW_LOCLOAD (cfg, ins, n);
5762 case CEE_LDLOCA_S: {
5763 unsigned char *tmp_ip;
5765 CHECK_STACK_OVF (1);
5766 CHECK_LOCAL (ip [1]);
5768 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5774 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5783 CHECK_LOCAL (ip [1]);
5784 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5786 emit_stloc_ir (cfg, sp, header, ip [1]);
5791 CHECK_STACK_OVF (1);
5792 EMIT_NEW_PCONST (cfg, ins, NULL);
5793 ins->type = STACK_OBJ;
5798 CHECK_STACK_OVF (1);
5799 EMIT_NEW_ICONST (cfg, ins, -1);
5812 CHECK_STACK_OVF (1);
5813 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5819 CHECK_STACK_OVF (1);
5821 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5827 CHECK_STACK_OVF (1);
5828 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5834 CHECK_STACK_OVF (1);
5835 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5836 ins->type = STACK_I8;
5837 ins->dreg = alloc_dreg (cfg, STACK_I8);
5839 ins->inst_l = (gint64)read64 (ip);
5840 MONO_ADD_INS (bblock, ins);
5846 /* FIXME: we should really allocate this only late in the compilation process */
5847 f = mono_domain_alloc (cfg->domain, sizeof (float));
5849 CHECK_STACK_OVF (1);
5850 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5851 ins->type = STACK_R8;
5852 ins->dreg = alloc_dreg (cfg, STACK_R8);
5856 MONO_ADD_INS (bblock, ins);
5864 /* FIXME: we should really allocate this only late in the compilation process */
5865 d = mono_domain_alloc (cfg->domain, sizeof (double));
5867 CHECK_STACK_OVF (1);
5868 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5869 ins->type = STACK_R8;
5870 ins->dreg = alloc_dreg (cfg, STACK_R8);
5874 MONO_ADD_INS (bblock, ins);
5881 MonoInst *temp, *store;
5883 CHECK_STACK_OVF (1);
5887 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5888 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5890 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5893 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5906 if (sp [0]->type == STACK_R8)
5907 /* we need to pop the value from the x86 FP stack */
5908 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5917 if (stack_start != sp)
5919 token = read32 (ip + 1);
5920 /* FIXME: check the signature matches */
5921 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5926 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5927 GENERIC_SHARING_FAILURE (CEE_JMP);
5929 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5930 CHECK_CFG_EXCEPTION;
5934 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5937 /* Handle tail calls similarly to calls */
5938 n = fsig->param_count + fsig->hasthis;
5940 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5941 call->method = cmethod;
5942 call->tail_call = TRUE;
5943 call->signature = mono_method_signature (cmethod);
5944 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5945 call->inst.inst_p0 = cmethod;
5946 for (i = 0; i < n; ++i)
5947 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5949 mono_arch_emit_call (cfg, call);
5950 MONO_ADD_INS (bblock, (MonoInst*)call);
5953 for (i = 0; i < num_args; ++i)
5954 /* Prevent arguments from being optimized away */
5955 arg_array [i]->flags |= MONO_INST_VOLATILE;
5957 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5958 ins = (MonoInst*)call;
5959 ins->inst_p0 = cmethod;
5960 MONO_ADD_INS (bblock, ins);
5964 start_new_bblock = 1;
5969 case CEE_CALLVIRT: {
5970 MonoInst *addr = NULL;
5971 MonoMethodSignature *fsig = NULL;
5973 int virtual = *ip == CEE_CALLVIRT;
5974 int calli = *ip == CEE_CALLI;
5975 gboolean pass_imt_from_rgctx = FALSE;
5976 MonoInst *imt_arg = NULL;
5977 gboolean pass_vtable = FALSE;
5978 gboolean pass_mrgctx = FALSE;
5979 MonoInst *vtable_arg = NULL;
5980 gboolean check_this = FALSE;
5983 token = read32 (ip + 1);
5990 if (method->wrapper_type != MONO_WRAPPER_NONE)
5991 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5993 fsig = mono_metadata_parse_signature (image, token);
5995 n = fsig->param_count + fsig->hasthis;
5997 MonoMethod *cil_method;
5999 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6000 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6001 cil_method = cmethod;
6002 } else if (constrained_call) {
6003 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6005 * This is needed since get_method_constrained can't find
6006 * the method in klass representing a type var.
6007 * The type var is guaranteed to be a reference type in this
6010 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6011 cil_method = cmethod;
6012 g_assert (!cmethod->klass->valuetype);
6014 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6017 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6018 cil_method = cmethod;
6023 if (!dont_verify && !cfg->skip_visibility) {
6024 MonoMethod *target_method = cil_method;
6025 if (method->is_inflated) {
6026 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6028 if (!mono_method_can_access_method (method_definition, target_method) &&
6029 !mono_method_can_access_method (method, cil_method))
6030 METHOD_ACCESS_FAILURE;
6033 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6034 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6036 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6037 /* MS.NET seems to silently convert this to a callvirt */
6040 if (!cmethod->klass->inited)
6041 if (!mono_class_init (cmethod->klass))
6044 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6045 mini_class_is_system_array (cmethod->klass)) {
6046 array_rank = cmethod->klass->rank;
6047 fsig = mono_method_signature (cmethod);
6049 if (mono_method_signature (cmethod)->pinvoke) {
6050 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6051 check_for_pending_exc, FALSE);
6052 fsig = mono_method_signature (wrapper);
6053 } else if (constrained_call) {
6054 fsig = mono_method_signature (cmethod);
6056 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6060 mono_save_token_info (cfg, image, token, cil_method);
6062 n = fsig->param_count + fsig->hasthis;
6064 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6065 if (check_linkdemand (cfg, method, cmethod))
6067 CHECK_CFG_EXCEPTION;
6070 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6071 g_assert_not_reached ();
6074 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6077 if (!cfg->generic_sharing_context && cmethod)
6078 g_assert (!mono_method_check_context_used (cmethod));
6082 //g_assert (!virtual || fsig->hasthis);
6086 if (constrained_call) {
6088 * We have the `constrained.' prefix opcode.
6090 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6094 * The type parameter is instantiated as a valuetype,
6095 * but that type doesn't override the method we're
6096 * calling, so we need to box `this'.
6098 dreg = alloc_dreg (cfg, STACK_VTYPE);
6099 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6100 ins->klass = constrained_call;
6101 sp [0] = handle_box (cfg, ins, constrained_call);
6102 } else if (!constrained_call->valuetype) {
6103 int dreg = alloc_preg (cfg);
6106 * The type parameter is instantiated as a reference
6107 * type. We have a managed pointer on the stack, so
6108 * we need to dereference it here.
6110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6111 ins->type = STACK_OBJ;
6113 } else if (cmethod->klass->valuetype)
6115 constrained_call = NULL;
6118 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6122 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6123 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6124 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6125 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6126 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6129 * Pass vtable iff target method might
6130 * be shared, which means that sharing
6131 * is enabled for its class and its
6132 * context is sharable (and it's not a
6135 if (sharing_enabled && context_sharable &&
6136 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6140 if (cmethod && mini_method_get_context (cmethod) &&
6141 mini_method_get_context (cmethod)->method_inst) {
6142 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6143 MonoGenericContext *context = mini_method_get_context (cmethod);
6144 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6146 g_assert (!pass_vtable);
6148 if (sharing_enabled && context_sharable)
6152 if (cfg->generic_sharing_context && cmethod) {
6153 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6155 context_used = mono_method_check_context_used (cmethod);
6157 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6158 /* Generic method interface
6159 calls are resolved via a
6160 helper function and don't
6162 if (!cmethod_context || !cmethod_context->method_inst)
6163 pass_imt_from_rgctx = TRUE;
6167 * If a shared method calls another
6168 * shared method then the caller must
6169 * have a generic sharing context
6170 * because the magic trampoline
6171 * requires it. FIXME: We shouldn't
6172 * have to force the vtable/mrgctx
6173 * variable here. Instead there
6174 * should be a flag in the cfg to
6175 * request a generic sharing context.
6178 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6179 mono_get_vtable_var (cfg);
6184 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6186 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6188 CHECK_TYPELOAD (cmethod->klass);
6189 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6194 g_assert (!vtable_arg);
6197 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6199 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6202 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6203 MONO_METHOD_IS_FINAL (cmethod)) {
6210 if (pass_imt_from_rgctx) {
6211 g_assert (!pass_vtable);
6214 imt_arg = emit_get_rgctx_method (cfg, context_used,
6215 cmethod, MONO_RGCTX_INFO_METHOD);
6221 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6222 check->sreg1 = sp [0]->dreg;
6223 MONO_ADD_INS (cfg->cbb, check);
6226 /* Calling virtual generic methods */
6227 if (cmethod && virtual &&
6228 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6229 !(MONO_METHOD_IS_FINAL (cmethod) &&
6230 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6231 mono_method_signature (cmethod)->generic_param_count) {
6232 MonoInst *this_temp, *this_arg_temp, *store;
6233 MonoInst *iargs [4];
6235 g_assert (mono_method_signature (cmethod)->is_inflated);
6237 /* Prevent inlining of methods that contain indirect calls */
6240 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6241 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6242 g_assert (!imt_arg);
6244 imt_arg = emit_get_rgctx_method (cfg, context_used,
6245 cmethod, MONO_RGCTX_INFO_METHOD);
6248 g_assert (cmethod->is_inflated);
6249 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6251 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6255 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6256 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6257 MONO_ADD_INS (bblock, store);
6259 /* FIXME: This should be a managed pointer */
6260 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6262 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6264 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6265 cmethod, MONO_RGCTX_INFO_METHOD);
6266 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6267 addr = mono_emit_jit_icall (cfg,
6268 mono_helper_compile_generic_method, iargs);
6270 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6271 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6272 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6275 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6277 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6280 if (!MONO_TYPE_IS_VOID (fsig->ret))
6281 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6289 /* FIXME: runtime generic context pointer for jumps? */
6290 /* FIXME: handle this for generic sharing eventually */
6291 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6292 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod))) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret)) {
6295 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6298 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6299 call->tail_call = TRUE;
6300 call->method = cmethod;
6301 call->signature = mono_method_signature (cmethod);
6304 /* Handle tail calls similarly to calls */
6305 call->inst.opcode = OP_TAILCALL;
6307 mono_arch_emit_call (cfg, call);
6310 * We implement tail calls by storing the actual arguments into the
6311 * argument variables, then emitting a CEE_JMP.
6313 for (i = 0; i < n; ++i) {
6314 /* Prevent argument from being register allocated */
6315 arg_array [i]->flags |= MONO_INST_VOLATILE;
6316 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6320 ins = (MonoInst*)call;
6321 ins->inst_p0 = cmethod;
6322 ins->inst_p1 = arg_array [0];
6323 MONO_ADD_INS (bblock, ins);
6324 link_bblock (cfg, bblock, end_bblock);
6325 start_new_bblock = 1;
6326 /* skip CEE_RET as well */
6332 /* Conversion to a JIT intrinsic */
6333 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6334 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6335 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6346 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6347 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6348 mono_method_check_inlining (cfg, cmethod) &&
6349 !g_list_find (dont_inline, cmethod)) {
6351 gboolean allways = FALSE;
6353 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6354 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6355 /* Prevent inlining of methods that call wrappers */
6357 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6361 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6363 cfg->real_offset += 5;
6366 if (!MONO_TYPE_IS_VOID (fsig->ret))
6367 /* *sp is already set by inline_method */
6370 inline_costs += costs;
6376 inline_costs += 10 * num_calls++;
6378 /* Tail recursion elimination */
6379 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6380 gboolean has_vtargs = FALSE;
6383 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6386 /* keep it simple */
6387 for (i = fsig->param_count - 1; i >= 0; i--) {
6388 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6393 for (i = 0; i < n; ++i)
6394 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6395 MONO_INST_NEW (cfg, ins, OP_BR);
6396 MONO_ADD_INS (bblock, ins);
6397 tblock = start_bblock->out_bb [0];
6398 link_bblock (cfg, bblock, tblock);
6399 ins->inst_target_bb = tblock;
6400 start_new_bblock = 1;
6402 /* skip the CEE_RET, too */
6403 if (ip_in_bb (cfg, bblock, ip + 5))
6413 /* Generic sharing */
6414 /* FIXME: only do this for generic methods if
6415 they are not shared! */
6416 if (context_used && !imt_arg && !array_rank &&
6417 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6418 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6419 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6420 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6423 g_assert (cfg->generic_sharing_context && cmethod);
6427 * We are compiling a call to a
6428 * generic method from shared code,
6429 * which means that we have to look up
6430 * the method in the rgctx and do an
6433 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6436 /* Indirect calls */
6438 g_assert (!imt_arg);
6440 if (*ip == CEE_CALL)
6441 g_assert (context_used);
6442 else if (*ip == CEE_CALLI)
6443 g_assert (!vtable_arg);
6445 /* FIXME: what the hell is this??? */
6446 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6447 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6449 /* Prevent inlining of methods with indirect calls */
6453 #ifdef MONO_ARCH_RGCTX_REG
6455 int rgctx_reg = mono_alloc_preg (cfg);
6457 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6458 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6459 call = (MonoCallInst*)ins;
6460 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6461 cfg->uses_rgctx_reg = TRUE;
6462 call->rgctx_reg = TRUE;
6467 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6469 * Instead of emitting an indirect call, emit a direct call
6470 * with the contents of the aotconst as the patch info.
6472 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6475 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6478 if (!MONO_TYPE_IS_VOID (fsig->ret))
6479 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6490 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6491 if (sp [fsig->param_count]->type == STACK_OBJ) {
6492 MonoInst *iargs [2];
6495 iargs [1] = sp [fsig->param_count];
6497 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6500 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6501 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6502 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6503 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6505 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6508 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6509 if (!cmethod->klass->element_class->valuetype && !readonly)
6510 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6513 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6516 g_assert_not_reached ();
6524 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6526 if (!MONO_TYPE_IS_VOID (fsig->ret))
6527 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6537 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6539 } else if (imt_arg) {
6540 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6542 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6545 if (!MONO_TYPE_IS_VOID (fsig->ret))
6546 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6553 if (cfg->method != method) {
6554 /* return from inlined method */
6556 * If in_count == 0, that means the ret is unreachable due to
6557 * being preceeded by a throw. In that case, inline_method () will
6558 * handle setting the return value
6559 * (test case: test_0_inline_throw ()).
6561 if (return_var && cfg->cbb->in_count) {
6565 //g_assert (returnvar != -1);
6566 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6567 cfg->ret_var_set = TRUE;
6571 MonoType *ret_type = mono_method_signature (method)->ret;
6573 g_assert (!return_var);
6576 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6579 if (!cfg->vret_addr) {
6582 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6584 EMIT_NEW_RETLOADA (cfg, ret_addr);
6586 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6587 ins->klass = mono_class_from_mono_type (ret_type);
6590 #ifdef MONO_ARCH_SOFT_FLOAT
6591 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6592 MonoInst *iargs [1];
6596 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6597 mono_arch_emit_setret (cfg, method, conv);
6599 mono_arch_emit_setret (cfg, method, *sp);
6602 mono_arch_emit_setret (cfg, method, *sp);
6607 if (sp != stack_start)
6609 MONO_INST_NEW (cfg, ins, OP_BR);
6611 ins->inst_target_bb = end_bblock;
6612 MONO_ADD_INS (bblock, ins);
6613 link_bblock (cfg, bblock, end_bblock);
6614 start_new_bblock = 1;
6618 MONO_INST_NEW (cfg, ins, OP_BR);
6620 target = ip + 1 + (signed char)(*ip);
6622 GET_BBLOCK (cfg, tblock, target);
6623 link_bblock (cfg, bblock, tblock);
6624 ins->inst_target_bb = tblock;
6625 if (sp != stack_start) {
6626 handle_stack_args (cfg, stack_start, sp - stack_start);
6628 CHECK_UNVERIFIABLE (cfg);
6630 MONO_ADD_INS (bblock, ins);
6631 start_new_bblock = 1;
6632 inline_costs += BRANCH_COST;
6646 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6648 target = ip + 1 + *(signed char*)ip;
6654 inline_costs += BRANCH_COST;
6658 MONO_INST_NEW (cfg, ins, OP_BR);
6661 target = ip + 4 + (gint32)read32(ip);
6663 GET_BBLOCK (cfg, tblock, target);
6664 link_bblock (cfg, bblock, tblock);
6665 ins->inst_target_bb = tblock;
6666 if (sp != stack_start) {
6667 handle_stack_args (cfg, stack_start, sp - stack_start);
6669 CHECK_UNVERIFIABLE (cfg);
6672 MONO_ADD_INS (bblock, ins);
6674 start_new_bblock = 1;
6675 inline_costs += BRANCH_COST;
6682 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6683 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6684 guint32 opsize = is_short ? 1 : 4;
6686 CHECK_OPSIZE (opsize);
6688 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6691 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6696 GET_BBLOCK (cfg, tblock, target);
6697 link_bblock (cfg, bblock, tblock);
6698 GET_BBLOCK (cfg, tblock, ip);
6699 link_bblock (cfg, bblock, tblock);
6701 if (sp != stack_start) {
6702 handle_stack_args (cfg, stack_start, sp - stack_start);
6703 CHECK_UNVERIFIABLE (cfg);
6706 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6707 cmp->sreg1 = sp [0]->dreg;
6708 type_from_op (cmp, sp [0], NULL);
6711 #if SIZEOF_REGISTER == 4
6712 if (cmp->opcode == OP_LCOMPARE_IMM) {
6713 /* Convert it to OP_LCOMPARE */
6714 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6715 ins->type = STACK_I8;
6716 ins->dreg = alloc_dreg (cfg, STACK_I8);
6718 MONO_ADD_INS (bblock, ins);
6719 cmp->opcode = OP_LCOMPARE;
6720 cmp->sreg2 = ins->dreg;
6723 MONO_ADD_INS (bblock, cmp);
6725 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6726 type_from_op (ins, sp [0], NULL);
6727 MONO_ADD_INS (bblock, ins);
6728 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6729 GET_BBLOCK (cfg, tblock, target);
6730 ins->inst_true_bb = tblock;
6731 GET_BBLOCK (cfg, tblock, ip);
6732 ins->inst_false_bb = tblock;
6733 start_new_bblock = 2;
6736 inline_costs += BRANCH_COST;
6751 MONO_INST_NEW (cfg, ins, *ip);
6753 target = ip + 4 + (gint32)read32(ip);
6759 inline_costs += BRANCH_COST;
6763 MonoBasicBlock **targets;
6764 MonoBasicBlock *default_bblock;
6765 MonoJumpInfoBBTable *table;
6766 int offset_reg = alloc_preg (cfg);
6767 int target_reg = alloc_preg (cfg);
6768 int table_reg = alloc_preg (cfg);
6769 int sum_reg = alloc_preg (cfg);
6770 gboolean use_op_switch;
6774 n = read32 (ip + 1);
6777 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6781 CHECK_OPSIZE (n * sizeof (guint32));
6782 target = ip + n * sizeof (guint32);
6784 GET_BBLOCK (cfg, default_bblock, target);
6786 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6787 for (i = 0; i < n; ++i) {
6788 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6789 targets [i] = tblock;
6793 if (sp != stack_start) {
6795 * Link the current bb with the targets as well, so handle_stack_args
6796 * will set their in_stack correctly.
6798 link_bblock (cfg, bblock, default_bblock);
6799 for (i = 0; i < n; ++i)
6800 link_bblock (cfg, bblock, targets [i]);
6802 handle_stack_args (cfg, stack_start, sp - stack_start);
6804 CHECK_UNVERIFIABLE (cfg);
6807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6811 for (i = 0; i < n; ++i)
6812 link_bblock (cfg, bblock, targets [i]);
6814 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6815 table->table = targets;
6816 table->table_size = n;
6818 use_op_switch = FALSE;
6820 /* ARM implements SWITCH statements differently */
6821 /* FIXME: Make it use the generic implementation */
6822 if (!cfg->compile_aot)
6823 use_op_switch = TRUE;
6826 if (COMPILE_LLVM (cfg))
6827 use_op_switch = TRUE;
6829 if (use_op_switch) {
6830 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6831 ins->sreg1 = src1->dreg;
6832 ins->inst_p0 = table;
6833 ins->inst_many_bb = targets;
6834 ins->klass = GUINT_TO_POINTER (n);
6835 MONO_ADD_INS (cfg->cbb, ins);
6837 if (sizeof (gpointer) == 8)
6838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6842 #if SIZEOF_REGISTER == 8
6843 /* The upper word might not be zero, and we add it to a 64 bit address later */
6844 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6847 if (cfg->compile_aot) {
6848 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6850 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6851 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6852 ins->inst_p0 = table;
6853 ins->dreg = table_reg;
6854 MONO_ADD_INS (cfg->cbb, ins);
6857 /* FIXME: Use load_memindex */
6858 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6860 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6862 start_new_bblock = 1;
6863 inline_costs += (BRANCH_COST * 2);
6883 dreg = alloc_freg (cfg);
6886 dreg = alloc_lreg (cfg);
6889 dreg = alloc_preg (cfg);
6892 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6893 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6894 ins->flags |= ins_flag;
6896 MONO_ADD_INS (bblock, ins);
6911 #if HAVE_WRITE_BARRIERS
6912 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6913 /* insert call to write barrier */
6914 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6915 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6922 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6923 ins->flags |= ins_flag;
6925 MONO_ADD_INS (bblock, ins);
6933 MONO_INST_NEW (cfg, ins, (*ip));
6935 ins->sreg1 = sp [0]->dreg;
6936 ins->sreg2 = sp [1]->dreg;
6937 type_from_op (ins, sp [0], sp [1]);
6939 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6941 /* Use the immediate opcodes if possible */
6942 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6943 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6944 if (imm_opcode != -1) {
6945 ins->opcode = imm_opcode;
6946 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6949 sp [1]->opcode = OP_NOP;
6953 MONO_ADD_INS ((cfg)->cbb, (ins));
6955 *sp++ = mono_decompose_opcode (cfg, ins);
6972 MONO_INST_NEW (cfg, ins, (*ip));
6974 ins->sreg1 = sp [0]->dreg;
6975 ins->sreg2 = sp [1]->dreg;
6976 type_from_op (ins, sp [0], sp [1]);
6978 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6979 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6981 /* FIXME: Pass opcode to is_inst_imm */
6983 /* Use the immediate opcodes if possible */
6984 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6987 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6988 if (imm_opcode != -1) {
6989 ins->opcode = imm_opcode;
6990 if (sp [1]->opcode == OP_I8CONST) {
6991 #if SIZEOF_REGISTER == 8
6992 ins->inst_imm = sp [1]->inst_l;
6994 ins->inst_ls_word = sp [1]->inst_ls_word;
6995 ins->inst_ms_word = sp [1]->inst_ms_word;
6999 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7002 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7003 if (sp [1]->next == NULL)
7004 sp [1]->opcode = OP_NOP;
7007 MONO_ADD_INS ((cfg)->cbb, (ins));
7009 *sp++ = mono_decompose_opcode (cfg, ins);
7022 case CEE_CONV_OVF_I8:
7023 case CEE_CONV_OVF_U8:
7027 /* Special case this earlier so we have long constants in the IR */
7028 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7029 int data = sp [-1]->inst_c0;
7030 sp [-1]->opcode = OP_I8CONST;
7031 sp [-1]->type = STACK_I8;
7032 #if SIZEOF_REGISTER == 8
7033 if ((*ip) == CEE_CONV_U8)
7034 sp [-1]->inst_c0 = (guint32)data;
7036 sp [-1]->inst_c0 = data;
7038 sp [-1]->inst_ls_word = data;
7039 if ((*ip) == CEE_CONV_U8)
7040 sp [-1]->inst_ms_word = 0;
7042 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7044 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7051 case CEE_CONV_OVF_I4:
7052 case CEE_CONV_OVF_I1:
7053 case CEE_CONV_OVF_I2:
7054 case CEE_CONV_OVF_I:
7055 case CEE_CONV_OVF_U:
7058 if (sp [-1]->type == STACK_R8) {
7059 ADD_UNOP (CEE_CONV_OVF_I8);
7066 case CEE_CONV_OVF_U1:
7067 case CEE_CONV_OVF_U2:
7068 case CEE_CONV_OVF_U4:
7071 if (sp [-1]->type == STACK_R8) {
7072 ADD_UNOP (CEE_CONV_OVF_U8);
7079 case CEE_CONV_OVF_I1_UN:
7080 case CEE_CONV_OVF_I2_UN:
7081 case CEE_CONV_OVF_I4_UN:
7082 case CEE_CONV_OVF_I8_UN:
7083 case CEE_CONV_OVF_U1_UN:
7084 case CEE_CONV_OVF_U2_UN:
7085 case CEE_CONV_OVF_U4_UN:
7086 case CEE_CONV_OVF_U8_UN:
7087 case CEE_CONV_OVF_I_UN:
7088 case CEE_CONV_OVF_U_UN:
7098 case CEE_ADD_OVF_UN:
7100 case CEE_MUL_OVF_UN:
7102 case CEE_SUB_OVF_UN:
7110 token = read32 (ip + 1);
7111 klass = mini_get_class (method, token, generic_context);
7112 CHECK_TYPELOAD (klass);
7114 if (generic_class_is_reference_type (cfg, klass)) {
7115 MonoInst *store, *load;
7116 int dreg = alloc_preg (cfg);
7118 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7119 load->flags |= ins_flag;
7120 MONO_ADD_INS (cfg->cbb, load);
7122 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7123 store->flags |= ins_flag;
7124 MONO_ADD_INS (cfg->cbb, store);
7126 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7138 token = read32 (ip + 1);
7139 klass = mini_get_class (method, token, generic_context);
7140 CHECK_TYPELOAD (klass);
7142 /* Optimize the common ldobj+stloc combination */
7152 loc_index = ip [5] - CEE_STLOC_0;
7159 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7160 CHECK_LOCAL (loc_index);
7162 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7163 ins->dreg = cfg->locals [loc_index]->dreg;
7169 /* Optimize the ldobj+stobj combination */
7170 /* The reference case ends up being a load+store anyway */
7171 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7176 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7183 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7192 CHECK_STACK_OVF (1);
7194 n = read32 (ip + 1);
7196 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7197 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7198 ins->type = STACK_OBJ;
7201 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7202 MonoInst *iargs [1];
7204 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7205 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7207 if (cfg->opt & MONO_OPT_SHARED) {
7208 MonoInst *iargs [3];
7210 if (cfg->compile_aot) {
7211 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7213 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7214 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7215 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7216 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7217 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7219 if (bblock->out_of_line) {
7220 MonoInst *iargs [2];
7222 if (image == mono_defaults.corlib) {
7224 * Avoid relocations in AOT and save some space by using a
7225 * version of helper_ldstr specialized to mscorlib.
7227 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7228 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7230 /* Avoid creating the string object */
7231 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7232 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7233 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7237 if (cfg->compile_aot) {
7238 NEW_LDSTRCONST (cfg, ins, image, n);
7240 MONO_ADD_INS (bblock, ins);
7243 NEW_PCONST (cfg, ins, NULL);
7244 ins->type = STACK_OBJ;
7245 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7247 MONO_ADD_INS (bblock, ins);
7256 MonoInst *iargs [2];
7257 MonoMethodSignature *fsig;
7260 MonoInst *vtable_arg = NULL;
7263 token = read32 (ip + 1);
7264 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7267 fsig = mono_method_get_signature (cmethod, image, token);
7269 mono_save_token_info (cfg, image, token, cmethod);
7271 if (!mono_class_init (cmethod->klass))
7274 if (cfg->generic_sharing_context)
7275 context_used = mono_method_check_context_used (cmethod);
7277 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7278 if (check_linkdemand (cfg, method, cmethod))
7280 CHECK_CFG_EXCEPTION;
7281 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7282 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7285 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7286 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7287 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7289 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7290 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7292 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7296 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7297 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7299 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7301 CHECK_TYPELOAD (cmethod->klass);
7302 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7307 n = fsig->param_count;
7311 * Generate smaller code for the common newobj <exception> instruction in
7312 * argument checking code.
7314 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7315 is_exception_class (cmethod->klass) && n <= 2 &&
7316 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7317 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7318 MonoInst *iargs [3];
7320 g_assert (!vtable_arg);
7324 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7327 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7331 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7336 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7339 g_assert_not_reached ();
7347 /* move the args to allow room for 'this' in the first position */
7353 /* check_call_signature () requires sp[0] to be set */
7354 this_ins.type = STACK_OBJ;
7356 if (check_call_signature (cfg, fsig, sp))
7361 if (mini_class_is_system_array (cmethod->klass)) {
7362 g_assert (!vtable_arg);
7365 *sp = emit_get_rgctx_method (cfg, context_used,
7366 cmethod, MONO_RGCTX_INFO_METHOD);
7368 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7371 /* Avoid varargs in the common case */
7372 if (fsig->param_count == 1)
7373 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7374 else if (fsig->param_count == 2)
7375 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7377 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7378 } else if (cmethod->string_ctor) {
7379 g_assert (!context_used);
7380 g_assert (!vtable_arg);
7381 /* we simply pass a null pointer */
7382 EMIT_NEW_PCONST (cfg, *sp, NULL);
7383 /* now call the string ctor */
7384 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7386 MonoInst* callvirt_this_arg = NULL;
7388 if (cmethod->klass->valuetype) {
7389 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7390 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7391 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7396 * The code generated by mini_emit_virtual_call () expects
7397 * iargs [0] to be a boxed instance, but luckily the vcall
7398 * will be transformed into a normal call there.
7400 } else if (context_used) {
7404 if (cfg->opt & MONO_OPT_SHARED)
7405 rgctx_info = MONO_RGCTX_INFO_KLASS;
7407 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7408 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7410 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7413 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7415 CHECK_TYPELOAD (cmethod->klass);
7418 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7419 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7420 * As a workaround, we call class cctors before allocating objects.
7422 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7423 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7424 if (cfg->verbose_level > 2)
7425 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7426 class_inits = g_slist_prepend (class_inits, vtable);
7429 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7434 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7436 /* Now call the actual ctor */
7437 /* Avoid virtual calls to ctors if possible */
7438 if (cmethod->klass->marshalbyref)
7439 callvirt_this_arg = sp [0];
7441 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7442 mono_method_check_inlining (cfg, cmethod) &&
7443 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7444 !g_list_find (dont_inline, cmethod)) {
7447 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7448 cfg->real_offset += 5;
7451 inline_costs += costs - 5;
7454 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7456 } else if (context_used &&
7457 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7458 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7459 MonoInst *cmethod_addr;
7461 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7462 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7464 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7467 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7468 callvirt_this_arg, NULL, vtable_arg);
7469 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7470 GENERIC_SHARING_FAILURE (*ip);
7474 if (alloc == NULL) {
7476 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7477 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7491 token = read32 (ip + 1);
7492 klass = mini_get_class (method, token, generic_context);
7493 CHECK_TYPELOAD (klass);
7494 if (sp [0]->type != STACK_OBJ)
7497 if (cfg->generic_sharing_context)
7498 context_used = mono_class_check_context_used (klass);
7507 args [1] = emit_get_rgctx_klass (cfg, context_used,
7508 klass, MONO_RGCTX_INFO_KLASS);
7510 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7514 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7515 MonoMethod *mono_castclass;
7516 MonoInst *iargs [1];
7519 mono_castclass = mono_marshal_get_castclass (klass);
7522 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7523 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7524 g_assert (costs > 0);
7527 cfg->real_offset += 5;
7532 inline_costs += costs;
7535 ins = handle_castclass (cfg, klass, *sp);
7545 token = read32 (ip + 1);
7546 klass = mini_get_class (method, token, generic_context);
7547 CHECK_TYPELOAD (klass);
7548 if (sp [0]->type != STACK_OBJ)
7551 if (cfg->generic_sharing_context)
7552 context_used = mono_class_check_context_used (klass);
7561 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7563 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7567 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7568 MonoMethod *mono_isinst;
7569 MonoInst *iargs [1];
7572 mono_isinst = mono_marshal_get_isinst (klass);
7575 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7576 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7577 g_assert (costs > 0);
7580 cfg->real_offset += 5;
7585 inline_costs += costs;
7588 ins = handle_isinst (cfg, klass, *sp);
7595 case CEE_UNBOX_ANY: {
7599 token = read32 (ip + 1);
7600 klass = mini_get_class (method, token, generic_context);
7601 CHECK_TYPELOAD (klass);
7603 mono_save_token_info (cfg, image, token, klass);
7605 if (cfg->generic_sharing_context)
7606 context_used = mono_class_check_context_used (klass);
7608 if (generic_class_is_reference_type (cfg, klass)) {
7611 MonoInst *iargs [2];
7616 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7617 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7621 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7622 MonoMethod *mono_castclass;
7623 MonoInst *iargs [1];
7626 mono_castclass = mono_marshal_get_castclass (klass);
7629 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7630 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7632 g_assert (costs > 0);
7635 cfg->real_offset += 5;
7639 inline_costs += costs;
7641 ins = handle_castclass (cfg, klass, *sp);
7649 if (mono_class_is_nullable (klass)) {
7650 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7657 ins = handle_unbox (cfg, klass, sp, context_used);
7663 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7676 token = read32 (ip + 1);
7677 klass = mini_get_class (method, token, generic_context);
7678 CHECK_TYPELOAD (klass);
7680 mono_save_token_info (cfg, image, token, klass);
7682 if (cfg->generic_sharing_context)
7683 context_used = mono_class_check_context_used (klass);
7685 if (generic_class_is_reference_type (cfg, klass)) {
7691 if (klass == mono_defaults.void_class)
7693 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7695 /* frequent check in generic code: box (struct), brtrue */
7696 if (!mono_class_is_nullable (klass) &&
7697 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7698 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7700 MONO_INST_NEW (cfg, ins, OP_BR);
7701 if (*ip == CEE_BRTRUE_S) {
7704 target = ip + 1 + (signed char)(*ip);
7709 target = ip + 4 + (gint)(read32 (ip));
7712 GET_BBLOCK (cfg, tblock, target);
7713 link_bblock (cfg, bblock, tblock);
7714 ins->inst_target_bb = tblock;
7715 GET_BBLOCK (cfg, tblock, ip);
7717 * This leads to some inconsistency, since the two bblocks are
7718 * not really connected, but it is needed for handling stack
7719 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7720 * FIXME: This should only be needed if sp != stack_start, but that
7721 * doesn't work for some reason (test failure in mcs/tests on x86).
7723 link_bblock (cfg, bblock, tblock);
7724 if (sp != stack_start) {
7725 handle_stack_args (cfg, stack_start, sp - stack_start);
7727 CHECK_UNVERIFIABLE (cfg);
7729 MONO_ADD_INS (bblock, ins);
7730 start_new_bblock = 1;
7738 if (cfg->opt & MONO_OPT_SHARED)
7739 rgctx_info = MONO_RGCTX_INFO_KLASS;
7741 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7742 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7743 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7745 *sp++ = handle_box (cfg, val, klass);
7756 token = read32 (ip + 1);
7757 klass = mini_get_class (method, token, generic_context);
7758 CHECK_TYPELOAD (klass);
7760 mono_save_token_info (cfg, image, token, klass);
7762 if (cfg->generic_sharing_context)
7763 context_used = mono_class_check_context_used (klass);
7765 if (mono_class_is_nullable (klass)) {
7768 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7769 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7773 ins = handle_unbox (cfg, klass, sp, context_used);
7783 MonoClassField *field;
7787 if (*ip == CEE_STFLD) {
7794 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7796 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7799 token = read32 (ip + 1);
7800 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7801 field = mono_method_get_wrapper_data (method, token);
7802 klass = field->parent;
7805 field = mono_field_from_token (image, token, &klass, generic_context);
7809 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7810 FIELD_ACCESS_FAILURE;
7811 mono_class_init (klass);
7813 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7814 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7815 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7816 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7819 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7820 if (*ip == CEE_STFLD) {
7821 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7823 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7824 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7825 MonoInst *iargs [5];
7828 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7829 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7830 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7834 if (cfg->opt & MONO_OPT_INLINE) {
7835 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7836 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7837 g_assert (costs > 0);
7839 cfg->real_offset += 5;
7842 inline_costs += costs;
7844 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7849 #if HAVE_WRITE_BARRIERS
7850 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7851 /* insert call to write barrier */
7852 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7853 MonoInst *iargs [2];
7856 dreg = alloc_preg (cfg);
7857 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7859 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7863 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7865 store->flags |= ins_flag;
7872 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7873 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7874 MonoInst *iargs [4];
7877 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7878 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7879 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7880 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7881 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7882 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7884 g_assert (costs > 0);
7886 cfg->real_offset += 5;
7890 inline_costs += costs;
7892 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7896 if (sp [0]->type == STACK_VTYPE) {
7899 /* Have to compute the address of the variable */
7901 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7903 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7905 g_assert (var->klass == klass);
7907 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7911 if (*ip == CEE_LDFLDA) {
7912 dreg = alloc_preg (cfg);
7914 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7915 ins->klass = mono_class_from_mono_type (field->type);
7916 ins->type = STACK_MP;
7921 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7922 load->flags |= ins_flag;
7933 MonoClassField *field;
7934 gpointer addr = NULL;
7935 gboolean is_special_static;
7938 token = read32 (ip + 1);
7940 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7941 field = mono_method_get_wrapper_data (method, token);
7942 klass = field->parent;
7945 field = mono_field_from_token (image, token, &klass, generic_context);
7948 mono_class_init (klass);
7949 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7950 FIELD_ACCESS_FAILURE;
7952 /* if the class is Critical then transparent code cannot access its fields */
7953 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7954 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7957 * We can only support shared generic static
7958 * field access on architectures where the
7959 * trampoline code has been extended to handle
7960 * the generic class init.
7962 #ifndef MONO_ARCH_VTABLE_REG
7963 GENERIC_SHARING_FAILURE (*ip);
7966 if (cfg->generic_sharing_context)
7967 context_used = mono_class_check_context_used (klass);
7969 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7971 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7972 * to be called here.
7974 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7975 mono_class_vtable (cfg->domain, klass);
7976 CHECK_TYPELOAD (klass);
7978 mono_domain_lock (cfg->domain);
7979 if (cfg->domain->special_static_fields)
7980 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7981 mono_domain_unlock (cfg->domain);
7983 is_special_static = mono_class_field_is_special_static (field);
7985 /* Generate IR to compute the field address */
7987 if ((cfg->opt & MONO_OPT_SHARED) ||
7988 (cfg->compile_aot && is_special_static) ||
7989 (context_used && is_special_static)) {
7990 MonoInst *iargs [2];
7992 g_assert (field->parent);
7993 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7995 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7996 field, MONO_RGCTX_INFO_CLASS_FIELD);
7998 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8000 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8001 } else if (context_used) {
8002 MonoInst *static_data;
8005 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8006 method->klass->name_space, method->klass->name, method->name,
8007 depth, field->offset);
8010 if (mono_class_needs_cctor_run (klass, method)) {
8014 vtable = emit_get_rgctx_klass (cfg, context_used,
8015 klass, MONO_RGCTX_INFO_VTABLE);
8017 // FIXME: This doesn't work since it tries to pass the argument
8018 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8020 * The vtable pointer is always passed in a register regardless of
8021 * the calling convention, so assign it manually, and make a call
8022 * using a signature without parameters.
8024 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8025 #ifdef MONO_ARCH_VTABLE_REG
8026 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8027 cfg->uses_vtable_reg = TRUE;
8034 * The pointer we're computing here is
8036 * super_info.static_data + field->offset
8038 static_data = emit_get_rgctx_klass (cfg, context_used,
8039 klass, MONO_RGCTX_INFO_STATIC_DATA);
8041 if (field->offset == 0) {
8044 int addr_reg = mono_alloc_preg (cfg);
8045 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8047 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8048 MonoInst *iargs [2];
8050 g_assert (field->parent);
8051 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8052 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8053 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8055 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8057 CHECK_TYPELOAD (klass);
8059 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8060 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8061 if (cfg->verbose_level > 2)
8062 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8063 class_inits = g_slist_prepend (class_inits, vtable);
8065 if (cfg->run_cctors) {
8067 /* This makes so that inline cannot trigger */
8068 /* .cctors: too many apps depend on them */
8069 /* running with a specific order... */
8070 if (! vtable->initialized)
8072 ex = mono_runtime_class_init_full (vtable, FALSE);
8074 set_exception_object (cfg, ex);
8075 goto exception_exit;
8079 addr = (char*)vtable->data + field->offset;
8081 if (cfg->compile_aot)
8082 EMIT_NEW_SFLDACONST (cfg, ins, field);
8084 EMIT_NEW_PCONST (cfg, ins, addr);
8087 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8088 * This could be later optimized to do just a couple of
8089 * memory dereferences with constant offsets.
8091 MonoInst *iargs [1];
8092 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8093 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8097 /* Generate IR to do the actual load/store operation */
8099 if (*ip == CEE_LDSFLDA) {
8100 ins->klass = mono_class_from_mono_type (field->type);
8101 ins->type = STACK_PTR;
8103 } else if (*ip == CEE_STSFLD) {
8108 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8109 store->flags |= ins_flag;
8111 gboolean is_const = FALSE;
8112 MonoVTable *vtable = NULL;
8114 if (!context_used) {
8115 vtable = mono_class_vtable (cfg->domain, klass);
8116 CHECK_TYPELOAD (klass);
8118 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8119 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8120 gpointer addr = (char*)vtable->data + field->offset;
8121 int ro_type = field->type->type;
8122 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8123 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8125 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8128 case MONO_TYPE_BOOLEAN:
8130 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8134 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8137 case MONO_TYPE_CHAR:
8139 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8143 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8148 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8152 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8155 #ifndef HAVE_MOVING_COLLECTOR
8158 case MONO_TYPE_STRING:
8159 case MONO_TYPE_OBJECT:
8160 case MONO_TYPE_CLASS:
8161 case MONO_TYPE_SZARRAY:
8163 case MONO_TYPE_FNPTR:
8164 case MONO_TYPE_ARRAY:
8165 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8166 type_to_eval_stack_type ((cfg), field->type, *sp);
8172 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8177 case MONO_TYPE_VALUETYPE:
8187 CHECK_STACK_OVF (1);
8189 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8190 load->flags |= ins_flag;
8203 token = read32 (ip + 1);
8204 klass = mini_get_class (method, token, generic_context);
8205 CHECK_TYPELOAD (klass);
8206 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8207 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8218 const char *data_ptr;
8220 guint32 field_token;
8226 token = read32 (ip + 1);
8228 klass = mini_get_class (method, token, generic_context);
8229 CHECK_TYPELOAD (klass);
8231 if (cfg->generic_sharing_context)
8232 context_used = mono_class_check_context_used (klass);
8237 /* FIXME: Decompose later to help abcrem */
8240 args [0] = emit_get_rgctx_klass (cfg, context_used,
8241 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8246 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8248 if (cfg->opt & MONO_OPT_SHARED) {
8249 /* Decompose now to avoid problems with references to the domainvar */
8250 MonoInst *iargs [3];
8252 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8253 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8256 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8258 /* Decompose later since it is needed by abcrem */
8259 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8260 ins->dreg = alloc_preg (cfg);
8261 ins->sreg1 = sp [0]->dreg;
8262 ins->inst_newa_class = klass;
8263 ins->type = STACK_OBJ;
8265 MONO_ADD_INS (cfg->cbb, ins);
8266 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8267 cfg->cbb->has_array_access = TRUE;
8269 /* Needed so mono_emit_load_get_addr () gets called */
8270 mono_get_got_var (cfg);
8280 * we inline/optimize the initialization sequence if possible.
8281 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8282 * for small sizes open code the memcpy
8283 * ensure the rva field is big enough
8285 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8286 MonoMethod *memcpy_method = get_memcpy_method ();
8287 MonoInst *iargs [3];
8288 int add_reg = alloc_preg (cfg);
8290 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8291 if (cfg->compile_aot) {
8292 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8294 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8296 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8297 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8306 if (sp [0]->type != STACK_OBJ)
8309 dreg = alloc_preg (cfg);
8310 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8311 ins->dreg = alloc_preg (cfg);
8312 ins->sreg1 = sp [0]->dreg;
8313 ins->type = STACK_I4;
8314 MONO_ADD_INS (cfg->cbb, ins);
8315 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8316 cfg->cbb->has_array_access = TRUE;
8324 if (sp [0]->type != STACK_OBJ)
8327 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8329 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8330 CHECK_TYPELOAD (klass);
8331 /* we need to make sure that this array is exactly the type it needs
8332 * to be for correctness. the wrappers are lax with their usage
8333 * so we need to ignore them here
8335 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8336 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8339 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8343 case CEE_LDELEM_ANY:
8354 case CEE_LDELEM_REF: {
8360 if (*ip == CEE_LDELEM_ANY) {
8362 token = read32 (ip + 1);
8363 klass = mini_get_class (method, token, generic_context);
8364 CHECK_TYPELOAD (klass);
8365 mono_class_init (klass);
8368 klass = array_access_to_klass (*ip);
8370 if (sp [0]->type != STACK_OBJ)
8373 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8375 if (sp [1]->opcode == OP_ICONST) {
8376 int array_reg = sp [0]->dreg;
8377 int index_reg = sp [1]->dreg;
8378 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8380 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8381 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8383 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8384 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8387 if (*ip == CEE_LDELEM_ANY)
8400 case CEE_STELEM_REF:
8401 case CEE_STELEM_ANY: {
8407 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8409 if (*ip == CEE_STELEM_ANY) {
8411 token = read32 (ip + 1);
8412 klass = mini_get_class (method, token, generic_context);
8413 CHECK_TYPELOAD (klass);
8414 mono_class_init (klass);
8417 klass = array_access_to_klass (*ip);
8419 if (sp [0]->type != STACK_OBJ)
8422 /* storing a NULL doesn't need any of the complex checks in stelemref */
8423 if (generic_class_is_reference_type (cfg, klass) &&
8424 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8425 MonoMethod* helper = mono_marshal_get_stelemref ();
8426 MonoInst *iargs [3];
8428 if (sp [0]->type != STACK_OBJ)
8430 if (sp [2]->type != STACK_OBJ)
8437 mono_emit_method_call (cfg, helper, iargs, NULL);
8439 if (sp [1]->opcode == OP_ICONST) {
8440 int array_reg = sp [0]->dreg;
8441 int index_reg = sp [1]->dreg;
8442 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8444 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8445 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8447 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8448 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8452 if (*ip == CEE_STELEM_ANY)
8459 case CEE_CKFINITE: {
8463 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8464 ins->sreg1 = sp [0]->dreg;
8465 ins->dreg = alloc_freg (cfg);
8466 ins->type = STACK_R8;
8467 MONO_ADD_INS (bblock, ins);
8469 *sp++ = mono_decompose_opcode (cfg, ins);
8474 case CEE_REFANYVAL: {
8475 MonoInst *src_var, *src;
8477 int klass_reg = alloc_preg (cfg);
8478 int dreg = alloc_preg (cfg);
8481 MONO_INST_NEW (cfg, ins, *ip);
8484 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8485 CHECK_TYPELOAD (klass);
8486 mono_class_init (klass);
8488 if (cfg->generic_sharing_context)
8489 context_used = mono_class_check_context_used (klass);
8492 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8494 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8495 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8496 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8499 MonoInst *klass_ins;
8501 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8502 klass, MONO_RGCTX_INFO_KLASS);
8505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8506 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8508 mini_emit_class_check (cfg, klass_reg, klass);
8510 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8511 ins->type = STACK_MP;
8516 case CEE_MKREFANY: {
8517 MonoInst *loc, *addr;
8520 MONO_INST_NEW (cfg, ins, *ip);
8523 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8524 CHECK_TYPELOAD (klass);
8525 mono_class_init (klass);
8527 if (cfg->generic_sharing_context)
8528 context_used = mono_class_check_context_used (klass);
8530 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8531 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8534 MonoInst *const_ins;
8535 int type_reg = alloc_preg (cfg);
8537 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8538 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8540 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8541 } else if (cfg->compile_aot) {
8542 int const_reg = alloc_preg (cfg);
8543 int type_reg = alloc_preg (cfg);
8545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8546 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8548 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8550 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8551 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8553 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8555 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8556 ins->type = STACK_VTYPE;
8557 ins->klass = mono_defaults.typed_reference_class;
8564 MonoClass *handle_class;
8566 CHECK_STACK_OVF (1);
8569 n = read32 (ip + 1);
8571 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8572 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8573 handle = mono_method_get_wrapper_data (method, n);
8574 handle_class = mono_method_get_wrapper_data (method, n + 1);
8575 if (handle_class == mono_defaults.typehandle_class)
8576 handle = &((MonoClass*)handle)->byval_arg;
8579 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8583 mono_class_init (handle_class);
8584 if (cfg->generic_sharing_context) {
8585 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8586 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8587 /* This case handles ldtoken
8588 of an open type, like for
8591 } else if (handle_class == mono_defaults.typehandle_class) {
8592 /* If we get a MONO_TYPE_CLASS
8593 then we need to provide the
8595 instantiation of it. */
8596 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8599 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8600 } else if (handle_class == mono_defaults.fieldhandle_class)
8601 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8602 else if (handle_class == mono_defaults.methodhandle_class)
8603 context_used = mono_method_check_context_used (handle);
8605 g_assert_not_reached ();
8608 if ((cfg->opt & MONO_OPT_SHARED) &&
8609 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8610 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8611 MonoInst *addr, *vtvar, *iargs [3];
8612 int method_context_used;
8614 if (cfg->generic_sharing_context)
8615 method_context_used = mono_method_check_context_used (method);
8617 method_context_used = 0;
8619 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8621 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8622 EMIT_NEW_ICONST (cfg, iargs [1], n);
8623 if (method_context_used) {
8624 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8625 method, MONO_RGCTX_INFO_METHOD);
8626 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8628 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8629 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8631 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8633 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8635 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8637 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8638 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8639 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8640 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8641 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8642 MonoClass *tclass = mono_class_from_mono_type (handle);
8644 mono_class_init (tclass);
8646 ins = emit_get_rgctx_klass (cfg, context_used,
8647 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8648 } else if (cfg->compile_aot) {
8649 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8651 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8653 ins->type = STACK_OBJ;
8654 ins->klass = cmethod->klass;
8657 MonoInst *addr, *vtvar;
8659 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8662 if (handle_class == mono_defaults.typehandle_class) {
8663 ins = emit_get_rgctx_klass (cfg, context_used,
8664 mono_class_from_mono_type (handle),
8665 MONO_RGCTX_INFO_TYPE);
8666 } else if (handle_class == mono_defaults.methodhandle_class) {
8667 ins = emit_get_rgctx_method (cfg, context_used,
8668 handle, MONO_RGCTX_INFO_METHOD);
8669 } else if (handle_class == mono_defaults.fieldhandle_class) {
8670 ins = emit_get_rgctx_field (cfg, context_used,
8671 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8673 g_assert_not_reached ();
8675 } else if (cfg->compile_aot) {
8676 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8678 EMIT_NEW_PCONST (cfg, ins, handle);
8680 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8682 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8692 MONO_INST_NEW (cfg, ins, OP_THROW);
8694 ins->sreg1 = sp [0]->dreg;
8696 bblock->out_of_line = TRUE;
8697 MONO_ADD_INS (bblock, ins);
8698 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8699 MONO_ADD_INS (bblock, ins);
8702 link_bblock (cfg, bblock, end_bblock);
8703 start_new_bblock = 1;
8705 case CEE_ENDFINALLY:
8706 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8707 MONO_ADD_INS (bblock, ins);
8709 start_new_bblock = 1;
8712 * Control will leave the method so empty the stack, otherwise
8713 * the next basic block will start with a nonempty stack.
8715 while (sp != stack_start) {
8723 if (*ip == CEE_LEAVE) {
8725 target = ip + 5 + (gint32)read32(ip + 1);
8728 target = ip + 2 + (signed char)(ip [1]);
8731 /* empty the stack */
8732 while (sp != stack_start) {
8737 * If this leave statement is in a catch block, check for a
8738 * pending exception, and rethrow it if necessary.
8740 for (i = 0; i < header->num_clauses; ++i) {
8741 MonoExceptionClause *clause = &header->clauses [i];
8744 * Use <= in the final comparison to handle clauses with multiple
8745 * leave statements, like in bug #78024.
8746 * The ordering of the exception clauses guarantees that we find the
8749 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8751 MonoBasicBlock *dont_throw;
8756 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8759 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8761 NEW_BBLOCK (cfg, dont_throw);
8764 * Currently, we allways rethrow the abort exception, despite the
8765 * fact that this is not correct. See thread6.cs for an example.
8766 * But propagating the abort exception is more important than
8767 * getting the sematics right.
8769 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8771 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8773 MONO_START_BB (cfg, dont_throw);
8778 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8780 for (tmp = handlers; tmp; tmp = tmp->next) {
8782 link_bblock (cfg, bblock, tblock);
8783 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8784 ins->inst_target_bb = tblock;
8785 MONO_ADD_INS (bblock, ins);
8787 g_list_free (handlers);
8790 MONO_INST_NEW (cfg, ins, OP_BR);
8791 MONO_ADD_INS (bblock, ins);
8792 GET_BBLOCK (cfg, tblock, target);
8793 link_bblock (cfg, bblock, tblock);
8794 ins->inst_target_bb = tblock;
8795 start_new_bblock = 1;
8797 if (*ip == CEE_LEAVE)
8806 * Mono specific opcodes
8808 case MONO_CUSTOM_PREFIX: {
8810 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8814 case CEE_MONO_ICALL: {
8816 MonoJitICallInfo *info;
8818 token = read32 (ip + 2);
8819 func = mono_method_get_wrapper_data (method, token);
8820 info = mono_find_jit_icall_by_addr (func);
8823 CHECK_STACK (info->sig->param_count);
8824 sp -= info->sig->param_count;
8826 ins = mono_emit_jit_icall (cfg, info->func, sp);
8827 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8831 inline_costs += 10 * num_calls++;
8835 case CEE_MONO_LDPTR: {
8838 CHECK_STACK_OVF (1);
8840 token = read32 (ip + 2);
8842 ptr = mono_method_get_wrapper_data (method, token);
8843 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8844 MonoJitICallInfo *callinfo;
8845 const char *icall_name;
8847 icall_name = method->name + strlen ("__icall_wrapper_");
8848 g_assert (icall_name);
8849 callinfo = mono_find_jit_icall_by_name (icall_name);
8850 g_assert (callinfo);
8852 if (ptr == callinfo->func) {
8853 /* Will be transformed into an AOTCONST later */
8854 EMIT_NEW_PCONST (cfg, ins, ptr);
8860 /* FIXME: Generalize this */
8861 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8862 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8867 EMIT_NEW_PCONST (cfg, ins, ptr);
8870 inline_costs += 10 * num_calls++;
8871 /* Can't embed random pointers into AOT code */
8872 cfg->disable_aot = 1;
8875 case CEE_MONO_ICALL_ADDR: {
8876 MonoMethod *cmethod;
8879 CHECK_STACK_OVF (1);
8881 token = read32 (ip + 2);
8883 cmethod = mono_method_get_wrapper_data (method, token);
8885 if (cfg->compile_aot) {
8886 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8888 ptr = mono_lookup_internal_call (cmethod);
8890 EMIT_NEW_PCONST (cfg, ins, ptr);
8896 case CEE_MONO_VTADDR: {
8897 MonoInst *src_var, *src;
8903 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8904 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8909 case CEE_MONO_NEWOBJ: {
8910 MonoInst *iargs [2];
8912 CHECK_STACK_OVF (1);
8914 token = read32 (ip + 2);
8915 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8916 mono_class_init (klass);
8917 NEW_DOMAINCONST (cfg, iargs [0]);
8918 MONO_ADD_INS (cfg->cbb, iargs [0]);
8919 NEW_CLASSCONST (cfg, iargs [1], klass);
8920 MONO_ADD_INS (cfg->cbb, iargs [1]);
8921 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8923 inline_costs += 10 * num_calls++;
8926 case CEE_MONO_OBJADDR:
8929 MONO_INST_NEW (cfg, ins, OP_MOVE);
8930 ins->dreg = alloc_preg (cfg);
8931 ins->sreg1 = sp [0]->dreg;
8932 ins->type = STACK_MP;
8933 MONO_ADD_INS (cfg->cbb, ins);
8937 case CEE_MONO_LDNATIVEOBJ:
8939 * Similar to LDOBJ, but instead load the unmanaged
8940 * representation of the vtype to the stack.
8945 token = read32 (ip + 2);
8946 klass = mono_method_get_wrapper_data (method, token);
8947 g_assert (klass->valuetype);
8948 mono_class_init (klass);
8951 MonoInst *src, *dest, *temp;
8954 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8955 temp->backend.is_pinvoke = 1;
8956 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8957 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8959 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8960 dest->type = STACK_VTYPE;
8961 dest->klass = klass;
8967 case CEE_MONO_RETOBJ: {
8969 * Same as RET, but return the native representation of a vtype
8972 g_assert (cfg->ret);
8973 g_assert (mono_method_signature (method)->pinvoke);
8978 token = read32 (ip + 2);
8979 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8981 if (!cfg->vret_addr) {
8982 g_assert (cfg->ret_var_is_local);
8984 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8986 EMIT_NEW_RETLOADA (cfg, ins);
8988 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8990 if (sp != stack_start)
8993 MONO_INST_NEW (cfg, ins, OP_BR);
8994 ins->inst_target_bb = end_bblock;
8995 MONO_ADD_INS (bblock, ins);
8996 link_bblock (cfg, bblock, end_bblock);
8997 start_new_bblock = 1;
9001 case CEE_MONO_CISINST:
9002 case CEE_MONO_CCASTCLASS: {
9007 token = read32 (ip + 2);
9008 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9009 if (ip [1] == CEE_MONO_CISINST)
9010 ins = handle_cisinst (cfg, klass, sp [0]);
9012 ins = handle_ccastclass (cfg, klass, sp [0]);
9018 case CEE_MONO_SAVE_LMF:
9019 case CEE_MONO_RESTORE_LMF:
9020 #ifdef MONO_ARCH_HAVE_LMF_OPS
9021 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9022 MONO_ADD_INS (bblock, ins);
9023 cfg->need_lmf_area = TRUE;
9027 case CEE_MONO_CLASSCONST:
9028 CHECK_STACK_OVF (1);
9030 token = read32 (ip + 2);
9031 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9034 inline_costs += 10 * num_calls++;
9036 case CEE_MONO_NOT_TAKEN:
9037 bblock->out_of_line = TRUE;
9041 CHECK_STACK_OVF (1);
9043 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9044 ins->dreg = alloc_preg (cfg);
9045 ins->inst_offset = (gint32)read32 (ip + 2);
9046 ins->type = STACK_PTR;
9047 MONO_ADD_INS (bblock, ins);
9052 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9062 /* somewhat similar to LDTOKEN */
9063 MonoInst *addr, *vtvar;
9064 CHECK_STACK_OVF (1);
9065 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9067 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9068 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9070 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9071 ins->type = STACK_VTYPE;
9072 ins->klass = mono_defaults.argumenthandle_class;
9085 * The following transforms:
9086 * CEE_CEQ into OP_CEQ
9087 * CEE_CGT into OP_CGT
9088 * CEE_CGT_UN into OP_CGT_UN
9089 * CEE_CLT into OP_CLT
9090 * CEE_CLT_UN into OP_CLT_UN
9092 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9094 MONO_INST_NEW (cfg, ins, cmp->opcode);
9096 cmp->sreg1 = sp [0]->dreg;
9097 cmp->sreg2 = sp [1]->dreg;
9098 type_from_op (cmp, sp [0], sp [1]);
9100 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9101 cmp->opcode = OP_LCOMPARE;
9102 else if (sp [0]->type == STACK_R8)
9103 cmp->opcode = OP_FCOMPARE;
9105 cmp->opcode = OP_ICOMPARE;
9106 MONO_ADD_INS (bblock, cmp);
9107 ins->type = STACK_I4;
9108 ins->dreg = alloc_dreg (cfg, ins->type);
9109 type_from_op (ins, sp [0], sp [1]);
9111 if (cmp->opcode == OP_FCOMPARE) {
9113 * The backends expect the fceq opcodes to do the
9116 cmp->opcode = OP_NOP;
9117 ins->sreg1 = cmp->sreg1;
9118 ins->sreg2 = cmp->sreg2;
9120 MONO_ADD_INS (bblock, ins);
9127 MonoMethod *cil_method;
9128 gboolean needs_static_rgctx_invoke;
9130 CHECK_STACK_OVF (1);
9132 n = read32 (ip + 2);
9133 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9136 mono_class_init (cmethod->klass);
9138 mono_save_token_info (cfg, image, n, cmethod);
9140 if (cfg->generic_sharing_context)
9141 context_used = mono_method_check_context_used (cmethod);
9143 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9145 cil_method = cmethod;
9146 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9147 METHOD_ACCESS_FAILURE;
9149 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9150 if (check_linkdemand (cfg, method, cmethod))
9152 CHECK_CFG_EXCEPTION;
9153 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9154 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9158 * Optimize the common case of ldftn+delegate creation
9160 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9161 /* FIXME: SGEN support */
9162 /* FIXME: handle shared static generic methods */
9163 /* FIXME: handle this in shared code */
9164 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9165 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9166 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9167 MonoInst *target_ins;
9170 invoke = mono_get_delegate_invoke (ctor_method->klass);
9171 if (!invoke || !mono_method_signature (invoke))
9175 if (cfg->verbose_level > 3)
9176 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9177 target_ins = sp [-1];
9179 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9188 if (needs_static_rgctx_invoke)
9189 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9191 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9192 } else if (needs_static_rgctx_invoke) {
9193 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9195 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9197 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9201 inline_costs += 10 * num_calls++;
9204 case CEE_LDVIRTFTN: {
9209 n = read32 (ip + 2);
9210 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9213 mono_class_init (cmethod->klass);
9215 if (cfg->generic_sharing_context)
9216 context_used = mono_method_check_context_used (cmethod);
9218 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9219 if (check_linkdemand (cfg, method, cmethod))
9221 CHECK_CFG_EXCEPTION;
9222 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9223 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9230 args [1] = emit_get_rgctx_method (cfg, context_used,
9231 cmethod, MONO_RGCTX_INFO_METHOD);
9232 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9234 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9235 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9239 inline_costs += 10 * num_calls++;
9243 CHECK_STACK_OVF (1);
9245 n = read16 (ip + 2);
9247 EMIT_NEW_ARGLOAD (cfg, ins, n);
9252 CHECK_STACK_OVF (1);
9254 n = read16 (ip + 2);
9256 NEW_ARGLOADA (cfg, ins, n);
9257 MONO_ADD_INS (cfg->cbb, ins);
9265 n = read16 (ip + 2);
9267 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9269 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9273 CHECK_STACK_OVF (1);
9275 n = read16 (ip + 2);
9277 EMIT_NEW_LOCLOAD (cfg, ins, n);
9282 unsigned char *tmp_ip;
9283 CHECK_STACK_OVF (1);
9285 n = read16 (ip + 2);
9288 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9294 EMIT_NEW_LOCLOADA (cfg, ins, n);
9303 n = read16 (ip + 2);
9305 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9307 emit_stloc_ir (cfg, sp, header, n);
9314 if (sp != stack_start)
9316 if (cfg->method != method)
9318 * Inlining this into a loop in a parent could lead to
9319 * stack overflows which is different behavior than the
9320 * non-inlined case, thus disable inlining in this case.
9322 goto inline_failure;
9324 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9325 ins->dreg = alloc_preg (cfg);
9326 ins->sreg1 = sp [0]->dreg;
9327 ins->type = STACK_PTR;
9328 MONO_ADD_INS (cfg->cbb, ins);
9330 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9331 if (header->init_locals)
9332 ins->flags |= MONO_INST_INIT;
9337 case CEE_ENDFILTER: {
9338 MonoExceptionClause *clause, *nearest;
9339 int cc, nearest_num;
9343 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9345 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9346 ins->sreg1 = (*sp)->dreg;
9347 MONO_ADD_INS (bblock, ins);
9348 start_new_bblock = 1;
9353 for (cc = 0; cc < header->num_clauses; ++cc) {
9354 clause = &header->clauses [cc];
9355 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9356 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9357 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9363 if ((ip - header->code) != nearest->handler_offset)
9368 case CEE_UNALIGNED_:
9369 ins_flag |= MONO_INST_UNALIGNED;
9370 /* FIXME: record alignment? we can assume 1 for now */
9375 ins_flag |= MONO_INST_VOLATILE;
9379 ins_flag |= MONO_INST_TAILCALL;
9380 cfg->flags |= MONO_CFG_HAS_TAIL;
9381 /* Can't inline tail calls at this time */
9382 inline_costs += 100000;
9389 token = read32 (ip + 2);
9390 klass = mini_get_class (method, token, generic_context);
9391 CHECK_TYPELOAD (klass);
9392 if (generic_class_is_reference_type (cfg, klass))
9393 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9395 mini_emit_initobj (cfg, *sp, NULL, klass);
9399 case CEE_CONSTRAINED_:
9401 token = read32 (ip + 2);
9402 constrained_call = mono_class_get_full (image, token, generic_context);
9403 CHECK_TYPELOAD (constrained_call);
9408 MonoInst *iargs [3];
9412 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9413 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9414 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9415 /* emit_memset only works when val == 0 */
9416 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9421 if (ip [1] == CEE_CPBLK) {
9422 MonoMethod *memcpy_method = get_memcpy_method ();
9423 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9425 MonoMethod *memset_method = get_memset_method ();
9426 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9436 ins_flag |= MONO_INST_NOTYPECHECK;
9438 ins_flag |= MONO_INST_NORANGECHECK;
9439 /* we ignore the no-nullcheck for now since we
9440 * really do it explicitly only when doing callvirt->call
9446 int handler_offset = -1;
9448 for (i = 0; i < header->num_clauses; ++i) {
9449 MonoExceptionClause *clause = &header->clauses [i];
9450 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9451 handler_offset = clause->handler_offset;
9456 bblock->flags |= BB_EXCEPTION_UNSAFE;
9458 g_assert (handler_offset != -1);
9460 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9461 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9462 ins->sreg1 = load->dreg;
9463 MONO_ADD_INS (bblock, ins);
9465 link_bblock (cfg, bblock, end_bblock);
9466 start_new_bblock = 1;
9474 CHECK_STACK_OVF (1);
9476 token = read32 (ip + 2);
9477 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9478 MonoType *type = mono_type_create_from_typespec (image, token);
9479 token = mono_type_size (type, &ialign);
9481 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9482 CHECK_TYPELOAD (klass);
9483 mono_class_init (klass);
9484 token = mono_class_value_size (klass, &align);
9486 EMIT_NEW_ICONST (cfg, ins, token);
9491 case CEE_REFANYTYPE: {
9492 MonoInst *src_var, *src;
9498 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9500 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9501 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9502 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9512 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9517 g_error ("opcode 0x%02x not handled", *ip);
9520 if (start_new_bblock != 1)
9523 bblock->cil_length = ip - bblock->cil_code;
9524 bblock->next_bb = end_bblock;
9526 if (cfg->method == method && cfg->domainvar) {
9528 MonoInst *get_domain;
9530 cfg->cbb = init_localsbb;
9532 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9533 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9536 get_domain->dreg = alloc_preg (cfg);
9537 MONO_ADD_INS (cfg->cbb, get_domain);
9539 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9540 MONO_ADD_INS (cfg->cbb, store);
9543 if (cfg->method == method && cfg->got_var)
9544 mono_emit_load_got_addr (cfg);
9546 if (header->init_locals) {
9549 cfg->cbb = init_localsbb;
9551 for (i = 0; i < header->num_locals; ++i) {
9552 MonoType *ptype = header->locals [i];
9553 int t = ptype->type;
9554 dreg = cfg->locals [i]->dreg;
9556 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9557 t = mono_class_enum_basetype (ptype->data.klass)->type;
9559 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9560 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9561 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9562 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9563 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9564 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9565 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9566 ins->type = STACK_R8;
9567 ins->inst_p0 = (void*)&r8_0;
9568 ins->dreg = alloc_dreg (cfg, STACK_R8);
9569 MONO_ADD_INS (init_localsbb, ins);
9570 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9571 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9572 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9573 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9575 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9582 if (cfg->method == method) {
9584 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9585 bb->region = mono_find_block_region (cfg, bb->real_offset);
9587 mono_create_spvar_for_region (cfg, bb->region);
9588 if (cfg->verbose_level > 2)
9589 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9593 g_slist_free (class_inits);
9594 dont_inline = g_list_remove (dont_inline, method);
9596 if (inline_costs < 0) {
9599 /* Method is too large */
9600 mname = mono_method_full_name (method, TRUE);
9601 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9602 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9607 if ((cfg->verbose_level > 2) && (cfg->method == method))
9608 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9610 return inline_costs;
9613 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9614 g_slist_free (class_inits);
9615 dont_inline = g_list_remove (dont_inline, method);
9619 g_slist_free (class_inits);
9620 dont_inline = g_list_remove (dont_inline, method);
9624 g_slist_free (class_inits);
9625 dont_inline = g_list_remove (dont_inline, method);
9626 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9630 g_slist_free (class_inits);
9631 dont_inline = g_list_remove (dont_inline, method);
9632 set_exception_type_from_invalid_il (cfg, method, ip);
9637 store_membase_reg_to_store_membase_imm (int opcode)
9640 case OP_STORE_MEMBASE_REG:
9641 return OP_STORE_MEMBASE_IMM;
9642 case OP_STOREI1_MEMBASE_REG:
9643 return OP_STOREI1_MEMBASE_IMM;
9644 case OP_STOREI2_MEMBASE_REG:
9645 return OP_STOREI2_MEMBASE_IMM;
9646 case OP_STOREI4_MEMBASE_REG:
9647 return OP_STOREI4_MEMBASE_IMM;
9648 case OP_STOREI8_MEMBASE_REG:
9649 return OP_STOREI8_MEMBASE_IMM;
9651 g_assert_not_reached ();
9657 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-operand IR opcode to its immediate-operand variant
 * (e.g. OP_STORE_MEMBASE_REG -> OP_STORE_MEMBASE_IMM, OP_COMPARE ->
 * OP_COMPARE_IMM), so that a constant operand can be encoded directly
 * into the instruction instead of going through a register.
 *
 * NOTE(review): this listing appears truncated — several case labels
 * between the visible `return` statements (the int/long arithmetic and
 * shift cases) are missing, along with the switch scaffolding; confirm
 * the complete mapping against the upstream file before relying on it.
 */
9660 mono_op_to_op_imm (int opcode)
9670 return OP_IDIV_UN_IMM;
9674 return OP_IREM_UN_IMM;
9688 return OP_ISHR_UN_IMM;
9705 return OP_LSHR_UN_IMM;
9708 return OP_COMPARE_IMM;
9710 return OP_ICOMPARE_IMM;
9712 return OP_LCOMPARE_IMM;
/* Memory-store forms: fold the stored value into the instruction. */
9714 case OP_STORE_MEMBASE_REG:
9715 return OP_STORE_MEMBASE_IMM;
9716 case OP_STOREI1_MEMBASE_REG:
9717 return OP_STOREI1_MEMBASE_IMM;
9718 case OP_STOREI2_MEMBASE_REG:
9719 return OP_STOREI2_MEMBASE_IMM;
9720 case OP_STOREI4_MEMBASE_REG:
9721 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms (x86 / amd64 only). */
9723 #if defined(__i386__) || defined (__x86_64__)
9725 return OP_X86_PUSH_IMM;
9726 case OP_X86_COMPARE_MEMBASE_REG:
9727 return OP_X86_COMPARE_MEMBASE_IMM;
9729 #if defined(__x86_64__)
9730 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9731 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9733 case OP_VOIDCALL_REG:
9742 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* indirect-load opcode to the matching low-level
 * OP_LOAD*_MEMBASE opcode (sign/zero-extending loads for the small integer
 * types, plain loads for pointer-sized values, FP loads for R4/R8).
 * Aborts on any opcode that is not an indirect load.
 */
9749 ldind_to_load_membase (int opcode)
9753 return OP_LOADI1_MEMBASE;
9755 return OP_LOADU1_MEMBASE;
9757 return OP_LOADI2_MEMBASE;
9759 return OP_LOADU2_MEMBASE;
9761 return OP_LOADI4_MEMBASE;
9763 return OP_LOADU4_MEMBASE;
/* Pointer-sized loads: two ldind variants map to the same native-word load. */
9765 return OP_LOAD_MEMBASE;
9767 return OP_LOAD_MEMBASE;
9769 return OP_LOADI8_MEMBASE;
9771 return OP_LOADR4_MEMBASE;
9773 return OP_LOADR8_MEMBASE;
9775 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* indirect-store opcode to the matching low-level
 * OP_STORE*_MEMBASE_REG opcode.  Aborts on any opcode that is not an
 * indirect store.
 */
9782 stind_to_store_membase (int opcode)
9786 return OP_STOREI1_MEMBASE_REG;
9788 return OP_STOREI2_MEMBASE_REG;
9790 return OP_STOREI4_MEMBASE_REG;
9793 return OP_STORE_MEMBASE_REG;
9795 return OP_STOREI8_MEMBASE_REG;
9797 return OP_STORER4_MEMBASE_REG;
9799 return OP_STORER8_MEMBASE_REG;
9801 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert a base+offset load (OP_LOAD*_MEMBASE) into an absolute-address
 * load (OP_LOAD*_MEM), which only x86/amd64 support.
 * NOTE(review): presumably returns -1 on other architectures / unsupported
 * opcodes — the fall-through return is not visible here, confirm.
 */
9808 mono_load_membase_to_load_mem (int opcode)
9810 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9811 #if defined(__i386__) || defined(__x86_64__)
9813 case OP_LOAD_MEMBASE:
9815 case OP_LOADU1_MEMBASE:
9816 return OP_LOADU1_MEM;
9817 case OP_LOADU2_MEMBASE:
9818 return OP_LOADU2_MEM;
9819 case OP_LOADI4_MEMBASE:
9820 return OP_LOADI4_MEM;
9821 case OP_LOADU4_MEMBASE:
9822 return OP_LOADU4_MEM;
9823 #if SIZEOF_REGISTER == 8
9824 case OP_LOADI8_MEMBASE:
9825 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given STORE_OPCODE, the store which would spill OPCODE's destination,
 * return a fused read-modify-write opcode which operates directly on the
 * memory slot (e.g. OP_IADD + store -> OP_X86_ADD_MEMBASE_REG), avoiding
 * the separate load/store.  Returns -1 when no fused form exists (callers
 * in mono_spill_global_vars check the result against -1).  Only x86/amd64
 * provide such opcodes.
 */
9834 op_to_op_dest_membase (int store_opcode, int opcode)
9836 #if defined(__i386__)
/* x86: only pointer-sized / 32 bit stores can be fused. */
9837 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9842 return OP_X86_ADD_MEMBASE_REG;
9844 return OP_X86_SUB_MEMBASE_REG;
9846 return OP_X86_AND_MEMBASE_REG;
9848 return OP_X86_OR_MEMBASE_REG;
9850 return OP_X86_XOR_MEMBASE_REG;
9853 return OP_X86_ADD_MEMBASE_IMM;
9856 return OP_X86_SUB_MEMBASE_IMM;
9859 return OP_X86_AND_MEMBASE_IMM;
9862 return OP_X86_OR_MEMBASE_IMM;
9865 return OP_X86_XOR_MEMBASE_IMM;
9871 #if defined(__x86_64__)
/* amd64: 64 bit stores can be fused as well. */
9872 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9877 return OP_X86_ADD_MEMBASE_REG;
9879 return OP_X86_SUB_MEMBASE_REG;
9881 return OP_X86_AND_MEMBASE_REG;
9883 return OP_X86_OR_MEMBASE_REG;
9885 return OP_X86_XOR_MEMBASE_REG;
9887 return OP_X86_ADD_MEMBASE_IMM;
9889 return OP_X86_SUB_MEMBASE_IMM;
9891 return OP_X86_AND_MEMBASE_IMM;
9893 return OP_X86_OR_MEMBASE_IMM;
9895 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit wide variants. */
9897 return OP_AMD64_ADD_MEMBASE_REG;
9899 return OP_AMD64_SUB_MEMBASE_REG;
9901 return OP_AMD64_AND_MEMBASE_REG;
9903 return OP_AMD64_OR_MEMBASE_REG;
9905 return OP_AMD64_XOR_MEMBASE_REG;
9908 return OP_AMD64_ADD_MEMBASE_IMM;
9911 return OP_AMD64_SUB_MEMBASE_IMM;
9914 return OP_AMD64_AND_MEMBASE_IMM;
9917 return OP_AMD64_OR_MEMBASE_IMM;
9920 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse OPCODE with the byte store which would spill its destination,
 * returning a SETcc-to-memory opcode (e.g. ceq + STOREI1 ->
 * OP_X86_SETEQ_MEMBASE).  Returns -1 when no fused form exists (callers
 * check the result against -1).  x86/amd64 only.
 */
9930 op_to_op_store_membase (int store_opcode, int opcode)
9932 #if defined(__i386__) || defined(__x86_64__)
9935 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9936 return OP_X86_SETEQ_MEMBASE;
9938 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9939 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse the load of OPCODE's first source operand (performed with
 * LOAD_OPCODE) into OPCODE itself, returning an opcode which reads src1
 * directly from memory (e.g. compare -> OP_X86_COMPARE_MEMBASE_REG).
 * Returns -1 when no fused form exists (callers check against -1).
 * x86/amd64 only.
 */
9947 op_to_op_src1_membase (int load_opcode, int opcode)
9950 /* FIXME: This has sign extension issues */
9952 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9953 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32 bit loads can be fused below. */
9956 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9961 return OP_X86_PUSH_MEMBASE;
9962 case OP_COMPARE_IMM:
9963 case OP_ICOMPARE_IMM:
9964 return OP_X86_COMPARE_MEMBASE_IMM;
9967 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variants. */
9972 /* FIXME: This has sign extension issues */
9974 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9975 return OP_X86_COMPARE_MEMBASE8_IMM;
9980 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9981 return OP_X86_PUSH_MEMBASE;
9983 /* FIXME: This only works for 32 bit immediates
9984 case OP_COMPARE_IMM:
9985 case OP_LCOMPARE_IMM:
9986 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9987 return OP_AMD64_COMPARE_MEMBASE_IMM;
9989 case OP_ICOMPARE_IMM:
9990 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9991 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9995 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9996 return OP_AMD64_COMPARE_MEMBASE_REG;
9999 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10000 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse the load of OPCODE's second source operand (performed with
 * LOAD_OPCODE) into OPCODE itself, returning a reg-op-memory opcode
 * (e.g. add -> OP_X86_ADD_REG_MEMBASE).  Returns -1 when no fused form
 * exists (callers check against -1).  x86/amd64 only.
 */
10009 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be fused. */
10012 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10018 return OP_X86_COMPARE_REG_MEMBASE;
10020 return OP_X86_ADD_REG_MEMBASE;
10022 return OP_X86_SUB_REG_MEMBASE;
10024 return OP_X86_AND_REG_MEMBASE;
10026 return OP_X86_OR_REG_MEMBASE;
10028 return OP_X86_XOR_REG_MEMBASE;
/* amd64: dispatch on the width of the fused load. */
10035 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10036 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10040 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10041 return OP_AMD64_COMPARE_REG_MEMBASE;
10044 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10045 return OP_X86_ADD_REG_MEMBASE;
10047 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10048 return OP_X86_SUB_REG_MEMBASE;
10050 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10051 return OP_X86_AND_REG_MEMBASE;
10053 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10054 return OP_X86_OR_REG_MEMBASE;
10056 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10057 return OP_X86_XOR_REG_MEMBASE;
10059 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10060 return OP_AMD64_ADD_REG_MEMBASE;
10062 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10063 return OP_AMD64_SUB_REG_MEMBASE;
10065 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10066 return OP_AMD64_AND_REG_MEMBASE;
10068 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10069 return OP_AMD64_OR_REG_MEMBASE;
10071 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10072 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filter out opcodes which are
 * emulated in software on this architecture (long shifts on 32 bit
 * platforms, mul/div where the arch emulates them), since those must not
 * be converted to immediate forms.  Falls through to mono_op_to_op_imm ()
 * for everything else.
 */
10080 mono_op_to_op_imm_noemul (int opcode)
10083 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10088 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10096 return mono_op_to_op_imm (opcode);
10100 #ifndef DISABLE_JIT
10103 * mono_handle_global_vregs:
10105 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10109 mono_handle_global_vregs (MonoCompile *cfg)
10111 gint32 *vreg_to_bb;
10112 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps a vreg to (block_num + 1) of the single bblock using it,
 * or -1 once it is seen in more than one bblock.
 * FIX: the allocation used sizeof (gint32*) (the pointer type, not the
 * element type) and the "+ 1" bound to the byte count instead of the
 * element count, under-allocating by 3 bytes on 32 bit if index next_vreg
 * is ever touched.  Allocate next_vreg + 1 gint32 elements instead.
 */
10115 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10117 #ifdef MONO_ARCH_SIMD_INTRINSICS
10118 if (cfg->uses_simd_intrinsics)
10119 mono_simd_simplify_indirection (cfg);
10122 /* Find local vregs used in more than one bb */
10123 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10124 MonoInst *ins = bb->code;
10125 int block_num = bb->block_num;
10127 if (cfg->verbose_level > 2)
10128 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10131 for (; ins; ins = ins->next) {
10132 const char *spec = INS_INFO (ins->opcode);
10133 int regtype, regindex;
10136 if (G_UNLIKELY (cfg->verbose_level > 2))
10137 mono_print_ins (ins);
10139 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2 and sreg3 of the instruction in turn. */
10141 for (regindex = 0; regindex < 4; regindex ++) {
10144 if (regindex == 0) {
10145 regtype = spec [MONO_INST_DEST];
10146 if (regtype == ' ')
10149 } else if (regindex == 1) {
10150 regtype = spec [MONO_INST_SRC1];
10151 if (regtype == ' ')
10154 } else if (regindex == 2) {
10155 regtype = spec [MONO_INST_SRC2];
10156 if (regtype == ' ')
10159 } else if (regindex == 3) {
10160 regtype = spec [MONO_INST_SRC3];
10161 if (regtype == ' ')
10166 #if SIZEOF_REGISTER == 4
10167 if (regtype == 'l') {
10169 * Since some instructions reference the original long vreg,
10170 * and some reference the two component vregs, it is quite hard
10171 * to determine when it needs to be global. So be conservative.
10173 if (!get_vreg_to_inst (cfg, vreg)) {
10174 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10176 if (cfg->verbose_level > 2)
10177 printf ("LONG VREG R%d made global.\n", vreg);
10181 * Make the component vregs volatile since the optimizations can
10182 * get confused otherwise.
10184 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10185 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10189 g_assert (vreg != -1);
10191 prev_bb = vreg_to_bb [vreg];
10192 if (prev_bb == 0) {
10193 /* 0 is a valid block num */
10194 vreg_to_bb [vreg] = block_num + 1;
10195 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10196 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Seen in a second bblock: promote to a global variable. */
10199 if (!get_vreg_to_inst (cfg, vreg)) {
10200 if (G_UNLIKELY (cfg->verbose_level > 2))
10201 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10205 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10208 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10211 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10214 g_assert_not_reached ();
10218 /* Flag as having been used in more than one bb */
10219 vreg_to_bb [vreg] = -1;
10225 /* If a variable is used in only one bblock, convert it into a local vreg */
10226 for (i = 0; i < cfg->num_varinfo; i++) {
10227 MonoInst *var = cfg->varinfo [i];
10228 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10230 switch (var->type) {
10236 #if SIZEOF_REGISTER == 8
10239 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10240 /* Enabling this screws up the fp stack on x86 */
10243 /* Arguments are implicitly global */
10244 /* Putting R4 vars into registers doesn't work currently */
10245 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10247 * Make that the variable's liveness interval doesn't contain a call, since
10248 * that would cause the lvreg to be spilled, making the whole optimization
10251 /* This is too slow for JIT compilation */
/*
 * NOTE(review): this compile_aot branch dereferences vreg_to_bb entries as
 * bblock pointers while the array is declared gint32* — presumably the
 * branch is disabled with #if 0 in the surrounding (not visible) lines;
 * confirm before enabling it.
 */
10253 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10255 int def_index, call_index, ins_index;
10256 gboolean spilled = FALSE;
10261 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10262 const char *spec = INS_INFO (ins->opcode);
10264 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10265 def_index = ins_index;
/*
 * FIX: the second clause duplicated the SRC1/sreg1 test, so uses of the
 * variable through sreg2 were never considered; it must test SRC2/sreg2.
 */
10267 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10268 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10269 if (call_index > def_index) {
10275 if (MONO_IS_CALL (ins))
10276 call_index = ins_index;
10286 if (G_UNLIKELY (cfg->verbose_level > 2))
10287 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10288 var->flags |= MONO_INST_IS_DEAD;
10289 cfg->vreg_to_inst [var->dreg] = NULL;
10296 * Compress the varinfo and vars tables so the liveness computation is faster and
10297 * takes up less space.
10300 for (i = 0; i < cfg->num_varinfo; ++i) {
10301 MonoInst *var = cfg->varinfo [i];
10302 if (pos < i && cfg->locals_start == i)
10303 cfg->locals_start = pos;
10304 if (!(var->flags & MONO_INST_IS_DEAD)) {
10306 cfg->varinfo [pos] = cfg->varinfo [i];
10307 cfg->varinfo [pos]->inst_c0 = pos;
10308 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10309 cfg->vars [pos].idx = pos;
10310 #if SIZEOF_REGISTER == 4
10311 if (cfg->varinfo [pos]->type == STACK_I8) {
10312 /* Modify the two component vars too */
10315 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10316 var1->inst_c0 = pos;
10317 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10318 var1->inst_c0 = pos;
10325 cfg->num_varinfo = pos;
10326 if (cfg->locals_start > cfg->num_varinfo)
10327 cfg->locals_start = cfg->num_varinfo;
10331 * mono_spill_global_vars:
10333 * Generate spill code for variables which are not allocated to registers,
10334 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10335 * code is generated which could be optimized by the local optimization passes.
10338 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10340 MonoBasicBlock *bb;
10342 int orig_next_vreg;
10343 guint32 *vreg_to_lvreg;
10345 guint32 i, lvregs_len;
10346 gboolean dest_has_lvreg = FALSE;
10347 guint32 stacktypes [128];
10348 MonoInst **live_range_start, **live_range_end;
10349 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10351 *need_local_opts = FALSE;
10353 memset (spec2, 0, sizeof (spec2));
/* Map ins-info regtype characters to stack types used by alloc_dreg (). */
10355 /* FIXME: Move this function to mini.c */
10356 stacktypes ['i'] = STACK_PTR;
10357 stacktypes ['l'] = STACK_I8;
10358 stacktypes ['f'] = STACK_R8;
10359 #ifdef MONO_ARCH_SIMD_INTRINSICS
10360 stacktypes ['x'] = STACK_VTYPE;
10363 #if SIZEOF_REGISTER == 4
10364 /* Create MonoInsts for longs */
10365 for (i = 0; i < cfg->num_varinfo; i++) {
10366 MonoInst *ins = cfg->varinfo [i];
10368 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10369 switch (ins->type) {
10370 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two 32 bit component vregs stack slots inside the long's slot. */
10376 g_assert (ins->opcode == OP_REGOFFSET);
10378 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10380 tree->opcode = OP_REGOFFSET;
10381 tree->inst_basereg = ins->inst_basereg;
10382 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10384 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10386 tree->opcode = OP_REGOFFSET;
10387 tree->inst_basereg = ins->inst_basereg;
10388 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10398 /* FIXME: widening and truncation */
10401 * As an optimization, when a variable allocated to the stack is first loaded into
10402 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10403 * the variable again.
10405 orig_next_vreg = cfg->next_vreg;
10406 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10407 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10411 * These arrays contain the first and last instructions accessing a given
10413 * Since we emit bblocks in the same order we process them here, and we
10414 * don't split live ranges, these will precisely describe the live range of
10415 * the variable, i.e. the instruction range where a valid value can be found
10416 * in the variables location.
10418 /* FIXME: Only do this if debugging info is requested */
10419 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
10420 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10421 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10422 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10424 /* Add spill loads/stores */
10425 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10428 if (cfg->verbose_level > 2)
10429 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is only valid within one bblock. */
10431 /* Clear vreg_to_lvreg array */
10432 for (i = 0; i < lvregs_len; i++)
10433 vreg_to_lvreg [lvregs [i]] = 0;
10437 MONO_BB_FOR_EACH_INS (bb, ins) {
10438 const char *spec = INS_INFO (ins->opcode);
10439 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10440 gboolean store, no_lvreg;
10441 int sregs [MONO_MAX_SRC_REGS];
10443 if (G_UNLIKELY (cfg->verbose_level > 2))
10444 mono_print_ins (ins);
10446 if (ins->opcode == OP_NOP)
10450 * We handle LDADDR here as well, since it can only be decomposed
10451 * when variable addresses are known.
10453 if (ins->opcode == OP_LDADDR) {
10454 MonoInst *var = ins->inst_p0;
10456 if (var->opcode == OP_VTARG_ADDR) {
10457 /* Happens on SPARC/S390 where vtypes are passed by reference */
10458 MonoInst *vtaddr = var->inst_left;
10459 if (vtaddr->opcode == OP_REGVAR) {
10460 ins->opcode = OP_MOVE;
10461 ins->sreg1 = vtaddr->dreg;
10463 else if (var->inst_left->opcode == OP_REGOFFSET) {
10464 ins->opcode = OP_LOAD_MEMBASE;
10465 ins->inst_basereg = vtaddr->inst_basereg;
10466 ins->inst_offset = vtaddr->inst_offset;
10470 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: frame pointer + offset. */
10472 ins->opcode = OP_ADD_IMM;
10473 ins->sreg1 = var->inst_basereg;
10474 ins->inst_imm = var->inst_offset;
10477 *need_local_opts = TRUE;
10478 spec = INS_INFO (ins->opcode);
10481 if (ins->opcode < MONO_CEE_LAST) {
10482 mono_print_ins (ins);
10483 g_assert_not_reached ();
10487 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10491 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap so the base reg is treated as a source below. */
10492 tmp_reg = ins->dreg;
10493 ins->dreg = ins->sreg2;
10494 ins->sreg2 = tmp_reg;
10497 spec2 [MONO_INST_DEST] = ' ';
10498 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10499 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10500 spec2 [MONO_INST_SRC3] = ' ';
10502 } else if (MONO_IS_STORE_MEMINDEX (ins))
10503 g_assert_not_reached ();
10508 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10509 printf ("\t %.3s %d", spec, ins->dreg);
10510 num_sregs = mono_inst_get_src_registers (ins, sregs);
10511 for (srcindex = 0; srcindex < 3; ++srcindex)
10512 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10519 regtype = spec [MONO_INST_DEST];
10520 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10523 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10524 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10525 MonoInst *store_ins;
10527 MonoInst *def_ins = ins;
10528 int dreg = ins->dreg; /* The original vreg */
10530 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10532 if (var->opcode == OP_REGVAR) {
10533 ins->dreg = var->dreg;
10534 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10536 * Instead of emitting a load+store, use a _membase opcode.
10538 g_assert (var->opcode == OP_REGOFFSET);
10539 if (ins->opcode == OP_MOVE) {
10543 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10544 ins->inst_basereg = var->inst_basereg;
10545 ins->inst_offset = var->inst_offset;
10548 spec = INS_INFO (ins->opcode);
10552 g_assert (var->opcode == OP_REGOFFSET);
10554 prev_dreg = ins->dreg;
10556 /* Invalidate any previous lvreg for this vreg */
10557 vreg_to_lvreg [ins->dreg] = 0;
10561 #ifdef MONO_ARCH_SOFT_FLOAT
10562 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10564 store_opcode = OP_STOREI8_MEMBASE_REG;
10568 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10570 if (regtype == 'l') {
/* 32 bit platforms: spill the long as two 32 bit word stores. */
10571 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10572 mono_bblock_insert_after_ins (bb, ins, store_ins);
10573 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10574 mono_bblock_insert_after_ins (bb, ins, store_ins);
10575 def_ins = store_ins;
10578 g_assert (store_opcode != OP_STOREV_MEMBASE);
10580 /* Try to fuse the store into the instruction itself */
10581 /* FIXME: Add more instructions */
10582 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10583 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10584 ins->inst_imm = ins->inst_c0;
10585 ins->inst_destbasereg = var->inst_basereg;
10586 ins->inst_offset = var->inst_offset;
10587 spec = INS_INFO (ins->opcode);
10588 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled variable becomes a plain store. */
10589 ins->opcode = store_opcode;
10590 ins->inst_destbasereg = var->inst_basereg;
10591 ins->inst_offset = var->inst_offset;
10595 tmp_reg = ins->dreg;
10596 ins->dreg = ins->sreg2;
10597 ins->sreg2 = tmp_reg;
10600 spec2 [MONO_INST_DEST] = ' ';
10601 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10602 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10603 spec2 [MONO_INST_SRC3] = ' ';
10605 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10606 // FIXME: The backends expect the base reg to be in inst_basereg
10607 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10609 ins->inst_basereg = var->inst_basereg;
10610 ins->inst_offset = var->inst_offset;
10611 spec = INS_INFO (ins->opcode);
10613 /* printf ("INS: "); mono_print_ins (ins); */
10614 /* Create a store instruction */
10615 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10617 /* Insert it after the instruction */
10618 mono_bblock_insert_after_ins (bb, ins, store_ins);
10620 def_ins = store_ins;
10623 * We can't assign ins->dreg to var->dreg here, since the
10624 * sregs could use it. So set a flag, and do it after
10627 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10628 dest_has_lvreg = TRUE;
10633 if (def_ins && !live_range_start [dreg]) {
10634 live_range_start [dreg] = def_ins;
10635 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
10642 num_sregs = mono_inst_get_src_registers (ins, sregs);
10643 for (srcindex = 0; srcindex < 3; ++srcindex) {
10644 regtype = spec [MONO_INST_SRC1 + srcindex];
10645 sreg = sregs [srcindex];
10647 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10648 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10649 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10650 MonoInst *use_ins = ins;
10651 MonoInst *load_ins;
10652 guint32 load_opcode;
10654 if (var->opcode == OP_REGVAR) {
10655 sregs [srcindex] = var->dreg;
10656 //mono_inst_set_src_registers (ins, sregs);
10657 live_range_end [sreg] = use_ins;
10658 live_range_end_bb [sreg] = bb;
10662 g_assert (var->opcode == OP_REGOFFSET);
10664 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10666 g_assert (load_opcode != OP_LOADV_MEMBASE);
10668 if (vreg_to_lvreg [sreg]) {
10669 g_assert (vreg_to_lvreg [sreg] != -1);
10671 /* The variable is already loaded to an lvreg */
10672 if (G_UNLIKELY (cfg->verbose_level > 2))
10673 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10674 sregs [srcindex] = vreg_to_lvreg [sreg];
10675 //mono_inst_set_src_registers (ins, sregs);
10679 /* Try to fuse the load into the instruction */
10680 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10681 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10682 sregs [0] = var->inst_basereg;
10683 //mono_inst_set_src_registers (ins, sregs);
10684 ins->inst_offset = var->inst_offset;
10685 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10686 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10687 sregs [1] = var->inst_basereg;
10688 //mono_inst_set_src_registers (ins, sregs);
10689 ins->inst_offset = var->inst_offset;
10691 if (MONO_IS_REAL_MOVE (ins)) {
10692 ins->opcode = OP_NOP;
10695 //printf ("%d ", srcindex); mono_print_ins (ins);
10697 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10699 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10700 if (var->dreg == prev_dreg) {
10702 * sreg refers to the value loaded by the load
10703 * emitted below, but we need to use ins->dreg
10704 * since it refers to the store emitted earlier.
10708 g_assert (sreg != -1);
/* Remember the lvreg so later uses in this bblock can reuse it. */
10709 vreg_to_lvreg [var->dreg] = sreg;
10710 g_assert (lvregs_len < 1024);
10711 lvregs [lvregs_len ++] = var->dreg;
10715 sregs [srcindex] = sreg;
10716 //mono_inst_set_src_registers (ins, sregs);
10718 if (regtype == 'l') {
/* 32 bit platforms: reload the long as two 32 bit word loads. */
10719 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10720 mono_bblock_insert_before_ins (bb, ins, load_ins);
10721 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10722 mono_bblock_insert_before_ins (bb, ins, load_ins);
10723 use_ins = load_ins;
10726 #if SIZEOF_REGISTER == 4
10727 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10729 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10730 mono_bblock_insert_before_ins (bb, ins, load_ins);
10731 use_ins = load_ins;
10735 if (var->dreg < orig_next_vreg) {
10736 live_range_end [var->dreg] = use_ins;
10737 live_range_end_bb [var->dreg] = bb;
10741 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above, now that sregs are processed. */
10743 if (dest_has_lvreg) {
10744 g_assert (ins->dreg != -1);
10745 vreg_to_lvreg [prev_dreg] = ins->dreg;
10746 g_assert (lvregs_len < 1024);
10747 lvregs [lvregs_len ++] = prev_dreg;
10748 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes earlier. */
10752 tmp_reg = ins->dreg;
10753 ins->dreg = ins->sreg2;
10754 ins->sreg2 = tmp_reg;
10757 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs. */
10758 /* Clear vreg_to_lvreg array */
10759 for (i = 0; i < lvregs_len; i++)
10760 vreg_to_lvreg [lvregs [i]] = 0;
10762 } else if (ins->opcode == OP_NOP) {
10764 MONO_INST_NULLIFY_SREGS (ins);
10767 if (cfg->verbose_level > 2)
10768 mono_print_ins_index (1, ins);
10772 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10774 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10775 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10777 for (i = 0; i < cfg->num_varinfo; ++i) {
10778 int vreg = MONO_VARINFO (cfg, i)->vreg;
10781 if (live_range_start [vreg]) {
10782 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10784 ins->inst_c1 = vreg;
10785 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10787 if (live_range_end [vreg]) {
10788 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10790 ins->inst_c1 = vreg;
10791 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10796 g_free (live_range_start);
10797 g_free (live_range_end);
10798 g_free (live_range_start_bb);
10799 g_free (live_range_end_bb);
10804 * - use 'iadd' instead of 'int_add'
10805 * - handling ovf opcodes: decompose in method_to_ir.
10806 * - unify iregs/fregs
10807 * -> partly done, the missing parts are:
10808 * - a more complete unification would involve unifying the hregs as well, so
10809 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10810 * would no longer map to the machine hregs, so the code generators would need to
10811 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10812 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10813 * fp/non-fp branches speeds it up by about 15%.
10814 * - use sext/zext opcodes instead of shifts
10816 * - get rid of TEMPLOADs if possible and use vregs instead
10817 * - clean up usage of OP_P/OP_ opcodes
10818 * - cleanup usage of DUMMY_USE
10819 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10821 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10822 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10823 * - make sure handle_stack_args () is called before the branch is emitted
10824 * - when the new IR is done, get rid of all unused stuff
10825 * - COMPARE/BEQ as separate instructions or unify them ?
10826 * - keeping them separate allows specialized compare instructions like
10827 * compare_imm, compare_membase
10828 * - most back ends unify fp compare+branch, fp compare+ceq
10829 * - integrate mono_save_args into inline_method
10830 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10831 * - handle long shift opts on 32 bit platforms somehow: they require
10832 * 3 sregs (2 for arg1 and 1 for arg2)
10833 * - make byref a 'normal' type.
10834 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10835 * variable if needed.
10836 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10837 * like inline_method.
10838 * - remove inlining restrictions
10839 * - fix LNEG and enable cfold of INEG
10840 * - generalize x86 optimizations like ldelema as a peephole optimization
10841 * - add store_mem_imm for amd64
10842 * - optimize the loading of the interruption flag in the managed->native wrappers
10843 * - avoid special handling of OP_NOP in passes
10844 * - move code inserting instructions into one function/macro.
10845 * - try a coalescing phase after liveness analysis
10846 * - add float -> vreg conversion + local optimizations on !x86
10847 * - figure out how to handle decomposed branches during optimizations, ie.
10848 * compare+branch, op_jump_table+op_br etc.
10849 * - promote RuntimeXHandles to vregs
10850 * - vtype cleanups:
10851 * - add a NEW_VARLOADA_VREG macro
10852 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10853 * accessing vtype fields.
10854 * - get rid of I8CONST on 64 bit platforms
10855 * - dealing with the increase in code size due to branches created during opcode
10857 * - use extended basic blocks
10858 * - all parts of the JIT
10859 * - handle_global_vregs () && local regalloc
10860 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10861 * - sources of increase in code size:
10864 * - isinst and castclass
10865 * - lvregs not allocated to global registers even if used multiple times
10866 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10868 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10869 * - add all micro optimizations from the old JIT
10870 * - put tree optimizations into the deadce pass
10871 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10872 * specific function.
10873 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10874 * fcompare + branchCC.
10875 * - create a helper function for allocating a stack slot, taking into account
10876 * MONO_CFG_HAS_SPILLUP.
10878 * - merge the ia64 switch changes.
10879 * - optimize mono_regstate2_alloc_int/float.
10880 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10881 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10882 * parts of the tree could be separated by other instructions, killing the tree
10883 * arguments, or stores killing loads etc. Also, should we fold loads into other
10884 * instructions if the result of the load is used multiple times ?
10885 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10886 * - LAST MERGE: 108395.
10887 * - when returning vtypes in registers, generate IR and append it to the end of the
10888 * last bb instead of doing it in the epilog.
10889 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10897 - When to decompose opcodes:
10898 - earlier: this makes some optimizations hard to implement, since the low level IR
10899 no longer contains the necessary information. But it is easier to do.
10900 - later: harder to implement, enables more optimizations.
10901 - Branches inside bblocks:
10902 - created when decomposing complex opcodes.
10903 - branches to another bblock: harmless, but not tracked by the branch
10904 optimizations, so need to branch to a label at the start of the bblock.
10905 - branches to inside the same bblock: very problematic, trips up the local
10906 reg allocator. Can be fixed by splitting the current bblock, but that is a
10907 complex operation, since some local vregs can become global vregs etc.
10908 - Local/global vregs:
10909 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10910 local register allocator.
10911 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10912 structure, created by mono_create_var (). Assigned to hregs or the stack by
10913 the global register allocator.
10914 - When to do optimizations like alu->alu_imm:
10915 - earlier -> saves work later on since the IR will be smaller/simpler
10916 - later -> can work on more instructions
10917 - Handling of valuetypes:
10918 - When a vtype is pushed on the stack, a new temporary is created, an
10919 instruction computing its address (LDADDR) is emitted and pushed on
10920 the stack. Need to optimize cases when the vtype is used immediately as in
10921 argument passing, stloc etc.
10922 - Instead of the to_end stuff in the old JIT, simply call the function handling
10923 the values on the stack before emitting the last instruction of the bb.
10926 #endif /* DISABLE_JIT */