2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *   Copy the first three entries of @regs into the instruction's
 *   sreg1/sreg2/sreg3 source-register fields.
 * NOTE(review): this excerpt is gap-sampled; the return-type line and
 * braces of this function are not visible here.
 */
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
/*
 * Public thin wrappers over the internal register allocators: each one
 * hands out a fresh virtual register of the requested kind.
 * NOTE(review): return-type lines and braces are missing from this
 * gap-sampled excerpt; comments only, code untouched.
 */
/* Allocate a fresh virtual integer register. */
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
/* Allocate a fresh virtual floating-point register. */
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized virtual register. */
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
/* Allocate a destination register appropriate for @stack_type. */
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Map a MonoType to the opcode used to move a value of that type
 *   between registers.  Enum valuetypes are unwrapped to their base
 *   type and generic instances to their container class before
 *   re-dispatching; unknown types abort via g_error.
 * NOTE(review): many lines of this switch (return statements, several
 * case labels) are missing from this gap-sampled excerpt.
 */
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
206 switch (type->type) {
209 case MONO_TYPE_BOOLEAN:
221 case MONO_TYPE_FNPTR:
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
231 #if SIZEOF_REGISTER == 8
/* Enums move as their underlying base type. */
240 case MONO_TYPE_VALUETYPE:
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 case MONO_TYPE_TYPEDBYREF:
/* Generic instances dispatch on their container class's byval type. */
250 case MONO_TYPE_GENERICINST:
251 type = &type->data.generic_class->container_class->byval_arg;
255 g_assert (cfg->generic_sharing_context);
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of a basic block: prints @msg, the block number, its
 *   in-edges and out-edges (block number + dfn), then every
 *   instruction in the block via mono_print_ins_index.
 * NOTE(review): some lines (return type, braces, separators) are
 * missing from this gap-sampled excerpt.
 */
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
281 * Can't put this at the beginning, since other files reference stuff from this
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_REGISTER == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
/*
 * Implementation notes (review): adds @to to from->out_bb and @from to
 * to->in_bb.  Each direction first scans the existing edge array and
 * bails out on a duplicate edge, then grows the array by one via a
 * mempool allocation and copies the old entries over.
 * NOTE(review): the append/count-increment lines are not visible in
 * this gap-sampled excerpt.
 */
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (IL offsets or entry/exit). */
394 if (from->cil_code) {
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in-edge) direction. */
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock helper. */
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
/*
 * Encoding (from the visible returns): ((clause_index + 1) << 8) OR'ed
 * with a MONO_REGION_* kind and the clause flags.  Filter ranges are
 * checked first, then handler ranges, then the try body.
 */
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
/* Inside a filter block: [filter_offset, handler_offset). */
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: finally / fault / catch by clause flags. */
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the handler basic blocks (as a GList) of every exception
 *   clause of kind @type whose protected range contains @ip but not
 *   @target — i.e. the handlers that must run when control leaves the
 *   clause via this branch.
 * NOTE(review): return type/statement lines are missing from this
 * gap-sampled excerpt.
 */
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* Clause is being exited: ip inside it, target outside it. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Lazily create (and cache in cfg->spvars, keyed by region id) the
 *   int-typed stack-pointer local for an exception region.  The var is
 *   marked MONO_INST_INDIRECT so it is never register allocated.
 * NOTE(review): some lines (return statements, braces) are missing
 * from this gap-sampled excerpt.
 */
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object var for an IL offset. */
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Same lazy-create pattern as the spvar above, but for the
 * object-typed exception variable of a handler, keyed by IL offset.
 */
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 * for the value pushed when loading @type.  Enums and generic
 * instances are unwrapped before dispatching.
 * NOTE(review): several case labels and break lines are missing from
 * this gap-sampled excerpt.
 */
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
/* Enums evaluate as their underlying base type. */
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
/* Type variables only appear under generic sharing; treated as refs. */
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops, indexed [src1->type][src2->type]. */
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): row fragment of the unary-negation result table; its
 * declaration line is not visible in this gap-sampled excerpt. */
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity table: 0 = invalid, nonzero = allowed pairing. */
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Result type of shift ops; shift amount may be I4 or PTR. */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
/* Each *_op_map entry is an opcode delta added to the generic opcode,
 * indexed by the operand's stack type. */
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Implementation: each opcode family looks up the result stack type in
 * one of the validation tables above, then specializes the generic
 * opcode by adding the matching *_op_map delta.
 * NOTE(review): many case labels, break statements and the closing
 * brace are missing from this gap-sampled excerpt; comments only.
 */
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
/* Numeric binops: validate via bin_num_table, specialize opcode. */
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant based on operand width. */
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
/* Bit 1 of bin_comp_table gates the unsigned/unordered compares. */
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
/* 'not' is only valid on integer-ish stack types. */
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: source decides int- vs long-to-float-unsigned opcode. */
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
/* Overflow-checked conversions from unsigned sources to 32-bit. */
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width result; opcode depends on source and word size. */
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: invalid on floating point. */
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
973 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Validate that the argument instructions (@args, and optionally
 *   @this) are compatible with the method signature @sig: byref-ness
 *   must match, reference-typed args must map to reference parameter
 *   types, and R8 args to R4/R8 parameters.
 * NOTE(review): the return statements and several case labels are
 * missing from this gap-sampled excerpt; comments only.
 */
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
/* Managed pointer arg requires a byref parameter. */
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
/* R8 on the stack must feed an R4 or R8 parameter. */
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
/* Lazily create and cache the int-typed local holding the MonoDomain*. */
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var: lazily create the GOT-address local; only
 * meaningful when the arch needs a GOT var and we are compiling AOT.
 */
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
/*
 * mono_get_vtable_var: lazily create the rgctx local used under
 * generic sharing; forced stack-allocated via MONO_INST_INDIRECT.
 */
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's eval-stack type back to a MonoType*.
 *   MP uses the klass's this_arg, VTYPE its byval_arg; unknown stack
 *   types abort via g_error.
 * NOTE(review): the return-type line of this function is not visible
 * in this gap-sampled excerpt.
 */
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType (after unwrapping enums via
 *   mono_type_get_underlying_type) to its eval-stack type constant.
 * NOTE(review): the return statements for each case group are missing
 * from this gap-sampled excerpt; comments only.
 */
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 switch (mono_type_get_underlying_type (t)->type) {
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
/* Generic insts split on valuetype-ness. */
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the
 *   element type it accesses; unhandled opcodes assert.
 * NOTE(review): most case labels are missing from this gap-sampled
 * excerpt; only the return lines are visible.
 */
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable for eval-stack slot @slot holding a value
 *   of @ins's stack type, reusing one cached in cfg->intvars when a
 *   variable of the same slot+type was created before.  Slots beyond
 *   the method's max_stack (possible with inlining) always get a
 *   fresh variable.
 */
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   For AOT compilation, record the (image, token) pair under @key in
 *   cfg->token_info_hash so the AOT compiler can resolve the item
 *   later.  Skipped when a generic_context is set (token alone is not
 *   enough to look the method up) or when the token table is 0
 *   (wrapper-made reference).
 */
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * NOTE(review): this excerpt is gap-sampled — the return type, braces
 * and several control-flow lines of this function are not visible;
 * comments only, code untouched.
 */
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First pass for this bb: establish its out_stack variables. */
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an existing successor's in_stack as our out_stack. */
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpouse, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors; mismatched depths at a join
 * point make the method unverifiable. */
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
/* Spill each stack item into its temp and replace it on the stack. */
1312 locals = bb->out_stack;
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
/* Duplicate the stores into this successor's distinct in_stack. */
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT case: the interface id is not known until runtime, so load it via an
 * ADJUSTED_IID patch constant and index off vtable_reg with emitted code. */
1363 if (cfg->compile_aot) {
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: bake the negative slot offset directly — the offsets table
 * lives immediately before the vtable, hence the -(iid + 1) indexing. */
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1378 * stored in "klass_reg" implements the interface "klass".
1381 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the class's interface bitmap pointer. */
1386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
/* AOT case: the interface id is a runtime patch constant, so compute
 * bitmap [iid >> 3] & (1 << (iid & 7)) with emitted arithmetic. */
1388 if (cfg->compile_aot) {
1389 int iid_reg = alloc_preg (cfg);
1390 int shifted_iid_reg = alloc_preg (cfg);
1391 int ibitmap_byte_address_reg = alloc_preg (cfg);
1392 int masked_iid_reg = alloc_preg (cfg);
1393 int iid_one_bit_reg = alloc_preg (cfg);
1394 int iid_bit_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1400 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: the byte index and the single-bit mask are compile-time
 * constants, so one load plus one AND-immediate suffices. */
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1410 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1411 * stored in "vtable_reg" implements the interface "klass".
/* Same scheme as mini_emit_load_intf_bit_reg_class, but the interface
 * bitmap is read from the MonoVTable instead of the MonoClass. */
1414 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1416 int ibitmap_reg = alloc_preg (cfg);
1417 int ibitmap_byte_reg = alloc_preg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
/* AOT case: runtime iid -> compute bitmap [iid >> 3] & (1 << (iid & 7)). */
1421 if (cfg->compile_aot) {
1422 int iid_reg = alloc_preg (cfg);
1423 int shifted_iid_reg = alloc_preg (cfg);
1424 int ibitmap_byte_address_reg = alloc_preg (cfg);
1425 int masked_iid_reg = alloc_preg (cfg);
1426 int iid_one_bit_reg = alloc_preg (cfg);
1427 int iid_bit_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1433 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: byte index and bit mask baked in as immediates. */
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 * Emit code which checks whether the interface id of @klass is less than
1444 * or equal to the value given by max_iid_reg.
1447 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 if (cfg->compile_aot) {
1451 int iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure (max_iid < iid, unsigned): branch to false_target when one
 * is supplied, otherwise raise InvalidCastException. */
1458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1463 /* Same as above, but obtains max_iid from a vtable */
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16 bit, zero-extended), then delegate. */
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16 bit, zero-extended), then delegate. */
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 * Emit an "is instance" subclass test: checks whether the class held in
 * klass_reg derives from @klass by probing the supertypes table at
 * @klass's inheritance depth, branching to true_target on a match and to
 * false_target when the candidate's idepth is too small.
 */
1486 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1488 int idepth_reg = alloc_preg (cfg);
1489 int stypes_reg = alloc_preg (cfg);
1490 int stype = alloc_preg (cfg);
/* Supertable entries up to MONO_DEFAULT_SUPERTABLE_SIZE always exist;
 * only deeper hierarchies need the explicit idepth range check. */
1492 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [klass->idepth - 1] is @klass itself iff klass_reg's class
 * derives from (or is) @klass. */
1497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1499 if (cfg->compile_aot) {
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* Non-AOT: the MonoClass pointer itself is embedded as an immediate. */
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface-implementation test against the vtable in vtable_reg:
 * first range-check the interface id, then test the interface bitmap bit.
 * Branches to true_target on success when targets are given; otherwise
 * raises InvalidCastException on failure.
 */
1510 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int intf_reg = alloc_preg (cfg);
1514 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1515 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 * Variant of the above that takes a register to the class, not the vtable.
1527 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1529 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id, then test the bitmap bit; branch to
 * true_target on success or raise InvalidCastException on failure. */
1531 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1532 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 * Emit an exact class-identity check: raise InvalidCastException unless the
 * MonoClass* in klass_reg equals @klass (via a CLASSCONST patch under AOT,
 * or the raw pointer as an immediate otherwise).
 */
1541 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1543 if (cfg->compile_aot) {
1544 int const_reg = alloc_preg (cfg);
1545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 * Like mini_emit_class_check, but instead of raising an exception it
 * branches to @target using the caller-supplied @branch_op condition.
 */
1554 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1556 if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 * Emit a castclass check for the object in obj_reg whose class is in
 * klass_reg; raises InvalidCastException on mismatch. Handles array types
 * (rank check + recursive element-class check, with special-casing around
 * enums and System.Object elements) and plain classes (idepth/supertypes
 * walk). obj_reg may be -1 to skip the vector-shape check (used for the
 * recursive arrays-of-arrays call).
 */
1567 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 int rank_reg = alloc_preg (cfg);
1571 int eclass_reg = alloc_preg (cfg);
/* Array path: the ranks must match exactly. */
1573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1575 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1576 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class comparison; the enum/Object special cases mirror the
 * runtime's array covariance rules for enum element types. */
1578 if (klass->cast_class == mono_defaults.object_class) {
1579 int parent_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1581 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1582 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class) {
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1589 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1592 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector, i.e.
 * a one-dimensional array with a NULL bounds pointer. */
1595 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1596 /* Check that the object is a vector too */
1597 int bounds_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1600 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: subclass test via idepth + supertypes, throwing on
 * failure (compare mini_emit_isninst_cast, which branches instead). */
1603 int idepth_reg = alloc_preg (cfg);
1604 int stypes_reg = alloc_preg (cfg);
1605 int stype = alloc_preg (cfg);
1607 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1608 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1610 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1614 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 * Emit inline code to set @size bytes at destreg+offset to @val (currently
 * only val == 0 is supported, see the assert). Small aligned sizes use a
 * single store-immediate; larger sizes fall back to a register-value store
 * loop, using wide stores only when unaligned access is allowed.
 * NOTE(review): this listing elides interleaved lines (braces, case labels,
 * loop headers); the visible code is kept byte-identical.
 */
1619 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1623 g_assert (val == 0);
/* Fast path: one store-immediate when the size is small and alignment permits. */
1628 if ((size <= 4) && (size <= align)) {
1631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 #if SIZEOF_REGISTER == 8
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize val in a register sized to the native word. */
1647 val_reg = alloc_preg (cfg);
1649 if (SIZEOF_REGISTER == 8)
1650 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1655 /* This could be optimized further if necessary */
1657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* With unaligned access available, emit the widest stores first, then
 * progressively narrower ones for the remainder. */
1664 #if !NO_UNALIGNED_ACCESS
1665 if (SIZEOF_REGISTER == 8) {
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1696 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 * Emit inline load/store pairs to copy @size bytes from srcreg+soffset to
 * destreg+doffset. Alignment-constrained copies go byte by byte; otherwise
 * the widest available load/store is used first (8-byte only on 64-bit
 * targets and when unaligned access is permitted), narrowing for the tail.
 * NOTE(review): this listing elides interleaved lines (braces, loop
 * headers, offset updates); the visible code is kept byte-identical.
 */
1699 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1707 /* This could be optimized further if necessary */
/* Byte-wise copy for the alignment-restricted path. */
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1718 #if !NO_UNALIGNED_ACCESS
1719 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets. */
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunk of the tail. */
1740 cur_reg = alloc_preg (cfg);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Final byte. */
1748 cur_reg = alloc_preg (cfg);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a call's return type to the proper call opcode family
 * (CALL / VOIDCALL / LCALL / FCALL / VCALL), each in its direct,
 * register-indirect (calli) or virtual variant. Enum valuetypes are
 * unwrapped to their base type and generic instances to their container
 * class (the re-dispatch after those assignments is on elided lines).
 */
1760 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized -> plain CALL family. */
1763 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1766 type = mini_get_basic_type_from_generic (gsctx, type);
1767 switch (type->type) {
1768 case MONO_TYPE_VOID:
1769 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1772 case MONO_TYPE_BOOLEAN:
1775 case MONO_TYPE_CHAR:
1778 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 case MONO_TYPE_FNPTR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_CLASS:
1785 case MONO_TYPE_STRING:
1786 case MONO_TYPE_OBJECT:
1787 case MONO_TYPE_SZARRAY:
1788 case MONO_TYPE_ARRAY:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1795 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1796 case MONO_TYPE_VALUETYPE:
1797 if (type->data.klass->enumtype) {
1798 type = mono_class_enum_basetype (type->data.klass);
1801 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 case MONO_TYPE_TYPEDBYREF:
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_GENERICINST:
1805 type = &type->data.generic_class->container_class->byval_arg;
1808 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1814 * target_type_is_incompatible:
1815 * @cfg: MonoCompile context
1817 * Check that the item @arg on the evaluation stack can be stored
1818 * in the target type (can be a local, or field, etc).
1819 * The cfg arg can be used to check if we need verification or just
1822 * Returns: non-0 value if arg can't be stored on a target.
1825 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1827 MonoType *simple_type;
/* Byref targets accept managed pointers (of the matching class) and
 * unmanaged pointers. */
1830 if (target->byref) {
1831 /* FIXME: check that the pointed to types match */
1832 if (arg->type == STACK_MP)
1833 return arg->klass != mono_class_from_mono_type (target);
1834 if (arg->type == STACK_PTR)
/* Resolve enums to their underlying type before dispatching. */
1839 simple_type = mono_type_get_underlying_type (target);
1840 switch (simple_type->type) {
1841 case MONO_TYPE_VOID:
1845 case MONO_TYPE_BOOLEAN:
1848 case MONO_TYPE_CHAR:
1851 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 /* STACK_MP is needed when setting pinned locals */
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1861 case MONO_TYPE_FNPTR:
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1872 /* FIXME: check type compatibility */
1876 if (arg->type != STACK_I8)
1881 if (arg->type != STACK_R8)
/* Valuetype targets additionally require the exact MonoClass to match. */
1884 case MONO_TYPE_VALUETYPE:
1885 if (arg->type != STACK_VTYPE)
1887 klass = mono_class_from_mono_type (simple_type);
1888 if (klass != arg->klass)
1891 case MONO_TYPE_TYPEDBYREF:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_GENERICINST:
1899 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1900 if (arg->type != STACK_VTYPE)
1902 klass = mono_class_from_mono_type (simple_type);
1903 if (klass != arg->klass)
1907 if (arg->type != STACK_OBJ)
1909 /* FIXME: check type compatibility */
/* VAR/MVAR only occur under generic sharing, where they are references. */
1913 case MONO_TYPE_MVAR:
1914 /* FIXME: all the arguments must be references for now,
1915 * later look inside cfg and see if the arg num is
1916 * really a reference
1918 g_assert (cfg->generic_sharing_context);
1919 if (arg->type != STACK_OBJ)
1923 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
/* The implicit 'this' argument must be an object, managed pointer or
 * unmanaged pointer. */
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1954 simple_type = sig->params [i];
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* Dispatch on the parameter's basic type; enum valuetypes unwrap to
 * their base type and generic insts to their container class (the
 * re-dispatch after those assignments is on elided lines). */
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1963 case MONO_TYPE_BOOLEAN:
1966 case MONO_TYPE_CHAR:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1989 if (args [i]->type != STACK_I8)
1994 if (args [i]->type != STACK_R8)
1997 case MONO_TYPE_VALUETYPE:
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_GENERICINST:
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2014 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a CALLVIRT-family opcode to the corresponding direct-call opcode.
 * NOTE(review): most of the switch body is on elided listing lines; only
 * the OP_VOIDCALLVIRT label and the unreachable default are visible.
 */
2022 callvirt_to_call (int opcode)
2027 case OP_VOIDCALLVIRT:
2036 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 * Map a CALLVIRT-family opcode to the corresponding *_MEMBASE call opcode
 * (call through a memory slot, used for vtable/IMT dispatch).
 */
2043 callvirt_to_call_membase (int opcode)
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2051 return OP_FCALL_MEMBASE;
2053 return OP_LCALL_MEMBASE;
2055 return OP_VCALL_MEMBASE;
2057 g_assert_not_reached ();
2063 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Emit the hidden IMT (interface method table) argument for an interface
 * call. On architectures with a dedicated IMT register the method pointer
 * is materialized (from imt_arg, an AOT METHODCONST patch, or a raw
 * OP_PCONST) and attached to the call as a fixed-register out-argument;
 * otherwise the per-arch hook is invoked.
 */
2065 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2067 #ifdef MONO_ARCH_IMT_REG
2068 int method_reg = alloc_preg (cfg);
2071 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2072 } else if (cfg->compile_aot) {
2073 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2076 MONO_INST_NEW (cfg, ins, OP_PCONST);
2077 ins->inst_p0 = call->method;
2078 ins->dreg = method_reg;
2079 MONO_ADD_INS (cfg->cbb, ins);
2082 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2084 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from @mp and fill in its target (ip and type
 * assignments are on elided listing lines). The pool owns the memory; no
 * separate free is needed.
 */
2089 static MonoJumpInfo *
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2096 ji->data.target = target;
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for a call with signature @sig and arguments
 * @args, choosing the opcode from the return type (or OP_TAILCALL for
 * tail calls), setting up the valuetype-return variable where needed, and
 * letting the backend (LLVM or mono_arch) lower the argument passing.
 * Does NOT add the call instruction to the current bblock — callers do.
 */
2104 inline static MonoCallInst *
2105 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2106 MonoInst **args, int calli, int virtual, int tail)
2109 #ifdef MONO_ARCH_SOFT_FLOAT
2114 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2116 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2119 call->signature = sig;
2121 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr (first branch) or create
 * a temp and reference it through OP_OUTARG_VTRETADDR (second branch). */
2124 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2125 call->vret_var = cfg->vret_addr;
2126 //g_assert_not_reached ();
2128 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2129 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2132 temp->backend.is_pinvoke = sig->pinvoke;
2135 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2136 * address of return value to increase optimization opportunities.
2137 * Before vtype decomposition, the dreg of the call ins itself represents the
2138 * fact the call modifies the return value. After decomposition, the call will
2139 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2140 * will be transformed into an LDADDR.
2142 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2143 loada->dreg = alloc_preg (cfg);
2144 loada->inst_p0 = temp;
2145 /* We reference the call too since call->dreg could change during optimization */
2146 loada->inst_p1 = call;
2147 MONO_ADD_INS (cfg->cbb, loada);
2149 call->inst.dreg = temp->dreg;
2151 call->vret_var = loada;
2152 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2153 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2157 * If the call has a float argument, we would need to do an r8->r4 conversion using
2158 * an icall, but that cannot be done during the call sequence since it would clobber
2159 * the call registers + the stack. So we do it before emitting the call.
2161 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2163 MonoInst *in = call->args [i];
2165 if (i >= sig->hasthis)
2166 t = sig->params [i - sig->hasthis];
2168 t = &mono_defaults.int_class->byval_arg;
2169 t = mono_type_get_underlying_type (t);
2171 if (!t->byref && t->type == MONO_TYPE_R4) {
2172 MonoInst *iargs [1];
2176 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2178 /* The result will be in an int vreg */
2179 call->args [i] = conv;
/* Lower argument passing via LLVM when enabled, else the native backend. */
2185 if (COMPILE_LLVM (cfg))
2186 mono_llvm_emit_call (cfg, call);
2188 mono_arch_emit_call (cfg, call);
2190 mono_arch_emit_call (cfg, call);
/* Track the worst-case outgoing-parameter area and flag that this method
 * makes calls. */
2193 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2194 cfg->flags |= MONO_CFG_HAS_CALLS;
2199 inline static MonoInst*
2200 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2202 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2204 call->inst.sreg1 = addr->dreg;
2206 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2208 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 * Emit an indirect call that additionally passes the runtime-generic-context
 * argument in the architecture's dedicated RGCTX register. Only available
 * when MONO_ARCH_RGCTX_REG is defined; otherwise unreachable.
 */
2211 inline static MonoInst*
2212 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2214 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg, then pin it to the RGCTX
 * register as a call out-argument. */
2219 rgctx_reg = mono_alloc_preg (cfg);
2220 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2222 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2224 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2225 cfg->uses_rgctx_reg = TRUE;
2226 call->rgctx_reg = TRUE;
2228 return (MonoInst*)call;
2230 g_assert_not_reached ();
2236 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2238 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) managed call to @method. Handles: string
 * ctor signature rewriting, transparent-proxy remoting wrappers, shared
 * generic code (indirect call through an rgctx slot), delegate Invoke
 * fast path, devirtualization of non-virtual/final methods (with a null
 * check on 'this'), and full vtable/IMT dispatch for the rest.
 * @this may be NULL for static calls; @imt_arg optionally supplies the
 * IMT method pointer for shared interface calls.
 */
2241 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2242 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2244 gboolean might_be_remote;
2245 gboolean virtual = this != NULL;
2246 gboolean enable_for_aot = TRUE;
/* String ctors really return a string; patch the signature accordingly. */
2250 if (method->string_ctor) {
2251 /* Create the real signature */
2252 /* FIXME: Cache these */
2253 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2254 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A MarshalByRef (or Object) receiver may be a transparent proxy, unless
 * MONO_CHECK_THIS already proved otherwise. */
2259 might_be_remote = this && sig->hasthis &&
2260 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2261 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2263 context_used = mono_method_check_context_used (method);
/* Shared generic code + possible remoting: fetch the
 * remoting-invoke-with-check wrapper address from the rgctx and call
 * indirectly. */
2264 if (might_be_remote && context_used) {
2267 g_assert (cfg->generic_sharing_context);
2269 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2271 return mono_emit_calli (cfg, sig, args, addr);
2274 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2276 if (might_be_remote)
2277 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2279 call->method = method;
2280 call->inst.flags |= MONO_INST_HAS_METHOD;
2281 call->inst.inst_left = this;
2284 int vtable_reg, slot_reg, this_reg;
2286 this_reg = this->dreg;
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2288 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2289 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2290 /* Make a call to delegate->invoke_impl */
2291 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2292 call->inst.inst_basereg = this_reg;
2293 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2294 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2296 return (MonoInst*)call;
/* Devirtualize non-virtual methods: just null-check 'this' and call
 * directly (inserting the remoting wrapper for MBR receivers). */
2300 if ((!cfg->compile_aot || enable_for_aot) &&
2301 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2302 (MONO_METHOD_IS_FINAL (method) &&
2303 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2305 * the method is not virtual, we just need to ensure this is not null
2306 * and then we can call the method directly.
2308 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2309 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2312 if (!method->string_ctor) {
2313 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2314 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2315 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2318 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2320 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2322 return (MonoInst*)call;
/* Devirtualize final virtual methods the same way. */
2325 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2327 * the method is virtual, but we can statically dispatch since either
2328 * it's class or the method itself are sealed.
2329 * But first we need to ensure it's not a null reference.
2331 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2332 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2333 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2335 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2336 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2338 return (MonoInst*)call;
/* Generic virtual dispatch: call through a vtable (or IMT) slot. */
2341 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2343 vtable_reg = alloc_preg (cfg);
2344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2345 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: IMT slot lives at a negative offset from the vtable. */
2347 #ifdef MONO_ARCH_HAVE_IMT
2349 guint32 imt_slot = mono_method_get_imt_slot (method);
2350 emit_imt_argument (cfg, call, imt_arg);
2351 slot_reg = vtable_reg;
2352 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: resolve the interface offset table entry at runtime. */
2355 if (slot_reg == -1) {
2356 slot_reg = alloc_preg (cfg);
2357 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2358 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual call: index into the vtable's method slots. */
2361 slot_reg = vtable_reg;
2362 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2363 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2364 #ifdef MONO_ARCH_HAVE_IMT
2366 g_assert (mono_method_signature (method)->generic_param_count);
2367 emit_imt_argument (cfg, call, imt_arg);
2372 call->inst.sreg1 = slot_reg;
2373 call->virtual = TRUE;
2376 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2378 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 * Like mono_emit_method_call_full, but also passes @vtable_arg in the
 * architecture's RGCTX register (only when MONO_ARCH_RGCTX_REG exists).
 */
2382 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2383 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
/* Move the vtable/rgctx value into a fresh vreg before emitting the
 * call, then attach it as a fixed-register out-argument. */
2390 #ifdef MONO_ARCH_RGCTX_REG
2391 rgctx_reg = mono_alloc_preg (cfg);
2392 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2397 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2399 call = (MonoCallInst*)ins;
2401 #ifdef MONO_ARCH_RGCTX_REG
2402 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2403 cfg->uses_rgctx_reg = TRUE;
2404 call->rgctx_reg = TRUE;
2413 static inline MonoInst*
2414 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2416 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function @func with signature @sig
 * (remaining parameters are on an elided listing line). Builds the call
 * instruction, appends it to the current bblock and returns it.
 */
2420 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2427 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2430 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2432 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for address @func, dispatching
 * through its wrapper so the runtime's icall conventions are respected.
 */
2435 inline static MonoInst*
2436 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2438 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2442 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2446 * mono_emit_abs_call:
2448 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2450 inline static MonoInst*
2451 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2452 MonoMethodSignature *sig, MonoInst **args)
2454 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2458 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the ABS resolver can map the fake address
 * back to the real target at code-emission time. */
2461 if (cfg->abs_patches == NULL)
2462 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2463 g_hash_table_insert (cfg->abs_patches, ji, ji);
2464 ins = mono_emit_native_call (cfg, ji, sig, args);
2465 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * After a pinvoke (or LLVM) call returning a small integer, emit an
 * explicit sign/zero extension of the result, since native code may leave
 * the upper bits of the return register uninitialized. Returns the
 * (possibly replaced) result instruction.
 */
2470 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2472 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2473 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2477 * Native code might return non register sized integers
2478 * without initializing the upper bits.
/* Pick the conversion matching the return type's load width. */
2480 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2481 case OP_LOADI1_MEMBASE:
2482 widen_op = OP_ICONV_TO_I1;
2484 case OP_LOADU1_MEMBASE:
2485 widen_op = OP_ICONV_TO_U1;
2487 case OP_LOADI2_MEMBASE:
2488 widen_op = OP_ICONV_TO_I2;
2490 case OP_LOADU2_MEMBASE:
2491 widen_op = OP_ICONV_TO_U2;
2497 if (widen_op != -1) {
2498 int dreg = alloc_preg (cfg);
2501 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2502 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Lazily resolve and cache corlib's String.memcpy(dst, src, n) helper;
 * aborts if the installed corlib is too old to have it.
 */
2512 get_memcpy_method (void)
2514 static MonoMethod *memcpy_method = NULL;
2515 if (!memcpy_method) {
2516 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2518 g_error ("Old corlib found. Install a new one");
2520 return memcpy_method;
2524 * Emit code to copy a valuetype of type @klass whose address is stored in
2525 * @src->dreg to memory whose address is stored at @dest->dreg.
2528 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2530 MonoInst *iargs [3];
2533 MonoMethod *memcpy_method;
2537 * This check breaks with spilled vars... need to handle it during verification anyway.
2538 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size differs for native (marshalled) vs managed layout of the valuetype. */
2542 n = mono_class_native_size (klass, &align);
2544 n = mono_class_value_size (klass, &align);
2546 #if HAVE_WRITE_BARRIERS
2547 /* if native is true there should be no references in the struct */
2548 if (klass->has_references && !native) {
2549 /* Avoid barriers when storing to the stack */
2550 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2551 (dest->opcode == OP_LDADDR))) {
2552 int context_used = 0;
2557 if (cfg->generic_sharing_context)
2558 context_used = mono_class_check_context_used (klass);
/* Under generic sharing the klass argument must come from the RGCTX;
 * otherwise a compile-time class constant suffices. */
2560 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2562 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2563 mono_class_compute_gc_descriptor (klass);
/* mono_value_copy performs the copy with the required write barriers. */
2566 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the managed memcpy. */
2571 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2572 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2573 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2577 EMIT_NEW_ICONST (cfg, iargs [2], n);
2579 memcpy_method = get_memcpy_method ();
2580 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed memset helper (on System.String in corlib), cached
 * in a static. Aborts if the corlib is too old to provide it.
 */
2585 get_memset_method (void)
2587 static MonoMethod *memset_method = NULL;
2588 if (!memset_method) {
2589 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2591 g_error ("Old corlib found. Install a new one");
2593 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at the address in
 * DEST->dreg. Small types are zeroed inline; larger ones go through the
 * managed memset helper.
 */
2597 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2599 MonoInst *iargs [3];
2602 MonoMethod *memset_method;
2604 /* FIXME: Optimize this for the case when dest is an LDADDR */
2606 mono_class_init (klass);
2607 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words. */
2609 if (n <= sizeof (gpointer) * 5) {
2610 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2613 memset_method = get_memset_method ();
2615 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2616 EMIT_NEW_ICONST (cfg, iargs [2], n);
2617 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit code that loads the runtime generic context for METHOD, depending
 * on how the context is used: from the 'this' argument, from the method
 * RGCTX variable, or from a vtable variable. Only valid under generic
 * sharing (asserted below).
 */
2622 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2624 MonoInst *this = NULL;
2626 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods that don't need a method context can
 * recover the context from 'this' (argument 0). */
2628 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2629 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2630 !method->klass->valuetype)
2631 EMIT_NEW_ARGLOAD (cfg, this, 0);
2633 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2634 MonoInst *mrgctx_loc, *mrgctx_var;
2637 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The method RGCTX was stashed in the vtable variable by the prolog —
 * TODO confirm who stores it there. */
2639 mrgctx_loc = mono_get_vtable_var (cfg);
2640 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2643 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2644 MonoInst *vtable_loc, *vtable_var;
2648 vtable_loc = mono_get_vtable_var (cfg);
2649 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the method has a method context, the variable actually holds an
 * MRGCTX; dereference it to get the class vtable. */
2651 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2652 MonoInst *mrgctx_var = vtable_var;
2655 vtable_reg = alloc_preg (cfg);
2656 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2657 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from 'this'. */
2663 int vtable_reg, res_reg;
2665 vtable_reg = alloc_preg (cfg);
2666 res_reg = alloc_preg (cfg);
2667 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an RGCTX-entry descriptor: the method it belongs to,
 * whether the lookup goes through an MRGCTX, the embedded patch describing
 * the data (PATCH_TYPE/PATCH_DATA), and the kind of info to fetch.
 */
2672 static MonoJumpInfoRgctxEntry *
2673 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2675 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2676 res->method = method;
2677 res->in_mrgctx = in_mrgctx;
2678 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2679 res->data->type = patch_type;
2680 res->data->data.target = patch_data;
2681 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX-fetch trampoline, passing the context
 * RGCTX as the single argument; ENTRY describes what to fetch.
 */
2686 static inline MonoInst*
2687 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2689 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit code that fetches RGCTX_TYPE information about KLASS from the
 * runtime generic context of the current method.
 */
2693 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2694 MonoClass *klass, int rgctx_type)
2696 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2697 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2699 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit code that fetches RGCTX_TYPE information about CMETHOD from the
 * runtime generic context of the current method.
 */
2703 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2704 MonoMethod *cmethod, int rgctx_type)
2706 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2707 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2709 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit code that fetches RGCTX_TYPE information about FIELD from the
 * runtime generic context of the current method.
 */
2713 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2714 MonoClassField *field, int rgctx_type)
2716 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2717 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2719 return emit_rgctx_fetch (cfg, rgctx, entry);
2723 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS. The vtable
 * argument comes from the RGCTX under generic sharing, otherwise it is a
 * compile-time vtable constant. On architectures with a dedicated vtable
 * register the argument is pinned to it.
 */
2726 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2728 MonoInst *vtable_arg;
2730 int context_used = 0;
2732 if (cfg->generic_sharing_context)
2733 context_used = mono_class_check_context_used (klass);
2736 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2737 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared path: look the vtable up now (may fail -> caller checks klass). */
2739 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2743 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2746 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2747 #ifdef MONO_ARCH_VTABLE_REG
2748 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2749 cfg->uses_vtable_reg = TRUE;
2756 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise. The comparison strategy
 * varies: class pointer compare under MONO_OPT_SHARED, RGCTX vtable compare
 * under generic sharing, otherwise a direct vtable compare (AOT vs JIT
 * constant forms).
 */
2759 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2761 int vtable_reg = alloc_preg (cfg);
2762 int context_used = 0;
2764 if (cfg->generic_sharing_context)
2765 context_used = mono_class_check_context_used (array_class);
2767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code can't bake in a vtable pointer; compare MonoClass* instead. */
2769 if (cfg->opt & MONO_OPT_SHARED) {
2770 int class_reg = alloc_preg (cfg);
2771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2772 if (cfg->compile_aot) {
2773 int klass_reg = alloc_preg (cfg);
2774 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2775 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2779 } else if (context_used) {
2780 MonoInst *vtable_ins;
2782 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2785 if (cfg->compile_aot) {
/* Vtable lookup may fail; the early return leaves the load error on
 * array_class for the caller to report. */
2789 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2791 vt_reg = alloc_preg (cfg);
2792 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2793 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2796 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2802 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (from the object
 * in OBJ_REG) and the target KLASS in the thread's MonoJitTlsData, so a
 * failing cast can report "cannot cast X to Y".
 */
2806 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2808 if (mini_get_debug_options ()->better_cast_details) {
2809 int to_klass_reg = alloc_preg (cfg);
2810 int vtable_reg = alloc_preg (cfg);
2811 int klass_reg = alloc_preg (cfg);
2812 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform -> feature unavailable. */
2815 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2819 MONO_ADD_INS (cfg->cbb, tls_get);
2820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2824 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-failure details recorded by
 * save_cast_details () once the cast has succeeded.
 */
2830 reset_cast_details (MonoCompile *cfg)
2832 /* Reset the variables holding the cast details */
2833 if (mini_get_debug_options ()->better_cast_details) {
2834 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2836 MONO_ADD_INS (cfg->cbb, tls_get);
2837 /* It is enough to reset the from field */
2838 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2843 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2844 * generic code is generated.
2847 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2849 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2852 MonoInst *rgctx, *addr;
2854 /* FIXME: What if the class is shared? We might not
2855 have to get the address of the method from the
/* Shared path: fetch Unbox's code address and the RGCTX, then do an
 * indirect call through the RGCTX calli helper. */
2857 addr = emit_get_rgctx_method (cfg, context_used, method,
2858 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2860 rgctx = emit_get_rgctx (cfg, method, context_used);
2862 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: plain direct call to Nullable<T>.Unbox. */
2864 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for KLASS on the object at sp [0]: check that
 * the object is a non-array instance whose element class matches, then
 * produce a managed pointer to the value (object address + MonoObject
 * header size).
 */
2869 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2873 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2874 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2875 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2876 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2878 obj_reg = sp [0]->dreg;
2879 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2880 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2882 /* FIXME: generics */
2883 g_assert (klass->rank == 0);
/* An array object can never unbox to a valuetype: rank must be 0. */
2886 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2887 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generics path: the expected element class comes from the RGCTX. */
2893 MonoInst *element_class;
2895 /* This assertion is from the unboxcast insn */
2896 g_assert (klass->rank == 0);
2898 element_class = emit_get_rgctx_klass (cfg, context_used,
2899 klass->element_class, MONO_RGCTX_INFO_KLASS);
2901 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2902 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check, with cast-failure bookkeeping. */
2904 save_cast_details (cfg, klass->element_class, obj_reg);
2905 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2906 reset_cast_details (cfg);
/* The unboxed value lives right after the MonoObject header. */
2909 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2910 MONO_ADD_INS (cfg->cbb, add);
2911 add->type = STACK_MP;
2918 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of an object of type KLASS. Picks between the
 * shared mono_object_new path, a compact corlib-only AOT helper, a GC
 * managed allocator, and the per-vtable allocation function.
 */
2921 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2923 MonoInst *iargs [2];
2926 if (cfg->opt & MONO_OPT_SHARED) {
2927 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2928 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2930 alloc_ftn = mono_object_new;
2931 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2932 /* This happens often in argument checking code, eg. throw new FooException... */
2933 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2934 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2935 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2937 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2938 MonoMethod *managed_alloc = NULL;
/* vtable lookup failure -> report a TypeLoadException via the cfg. */
2942 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2943 cfg->exception_ptr = klass;
2947 #ifndef MONO_CROSS_COMPILE
2948 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2951 if (managed_alloc) {
2952 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2953 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2955 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words as the
 * first argument (pass_lw); round the size up to a word multiple. */
2957 guint32 lw = vtable->klass->instance_size;
2958 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2959 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2960 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2963 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2967 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the class/vtable is only known at runtime
 * (DATA_INST), as happens under generic sharing for open classes.
 */
2971 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2974 MonoInst *iargs [2];
2975 MonoMethod *managed_alloc = NULL;
2979 FIXME: we cannot get managed_alloc here because we can't get
2980 the class's vtable (because it's not a closed class)
2982 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2983 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2986 if (cfg->opt & MONO_OPT_SHARED) {
2987 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2988 iargs [1] = data_inst;
2989 alloc_ftn = mono_object_new;
/* NOTE(review): managed_alloc is always NULL here (see FIXME above), so
 * this branch is currently dead — confirm before relying on it. */
2991 if (managed_alloc) {
2992 iargs [0] = data_inst;
2993 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2996 iargs [0] = data_inst;
2997 alloc_ftn = mono_object_new_specific;
3000 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3004 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the boxing of VAL as an instance of KLASS: allocate the object,
 * then store the value right after the MonoObject header. Nullable<T> is
 * delegated to the managed Nullable.Box helper.
 */
3007 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3009 MonoInst *alloc, *ins;
3011 if (mono_class_is_nullable (klass)) {
3012 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3013 return mono_emit_method_call (cfg, method, &val, NULL);
3016 alloc = handle_alloc (cfg, klass, TRUE);
3020 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generics variant of handle_box (): the class data comes from
 * DATA_INST at runtime, and Nullable.Box is called indirectly through an
 * address fetched from the RGCTX.
 */
3026 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3028 MonoInst *alloc, *ins;
3030 if (mono_class_is_nullable (klass)) {
3031 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3032 /* FIXME: What if the class is shared? We might not
3033 have to get the method address from the RGCTX. */
3034 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3035 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3036 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3038 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3040 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3042 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3049 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the castclass check of SRC against KLASS: NULL passes, interfaces
 * use the interface-cast helper, sealed non-array classes can use a single
 * vtable/class compare, everything else goes through the general
 * mini_emit_castclass () path. Throws InvalidCastException on mismatch.
 */
3052 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3054 MonoBasicBlock *is_null_bb;
3055 int obj_reg = src->dreg;
3056 int vtable_reg = alloc_preg (cfg);
3058 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always passes castclass. */
3060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3063 save_cast_details (cfg, klass, obj_reg);
3065 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3067 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3069 int klass_reg = alloc_preg (cfg);
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class, JIT, non-shared: identity compare is enough. */
3073 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3074 /* the remoting code is broken, access the class for now */
3075 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3076 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3078 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3079 cfg->exception_ptr = klass;
3082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3087 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3090 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3094 MONO_START_BB (cfg, is_null_bb);
3096 reset_cast_details (cfg);
3102 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the isinst test of SRC against KLASS. The result register starts
 * as a copy of the object (success value); false paths overwrite it with
 * NULL. Handles: null input, interfaces, arrays (rank + element-class
 * checks, with special cases for object/enum element types and SZARRAY
 * bounds), Nullable, sealed classes, and the general subclass walk.
 */
3105 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3108 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3109 int obj_reg = src->dreg;
3110 int vtable_reg = alloc_preg (cfg);
3111 int res_reg = alloc_preg (cfg);
3113 NEW_BBLOCK (cfg, is_null_bb);
3114 NEW_BBLOCK (cfg, false_bb);
3115 NEW_BBLOCK (cfg, end_bb);
3117 /* Do the assignment at the beginning, so the other assignment can be if converted */
3118 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3119 ins->type = STACK_OBJ;
/* isinst on null yields null — jump straight to the success join. */
3122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3125 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3127 /* the is_null_bb target simply copies the input register to the output */
3128 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3130 int klass_reg = alloc_preg (cfg);
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then compare element (cast) classes. */
3135 int rank_reg = alloc_preg (cfg);
3136 int eclass_reg = alloc_preg (cfg);
3138 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum[]/primitive-compatible element classes;
 * these branches encode the array-covariance special cases. */
3143 if (klass->cast_class == mono_defaults.object_class) {
3144 int parent_reg = alloc_preg (cfg);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3146 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3147 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3149 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3150 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3151 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3153 } else if (klass->cast_class == mono_defaults.enum_class) {
3154 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3156 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3157 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3159 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3160 /* Check that the object is a vector too */
3161 int bounds_reg = alloc_preg (cfg);
3162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3164 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3167 /* the is_null_bb target simply copies the input register to the output */
3168 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3170 } else if (mono_class_is_nullable (klass)) {
3171 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3172 /* the is_null_bb target simply copies the input register to the output */
3173 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class, JIT, non-shared: identity compare is sufficient. */
3175 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3176 /* the remoting code is broken, access the class for now */
3177 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3178 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3180 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3181 cfg->exception_ptr = klass;
3184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3186 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3187 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3192 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3193 /* the is_null_bb target simply copies the input register to the output */
3194 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: result is NULL. */
3199 MONO_START_BB (cfg, false_bb);
3201 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3202 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3204 MONO_START_BB (cfg, is_null_bb);
3206 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst used by the CEE_MONO_CISINST opcode.
 * Produces an I4 in DREG per the encoding documented below, including the
 * transparent-proxy cases where the real type cannot be decided statically.
 */
3212 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3214 /* This opcode takes as input an object reference and a class, and returns:
3215 0) if the object is an instance of the class,
3216 1) if the object is not instance of the class,
3217 2) if the object is a proxy whose type cannot be determined */
3220 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3221 int obj_reg = src->dreg;
3222 int dreg = alloc_ireg (cfg);
3224 int klass_reg = alloc_preg (cfg);
3226 NEW_BBLOCK (cfg, true_bb);
3227 NEW_BBLOCK (cfg, false_bb);
3228 NEW_BBLOCK (cfg, false2_bb);
3229 NEW_BBLOCK (cfg, end_bb);
3230 NEW_BBLOCK (cfg, no_proxy_bb);
/* null object -> "not an instance" (result 1). */
3232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3235 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3236 NEW_BBLOCK (cfg, interface_fail_bb);
3238 tmp_reg = alloc_preg (cfg);
3239 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3240 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: only a proxy with custom type info can still
 * be "undetermined" (result 2); a plain object is a definite no. */
3241 MONO_START_BB (cfg, interface_fail_bb);
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3244 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3246 tmp_reg = alloc_preg (cfg);
3247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3249 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect a transparent proxy and test its remote
 * class; otherwise fall through to a normal isinst walk. */
3251 tmp_reg = alloc_preg (cfg);
3252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3253 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3255 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3256 tmp_reg = alloc_preg (cfg);
3257 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3258 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3260 tmp_reg = alloc_preg (cfg);
3261 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3263 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3265 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3268 MONO_START_BB (cfg, no_proxy_bb);
3270 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result encoding: false=1, proxy-undetermined=2, true=0. */
3273 MONO_START_BB (cfg, false_bb);
3275 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3278 MONO_START_BB (cfg, false2_bb);
3280 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3283 MONO_START_BB (cfg, true_bb);
3285 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3287 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST carrying dreg/STACK_I4 out as the expression result. */
3290 MONO_INST_NEW (cfg, ins, OP_ICONST);
3292 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware castclass used by the CEE_MONO_CCASTCLASS
 * opcode. Produces an I4 result per the encoding documented below, or
 * throws InvalidCastException.
 */
3298 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3300 /* This opcode takes as input an object reference and a class, and returns:
3301 0) if the object is an instance of the class,
3302 1) if the object is a proxy whose type cannot be determined
3303 an InvalidCastException exception is thrown otherwhise*/
3306 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3307 int obj_reg = src->dreg;
3308 int dreg = alloc_ireg (cfg);
3309 int tmp_reg = alloc_preg (cfg);
3310 int klass_reg = alloc_preg (cfg);
3312 NEW_BBLOCK (cfg, end_bb);
3313 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0). */
3315 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3316 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3318 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3319 NEW_BBLOCK (cfg, interface_fail_bb);
3321 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3322 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface test failed: must be a proxy with custom type info, else
 * the class check / the EQ below raises InvalidCastException. */
3323 MONO_START_BB (cfg, interface_fail_bb);
3324 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3326 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3328 tmp_reg = alloc_preg (cfg);
3329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3331 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3333 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: route proxies with custom type info to the
 * "undetermined" result, everything else to a normal castclass. */
3337 NEW_BBLOCK (cfg, no_proxy_bb);
3339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3341 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3343 tmp_reg = alloc_preg (cfg);
3344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3347 tmp_reg = alloc_preg (cfg);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3352 NEW_BBLOCK (cfg, fail_1_bb);
3354 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3356 MONO_START_BB (cfg, fail_1_bb);
3358 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3361 MONO_START_BB (cfg, no_proxy_bb);
/* Normal castclass: mismatch throws inside mini_emit_castclass (). */
3363 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3366 MONO_START_BB (cfg, ok_result_bb);
3368 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3370 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST carrying dreg/STACK_I4 out as the expression result. */
3373 MONO_INST_NEW (cfg, ins, OP_ICONST);
3375 ins->type = STACK_I4;
3381 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the delegate construction for delegate type KLASS over METHOD
 * with target object TARGET: allocate the delegate, fill its target,
 * method, method_code and invoke_impl fields directly instead of calling
 * mono_delegate_ctor ().
 */
3383 static G_GNUC_UNUSED MonoInst*
3384 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3386 gpointer *trampoline;
3387 MonoInst *obj, *method_ins, *tramp_ins;
3391 obj = handle_alloc (cfg, klass, FALSE);
3395 /* Inline the contents of mono_delegate_ctor */
3397 /* Set target field */
3398 /* Optimize away setting of NULL target */
3399 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3400 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3402 /* Set method field */
3403 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3407 * To avoid looking up the compiled code belonging to the target method
3408 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3409 * store it, and we fill it after the method has been compiled.
3411 if (!cfg->compile_aot && !method->dynamic) {
3412 MonoInst *code_slot_ins;
/* One code slot per (domain, method); the domain lock guards the hash. */
3414 domain = mono_domain_get ();
3415 mono_domain_lock (domain);
3416 if (!domain_jit_info (domain)->method_code_hash)
3417 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3418 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3420 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3421 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3423 mono_domain_unlock (domain);
3425 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3426 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3429 /* Set invoke_impl field */
3430 if (cfg->compile_aot) {
3431 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3433 trampoline = mono_create_delegate_trampoline (klass);
3434 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3436 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3438 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of the given RANK via
 * the vararg mono_array_new_va icall; the dimension arguments are in SP.
 * Marks the method as having varargs and disables LLVM for it.
 */
3444 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3446 MonoJitICallInfo *info;
3448 /* Need to register the icall so it gets an icall wrapper */
3449 info = mono_get_array_new_va_icall (rank);
3451 cfg->flags |= MONO_CFG_HAS_VARARGS;
3453 /* mono_array_new_va () needs a vararg calling convention */
3454 cfg->disable_llvm = TRUE;
3456 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3457 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block, then keep the
 * variable alive for the whole method by adding a dummy use in the exit
 * block.  Does nothing when there is no got_var or when it has already been
 * allocated (got_var_allocated).
 */
3461 mono_emit_load_got_addr (MonoCompile *cfg)
3463 MonoInst *getaddr, *dummy_use;
3465 if (!cfg->got_var || cfg->got_var_allocated)
3468 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3469 getaddr->dreg = cfg->got_var->dreg;
3471 /* Add it to the start of the first bblock */
3472 if (cfg->bb_entry->code) {
3473 getaddr->next = cfg->bb_entry->code;
3474 cfg->bb_entry->code = getaddr;
3477 MONO_ADD_INS (cfg->bb_entry, getaddr);
3479 cfg->got_var_allocated = TRUE;
3482 * Add a dummy use to keep the got_var alive, since real uses might
3483 * only be generated by the back ends.
3484 * Add it to end_bblock, so the variable's lifetime covers the whole
3486 * It would be better to make the usage of the got var explicit in all
3487 * cases when the backend needs it (i.e. calls, throw etc.), so this
3488 * wouldn't be needed.
3490 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3491 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size threshold (IL code_size), lazily initialized from the
 * MONO_INLINELIMIT environment variable, defaulting to INLINE_LENGTH_LIMIT. */
3494 static int inline_limit;
3495 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method currently
 * being compiled in CFG.  Rejects (among the visible criteria): generic
 * sharing contexts; runtime/internal-call/noinline/synchronized/pinvoke
 * methods; MarshalByRef classes; methods with exception clauses or no header;
 * bodies at or above inline_limit; methods with declarative security (CAS);
 * and, under MONO_ARCH_SOFT_FLOAT, methods taking or returning R4.  Also
 * refuses to inline when the declaring class still needs its .cctor run and
 * that cannot be done (or already has been done) right away, to avoid having
 * to emit a mono_runtime_class_init() call inside the inlined code.
 */
3498 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3500 MonoMethodHeader *header;
3502 #ifdef MONO_ARCH_SOFT_FLOAT
3503 MonoMethodSignature *sig = mono_method_signature (method);
3507 if (cfg->generic_sharing_context)
3510 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, icalls/pinvokes with non-struct returns can be "inlined"
 * (NOTE(review): the taken branch is elided here — confirm it returns TRUE). */
3511 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3512 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3513 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3517 if (method->is_inflated)
3518 /* Avoid inflating the header */
3519 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3521 header = mono_method_get_header (method);
3523 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3524 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3525 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3526 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3527 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3528 (method->klass->marshalbyref) ||
3529 !header || header->num_clauses)
3532 /* also consider num_locals? */
3533 /* Do the size check early to avoid creating vtables */
3534 if (!inline_limit_inited) {
3535 if (getenv ("MONO_INLINELIMIT"))
3536 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3538 inline_limit = INLINE_LENGTH_LIMIT;
3539 inline_limit_inited = TRUE;
3541 if (header->code_size >= inline_limit)
3545 * if we can initialize the class of the method right away, we do,
3546 * otherwise we don't allow inlining if the class needs initialization,
3547 * since it would mean inserting a call to mono_runtime_class_init()
3548 * inside the inlined code
3550 if (!(cfg->opt & MONO_OPT_SHARED)) {
3551 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3552 if (cfg->run_cctors && method->klass->has_cctor) {
3553 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3554 if (!method->klass->runtime_info)
3555 /* No vtable created yet */
3557 vtable = mono_class_vtable (cfg->domain, method->klass);
3560 /* This makes so that inline cannot trigger */
3561 /* .cctors: too many apps depend on them */
3562 /* running with a specific order... */
3563 if (! vtable->initialized)
3565 mono_runtime_class_init (vtable);
3567 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3568 if (!method->klass->runtime_info)
3569 /* No vtable created yet */
3571 vtable = mono_class_vtable (cfg->domain, method->klass);
3574 if (!vtable->initialized)
3579 * If we're compiling for shared code
3580 * the cctor will need to be run at aot method load time, for example,
3581 * or at the end of the compilation of the inlining method.
3583 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3588 * CAS - do not inline methods with declarative security
3589 * Note: this has to be before any possible return TRUE;
3591 if (mono_method_has_declsec (method))
3594 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline methods using R4 parameters or return values. */
3596 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3598 for (i = 0; i < sig->param_count; ++i)
3599 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Determine whether a static field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class.  Visible fast-outs: the
 * vtable is already initialized (JIT only, not AOT); the class is
 * BeforeFieldInit; the class does not need its cctor run; or METHOD is an
 * instance method of the same class (the cctor already ran before any
 * instance could exist).  (Return values on these branches are elided in
 * this view — presumed FALSE for the fast-outs; confirm against full source.)
 */
3607 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3609 if (vtable->initialized && !cfg->compile_aot)
3612 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3615 if (!mono_class_needs_cctor_run (vtable->klass, method))
3618 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3619 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS, including the bounds check against
 * MonoArray.max_length.  On x86/amd64 a single LEA covers element sizes
 * 1/2/4/8; otherwise the address is built as
 * arr + index * element_size + offsetof(MonoArray, vector).
 * Returns the instruction holding the element address (STACK_PTR).
 */
3626 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3630 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3632 mono_class_init (klass);
3633 size = mono_class_array_element_size (klass);
3635 mult_reg = alloc_preg (cfg);
3636 array_reg = arr->dreg;
3637 index_reg = index->dreg;
3639 #if SIZEOF_REGISTER == 8
3640 /* The array reg is 64 bits but the index reg is only 32 */
3641 index2_reg = alloc_preg (cfg);
3642 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3644 if (index->type == STACK_I8) {
3645 index2_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3648 index2_reg = index_reg;
3652 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3654 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes map to a scaled LEA; fast_log2[size] is the
 * shift amount (entries for non-power-of-two sizes are -1 and unused). */
3655 if (size == 1 || size == 2 || size == 4 || size == 8) {
3656 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3658 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3659 ins->type = STACK_PTR;
3665 add_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3668 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3669 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3670 ins->type = STACK_PTR;
3671 MONO_ADD_INS (cfg->cbb, ins);
3676 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS.  Each index is
 * rebased against the per-dimension lower_bound from MonoArray.bounds and
 * range-checked against the dimension length (unsigned compare, throwing
 * IndexOutOfRangeException).  The final address is
 * arr + ((realidx1 * length2) + realidx2) * element_size + offsetof(MonoArray, vector).
 * Only compiled on architectures with native mul/div (needs OP_PMUL).
 */
3678 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3680 int bounds_reg = alloc_preg (cfg);
3681 int add_reg = alloc_preg (cfg);
3682 int mult_reg = alloc_preg (cfg);
3683 int mult2_reg = alloc_preg (cfg);
3684 int low1_reg = alloc_preg (cfg);
3685 int low2_reg = alloc_preg (cfg);
3686 int high1_reg = alloc_preg (cfg);
3687 int high2_reg = alloc_preg (cfg);
3688 int realidx1_reg = alloc_preg (cfg);
3689 int realidx2_reg = alloc_preg (cfg);
3690 int sum_reg = alloc_preg (cfg);
3695 mono_class_init (klass);
3696 size = mono_class_array_element_size (klass);
3698 index1 = index_ins1->dreg;
3699 index2 = index_ins2->dreg;
3701 /* range checking */
3702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3703 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3705 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3706 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3707 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3708 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3709 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3711 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
3713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3714 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3715 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3716 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3717 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3718 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3719 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3721 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3722 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3724 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3725 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3727 ins->type = STACK_MP;
3729 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an Array Address/Get/Set intrinsic call
 * CMETHOD.  The rank is derived from the signature's parameter count (minus
 * one for the value argument when IS_SET).  Rank 1 and (with MONO_OPT_INTRINS,
 * on archs with native mul) rank 2 are emitted inline; higher ranks fall back
 * to a call to the managed Address wrapper from mono_marshal_get_array_address.
 */
3736 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3740 MonoMethod *addr_method;
3743 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3746 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3748 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3749 /* emit_ldelema_2 depends on OP_LMUL */
3750 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3751 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3755 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3756 addr_method = mono_marshal_get_array_address (rank, element_size);
3757 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics recognizer: if CMETHOD (with signature FSIG, evaluated
 * arguments ARGS) has a known fast inline expansion, emit that IR directly
 * and return the resulting instruction instead of a call.  Recognized
 * classes visible here: String (get_Chars, get_Length, InternalSetChar),
 * Object (GetType, InternalGetHashCode, .ctor), Array (get_Rank, get_Length,
 * GetGenericValueImpl), RuntimeHelpers (get_OffsetToStringData), Thread
 * (SpinWait_nop, MemoryBarrier), Monitor (Enter/Exit fast paths),
 * System.Threading.Interlocked (Read/Increment/Decrement/Add/Exchange/
 * CompareExchange, gated on per-arch atomic-op support), Debugger.Break,
 * Environment.get_IsRunningOnWindows, Math, and SIMD intrinsics.  Falls
 * through to mono_arch_emit_inst_for_method () for arch-specific cases;
 * presumably returns NULL when nothing matches (elided here — confirm).
 */
3763 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3765 MonoInst *ins = NULL;
3767 static MonoClass *runtime_helpers_class = NULL;
3768 if (! runtime_helpers_class)
3769 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3770 "System.Runtime.CompilerServices", "RuntimeHelpers");
3772 if (cmethod->klass == mono_defaults.string_class) {
3773 if (strcmp (cmethod->name, "get_Chars") == 0) {
3774 int dreg = alloc_ireg (cfg);
3775 int index_reg = alloc_preg (cfg);
3776 int mult_reg = alloc_preg (cfg);
3777 int add_reg = alloc_preg (cfg);
3779 #if SIZEOF_REGISTER == 8
3780 /* The array reg is 64 bits but the index reg is only 32 */
3781 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3783 index_reg = args [1]->dreg;
3785 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3787 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3788 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3789 add_reg = ins->dreg;
3790 /* Avoid a warning */
3792 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Non-x86 path: chars are 2 bytes, so index << 1 + string base. */
3795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3796 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3797 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3798 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3800 type_from_op (ins, NULL, NULL);
3802 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3803 int dreg = alloc_ireg (cfg);
3804 /* Decompose later to allow more optimizations */
3805 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3806 ins->type = STACK_I4;
3807 cfg->cbb->has_array_access = TRUE;
3808 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3811 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3812 int mult_reg = alloc_preg (cfg);
3813 int add_reg = alloc_preg (cfg);
3815 /* The corlib functions check for oob already. */
3816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3817 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3818 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3821 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType (): obj->vtable->type, no call needed. */
3823 if (strcmp (cmethod->name, "GetType") == 0) {
3824 int dreg = alloc_preg (cfg);
3825 int vt_reg = alloc_preg (cfg);
3826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3827 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3828 type_from_op (ins, NULL, NULL);
3831 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash from the (non-moving) object address: (addr << 3) * 2654435761
 * (Knuth multiplicative hash constant). Unsafe with a moving GC. */
3832 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3833 int dreg = alloc_ireg (cfg);
3834 int t1 = alloc_ireg (cfg);
3836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3837 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3838 ins->type = STACK_I4;
3842 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3843 MONO_INST_NEW (cfg, ins, OP_NOP);
3844 MONO_ADD_INS (cfg->cbb, ins);
3848 } else if (cmethod->klass == mono_defaults.array_class) {
3849 if (cmethod->name [0] != 'g')
3852 if (strcmp (cmethod->name, "get_Rank") == 0) {
3853 int dreg = alloc_ireg (cfg);
3854 int vtable_reg = alloc_preg (cfg);
3855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3856 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3857 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3858 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3859 type_from_op (ins, NULL, NULL);
3862 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3863 int dreg = alloc_ireg (cfg);
3865 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3866 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3867 type_from_op (ins, NULL, NULL);
3872 } else if (cmethod->klass == runtime_helpers_class) {
3874 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3875 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3879 } else if (cmethod->klass == mono_defaults.thread_class) {
3880 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3881 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3882 MONO_ADD_INS (cfg->cbb, ins);
3884 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3885 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3886 MONO_ADD_INS (cfg->cbb, ins);
3889 } else if (cmethod->klass == mono_defaults.monitor_class) {
3890 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Monitor.Enter/Exit via an arch-specific trampoline that takes the
 * object in a fixed register instead of the normal calling convention. */
3891 if (strcmp (cmethod->name, "Enter") == 0) {
3894 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3895 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3896 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3897 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3899 return (MonoInst*)call;
3900 } else if (strcmp (cmethod->name, "Exit") == 0) {
3903 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3904 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3905 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3906 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3908 return (MonoInst*)call;
3910 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3911 MonoMethod *fast_method = NULL;
3913 /* Avoid infinite recursion */
3914 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3915 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3916 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3919 if (strcmp (cmethod->name, "Enter") == 0 ||
3920 strcmp (cmethod->name, "Exit") == 0)
3921 fast_method = mono_monitor_get_fast_path (cmethod);
3925 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3927 } else if (mini_class_is_system_array (cmethod->klass) &&
3928 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3929 MonoInst *addr, *store, *load;
3930 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3932 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3933 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3934 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3936 } else if (cmethod->klass->image == mono_defaults.corlib &&
3937 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3938 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3941 #if SIZEOF_REGISTER == 8
3942 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3943 /* 64 bit reads are already atomic */
3944 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3945 ins->dreg = mono_alloc_preg (cfg);
3946 ins->inst_basereg = args [0]->dreg;
3947 ins->inst_offset = 0;
3948 MONO_ADD_INS (cfg->cbb, ins);
3952 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to atomic-add of +1/-1 with the matching width. */
3953 if (strcmp (cmethod->name, "Increment") == 0) {
3954 MonoInst *ins_iconst;
3957 if (fsig->params [0]->type == MONO_TYPE_I4)
3958 opcode = OP_ATOMIC_ADD_NEW_I4;
3959 #if SIZEOF_REGISTER == 8
3960 else if (fsig->params [0]->type == MONO_TYPE_I8)
3961 opcode = OP_ATOMIC_ADD_NEW_I8;
3964 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3965 ins_iconst->inst_c0 = 1;
3966 ins_iconst->dreg = mono_alloc_ireg (cfg);
3967 MONO_ADD_INS (cfg->cbb, ins_iconst);
3969 MONO_INST_NEW (cfg, ins, opcode);
3970 ins->dreg = mono_alloc_ireg (cfg);
3971 ins->inst_basereg = args [0]->dreg;
3972 ins->inst_offset = 0;
3973 ins->sreg2 = ins_iconst->dreg;
3974 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3975 MONO_ADD_INS (cfg->cbb, ins);
3977 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3978 MonoInst *ins_iconst;
3981 if (fsig->params [0]->type == MONO_TYPE_I4)
3982 opcode = OP_ATOMIC_ADD_NEW_I4;
3983 #if SIZEOF_REGISTER == 8
3984 else if (fsig->params [0]->type == MONO_TYPE_I8)
3985 opcode = OP_ATOMIC_ADD_NEW_I8;
3988 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3989 ins_iconst->inst_c0 = -1;
3990 ins_iconst->dreg = mono_alloc_ireg (cfg);
3991 MONO_ADD_INS (cfg->cbb, ins_iconst);
3993 MONO_INST_NEW (cfg, ins, opcode);
3994 ins->dreg = mono_alloc_ireg (cfg);
3995 ins->inst_basereg = args [0]->dreg;
3996 ins->inst_offset = 0;
3997 ins->sreg2 = ins_iconst->dreg;
3998 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3999 MONO_ADD_INS (cfg->cbb, ins);
4001 } else if (strcmp (cmethod->name, "Add") == 0) {
4004 if (fsig->params [0]->type == MONO_TYPE_I4)
4005 opcode = OP_ATOMIC_ADD_NEW_I4;
4006 #if SIZEOF_REGISTER == 8
4007 else if (fsig->params [0]->type == MONO_TYPE_I8)
4008 opcode = OP_ATOMIC_ADD_NEW_I8;
4012 MONO_INST_NEW (cfg, ins, opcode);
4013 ins->dreg = mono_alloc_ireg (cfg);
4014 ins->inst_basereg = args [0]->dreg;
4015 ins->inst_offset = 0;
4016 ins->sreg2 = args [1]->dreg;
4017 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4018 MONO_ADD_INS (cfg->cbb, ins);
4021 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4023 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4024 if (strcmp (cmethod->name, "Exchange") == 0) {
4026 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4028 if (fsig->params [0]->type == MONO_TYPE_I4)
4029 opcode = OP_ATOMIC_EXCHANGE_I4;
4030 #if SIZEOF_REGISTER == 8
4031 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4032 (fsig->params [0]->type == MONO_TYPE_I))
4033 opcode = OP_ATOMIC_EXCHANGE_I8;
4035 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4036 opcode = OP_ATOMIC_EXCHANGE_I4;
4041 MONO_INST_NEW (cfg, ins, opcode);
4042 ins->dreg = mono_alloc_ireg (cfg);
4043 ins->inst_basereg = args [0]->dreg;
4044 ins->inst_offset = 0;
4045 ins->sreg2 = args [1]->dreg;
4046 MONO_ADD_INS (cfg->cbb, ins);
4048 switch (fsig->params [0]->type) {
4050 ins->type = STACK_I4;
4054 ins->type = STACK_I8;
4056 case MONO_TYPE_OBJECT:
4057 ins->type = STACK_OBJ;
4060 g_assert_not_reached ();
4063 #if HAVE_WRITE_BARRIERS
/* Exchanging a reference writes a pointer into the heap: tell the GC. */
4065 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4066 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4070 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4072 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4073 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4075 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4076 if (fsig->params [1]->type == MONO_TYPE_I4)
4078 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4079 size = sizeof (gpointer);
4080 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4083 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4084 ins->dreg = alloc_ireg (cfg);
4085 ins->sreg1 = args [0]->dreg;
4086 ins->sreg2 = args [1]->dreg;
4087 ins->sreg3 = args [2]->dreg;
4088 ins->type = STACK_I4;
4089 MONO_ADD_INS (cfg->cbb, ins);
4090 } else if (size == 8) {
4091 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4092 ins->dreg = alloc_ireg (cfg);
4093 ins->sreg1 = args [0]->dreg;
4094 ins->sreg2 = args [1]->dreg;
4095 ins->sreg3 = args [2]->dreg;
4096 ins->type = STACK_I8;
4097 MONO_ADD_INS (cfg->cbb, ins);
4099 /* g_assert_not_reached (); */
4101 #if HAVE_WRITE_BARRIERS
4103 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4104 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4108 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4112 } else if (cmethod->klass->image == mono_defaults.corlib) {
4113 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4114 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4115 MONO_INST_NEW (cfg, ins, OP_BREAK);
4116 MONO_ADD_INS (cfg->cbb, ins);
4119 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4120 && strcmp (cmethod->klass->name, "Environment") == 0) {
4121 #ifdef PLATFORM_WIN32
4122 EMIT_NEW_ICONST (cfg, ins, 1);
4124 EMIT_NEW_ICONST (cfg, ins, 0);
4128 } else if (cmethod->klass == mono_defaults.math_class) {
4130 * There is general branches code for Min/Max, but it does not work for
4132 * http://everything2.com/?node_id=1051618
4136 #ifdef MONO_ARCH_SIMD_INTRINSICS
4137 if (cfg->opt & MONO_OPT_SIMD) {
4138 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4144 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4148 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a replacement implementation.  Currently
 * only handles String.InternalAllocateStr, which is rerouted to the GC's
 * managed string allocator (when one is available) with the String vtable
 * prepended as the first argument.  Presumably returns NULL when no
 * redirection applies (the fall-through is elided here — confirm).
 */
4151 inline static MonoInst*
4152 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4153 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4155 if (method->klass == mono_defaults.string_class) {
4156 /* managed string allocation support */
4157 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4158 MonoInst *iargs [2];
4159 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4160 MonoMethod *managed_alloc = NULL;
4162 g_assert (vtable); /*Should not fail since it System.String*/
4163 #ifndef MONO_CROSS_COMPILE
4164 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4168 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4169 iargs [1] = args [0];
4170 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   During inlining, spill each call argument from the evaluation stack SP
 * into a freshly created local variable, and point cfg->args [i] at it so
 * the inlined body's argument loads/stores resolve to these locals.
 */
4177 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4179 MonoInst *store, *temp;
4182 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4183 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4186 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4187 * would be different than the MonoInst's used to represent arguments, and
4188 * the ldelema implementation can't deal with that.
4189 * Solution: When ldelema is used on an inline argument, create a var for
4190 * it, emit ldelema on that var, and emit the saving code below in
4191 * inline_method () if needed.
4193 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4194 cfg->args [i] = temp;
4195 /* This uses cfg->args [i] which is set by the preceding line */
4196 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4197 store->cil_code = sp [0]->cil_code;
/* Debug switches: restrict inlining by callee/caller name prefix, driven by
 * the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables. */
4202 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4203 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4205 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD's full name starts with the prefix in
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT (or if no limit is set).  The limit
 * string is cached in a function-local static on first use.
 */
4207 check_inline_called_method_name_limit (MonoMethod *called_method)
4210 static char *limit = NULL;
4212 if (limit == NULL) {
4213 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4215 if (limit_string != NULL)
4216 limit = limit_string;
4218 limit = (char *) "";
4221 if (limit [0] != '\0') {
4222 char *called_method_name = mono_method_full_name (called_method, TRUE);
4224 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4225 g_free (called_method_name);
4227 //return (strncmp_result <= 0);
4228 return (strncmp_result == 0);
4235 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit () for the CALLER:
 * return TRUE if CALLER_METHOD's full name starts with the prefix in
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT (or if no limit is set).
 */
4237 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4240 static char *limit = NULL;
4242 if (limit == NULL) {
4243 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4244 if (limit_string != NULL) {
4245 limit = limit_string;
4247 limit = (char *) "";
4251 if (limit [0] != '\0') {
4252 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4254 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4255 g_free (caller_method_name);
4257 //return (strncmp_result <= 0);
4258 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current emission point.  Saves the
 * parts of CFG state that mono_method_to_ir () will clobber (locals, args,
 * cbb/cil maps, current method, generic context, ...), converts CMETHOD's
 * body into fresh basic blocks between SBBLOCK and EBBLOCK, then restores
 * the saved state.  On success (cost below threshold, or INLINE_ALLWAYS)
 * the new blocks are linked/merged into the caller's CFG and the return
 * value — spilled into RVAR — is loaded back; if the body never set the
 * return var (e.g. it only throws), RVAR is filled with a type-appropriate
 * dummy value first.  On failure the new blocks are discarded, any pending
 * exception/loader error is cleared, and emission resumes in the previous
 * cbb.  (The return value of this function is elided from this view —
 * presumably the inline cost, or an abort indicator; confirm.)
 */
4266 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4267 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4269 MonoInst *ins, *rvar = NULL;
4270 MonoMethodHeader *cheader;
4271 MonoBasicBlock *ebblock, *sbblock;
4273 MonoMethod *prev_inlined_method;
4274 MonoInst **prev_locals, **prev_args;
4275 MonoType **prev_arg_types;
4276 guint prev_real_offset;
4277 GHashTable *prev_cbb_hash;
4278 MonoBasicBlock **prev_cil_offset_to_bb;
4279 MonoBasicBlock *prev_cbb;
4280 unsigned char* prev_cil_start;
4281 guint32 prev_cil_offset_to_bb_len;
4282 MonoMethod *prev_current_method;
4283 MonoGenericContext *prev_generic_context;
4284 gboolean ret_var_set, prev_ret_var_set;
4286 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4288 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4289 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4292 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4293 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4297 if (cfg->verbose_level > 2)
4298 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4300 if (!cmethod->inline_info) {
4301 mono_jit_stats.inlineable_methods++;
4302 cmethod->inline_info = 1;
4304 /* allocate space to store the return value */
4305 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4306 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4309 /* allocate local variables */
4310 cheader = mono_method_get_header (cmethod);
4311 prev_locals = cfg->locals;
4312 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4313 for (i = 0; i < cheader->num_locals; ++i)
4314 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4316 /* allocate start and end blocks */
4317 /* This is needed so if the inline is aborted, we can clean up */
4318 NEW_BBLOCK (cfg, sbblock);
4319 sbblock->real_offset = real_offset;
4321 NEW_BBLOCK (cfg, ebblock);
4322 ebblock->block_num = cfg->num_bblocks++;
4323 ebblock->real_offset = real_offset;
/* Save every piece of per-method IR-generation state that the recursive
 * mono_method_to_ir () call below overwrites. */
4325 prev_args = cfg->args;
4326 prev_arg_types = cfg->arg_types;
4327 prev_inlined_method = cfg->inlined_method;
4328 cfg->inlined_method = cmethod;
4329 cfg->ret_var_set = FALSE;
4330 prev_real_offset = cfg->real_offset;
4331 prev_cbb_hash = cfg->cbb_hash;
4332 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4333 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4334 prev_cil_start = cfg->cil_start;
4335 prev_cbb = cfg->cbb;
4336 prev_current_method = cfg->current_method;
4337 prev_generic_context = cfg->generic_context;
4338 prev_ret_var_set = cfg->ret_var_set;
4340 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4342 ret_var_set = cfg->ret_var_set;
4344 cfg->inlined_method = prev_inlined_method;
4345 cfg->real_offset = prev_real_offset;
4346 cfg->cbb_hash = prev_cbb_hash;
4347 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4348 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4349 cfg->cil_start = prev_cil_start;
4350 cfg->locals = prev_locals;
4351 cfg->args = prev_args;
4352 cfg->arg_types = prev_arg_types;
4353 cfg->current_method = prev_current_method;
4354 cfg->generic_context = prev_generic_context;
4355 cfg->ret_var_set = prev_ret_var_set;
4357 if ((costs >= 0 && costs < 60) || inline_allways) {
4358 if (cfg->verbose_level > 2)
4359 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4361 mono_jit_stats.inlined_methods++;
4363 /* always add some code to avoid block split failures */
4364 MONO_INST_NEW (cfg, ins, OP_NOP);
4365 MONO_ADD_INS (prev_cbb, ins);
4367 prev_cbb->next_bb = sbblock;
4368 link_bblock (cfg, prev_cbb, sbblock);
4371 * Get rid of the begin and end bblocks if possible to aid local
4374 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4376 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4377 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4379 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4380 MonoBasicBlock *prev = ebblock->in_bb [0];
4381 mono_merge_basic_blocks (cfg, prev, ebblock);
4383 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4384 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4385 cfg->cbb = prev_cbb;
4393 * If the inlined method contains only a throw, then the ret var is not
4394 * set, so set it to a dummy value.
4397 static double r8_0 = 0.0;
4399 switch (rvar->type) {
4401 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4404 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4409 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4412 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4413 ins->type = STACK_R8;
4414 ins->inst_p0 = (void*)&r8_0;
4415 ins->dreg = rvar->dreg;
4416 MONO_ADD_INS (cfg->cbb, ins);
4419 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4422 g_assert_not_reached ();
4426 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: undo the attempted inline and continue with a normal call. */
4431 if (cfg->verbose_level > 2)
4432 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4433 cfg->exception_type = MONO_EXCEPTION_NONE;
4434 mono_loader_clear_error ();
4436 /* This gets rid of the newly added bblocks */
4437 cfg->cbb = prev_cbb;
4443 * Some of these comments may well be out-of-date.
4444 * Design decisions: we do a single pass over the IL code (and we do bblock
4445 * splitting/merging in the few cases when it's required: a back jump to an IL
4446 * address that was not already seen as bblock starting point).
4447 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4448 * Complex operations are decomposed in simpler ones right away. We need to let the
4449 * arch-specific code peek and poke inside this process somehow (except when the
4450 * optimizations can take advantage of the full semantic info of coarse opcodes).
4451 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4452 * MonoInst->opcode initially is the IL opcode or some simplification of that
4453 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4454 * opcode with value bigger than OP_LAST.
4455 * At this point the IR can be handed over to an interpreter, a dumb code generator
4456 * or to the optimizing code generator that will translate it to SSA form.
4458 * Profiling directed optimizations.
4459 * We may compile by default with few or no optimizations and instrument the code
4460 * or the user may indicate what methods to optimize the most either in a config file
4461 * or through repeated runs where the compiler applies offline the optimizations to
4462 * each method and then decides if it was worth it.
4465 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4466 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4467 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4468 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4469 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4470 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4471 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4472 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
/* Adding BIG_BRANCH_OFFSET to a short-form branch opcode (br.s, beq.s, ...)
 * yields the corresponding long-form opcode (br, beq, ...). */
4474 /* offset from br.s -> br like opcodes */
4475 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE when the IL address IP does not start a bblock different
 *   from BB: either no bblock has been recorded for IP yet, or the recorded
 *   one is BB itself.
 */
4478 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4480 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4482 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the IL stream [start, end) and create a MonoBasicBlock (via
 *   GET_BBLOCK) at every branch target and at the fall-through address
 *   after each branch/switch.  A bblock containing CEE_THROW is marked
 *   out_of_line so later passes can treat it as cold code.
 */
4486 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4488 unsigned char *ip = start;
4489 unsigned char *target;
4492 MonoBasicBlock *bblock;
4493 const MonoOpcode *opcode;
4496 cli_addr = ip - start;
4497 i = mono_opcode_value ((const guint8 **)&ip, end);
4500 opcode = &mono_opcodes [i];
/* dispatch on the operand encoding of the opcode */
4501 switch (opcode->argument) {
4502 case MonoInlineNone:
4505 case MonoInlineString:
4506 case MonoInlineType:
4507 case MonoInlineField:
4508 case MonoInlineMethod:
4511 case MonoShortInlineR:
4518 case MonoShortInlineVar:
4519 case MonoShortInlineI:
/* short branch: target = address after the 2-byte instruction + signed 8 bit displacement */
4522 case MonoShortInlineBrTarget:
4523 target = start + cli_addr + 2 + (signed char)ip [1];
4524 GET_BBLOCK (cfg, bblock, target);
/* the fall-through address also starts a bblock */
4527 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 5-byte instruction with a signed 32 bit displacement */
4529 case MonoInlineBrTarget:
4530 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4531 GET_BBLOCK (cfg, bblock, target);
4534 GET_BBLOCK (cfg, bblock, ip);
/* switch: a bblock for the fall-through plus one per case target */
4536 case MonoInlineSwitch: {
4537 guint32 n = read32 (ip + 1);
4540 cli_addr += 5 + 4 * n;
4541 target = start + cli_addr;
4542 GET_BBLOCK (cfg, bblock, target);
4544 for (j = 0; j < n; ++j) {
4545 target = start + cli_addr + (gint32)read32 (ip);
4546 GET_BBLOCK (cfg, bblock, target);
4556 g_assert_not_reached ();
4559 if (i == CEE_THROW) {
4560 unsigned char *bb_start = ip - 1;
4562 /* Find the start of the bblock containing the throw */
4564 while ((bb_start >= start) && !bblock) {
4565 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* mark the throwing bblock as cold so it can be moved off the hot path */
4569 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method TOKEN in the context of M.  For wrapper methods the
 *   token is a key into the wrapper's own data rather than a metadata
 *   token.  Unlike mini_get_method (), the result may have an open
 *   constructed declaring type.
 */
4578 static inline MonoMethod *
4579 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4583 if (m->wrapper_type != MONO_WRAPPER_NONE)
4584 return mono_method_get_wrapper_data (m, token);
4586 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared
 *   generic code a method whose declaring type is still an open
 *   constructed type is detected and rejected.
 */
4591 static inline MonoMethod *
4592 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4594 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
/* NOTE(review): the handling for the open-constructed case is outside this
 * view — presumably the method is discarded; confirm against the full file. */
4596 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type TOKEN in the context of METHOD and run mono_class_init ()
 *   on the result.  For wrapper methods the token indexes the wrapper data
 *   instead of the image metadata.
 */
4602 static inline MonoClass*
4603 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4607 if (method->wrapper_type != MONO_WRAPPER_NONE)
4608 klass = mono_method_get_wrapper_data (method, token);
4610 klass = mono_class_get_full (method->klass->image, token, context);
4612 mono_class_init (klass);
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a call from CALLER to CALLEE.
 */
4617 * Returns TRUE if the JIT should abort inlining because "callee"
4618 * is influenced by security attributes.
4621 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only evaluate declarative security when CALLER is being inlined into cfg->method */
4625 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4629 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4630 if (result == MONO_JIT_SECURITY_OK)
4633 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4634 /* Generate code to throw a SecurityException before the actual call/link */
4635 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4638 NEW_ICONST (cfg, args [0], 4);
4639 NEW_METHODCONST (cfg, args [1], caller);
4640 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4641 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4642 /* don't hide previous results */
/* record the failure on the MonoCompile so compilation aborts with a security error */
4643 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4644 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns the SecurityManager.ThrowException helper method, looked up
 *   lazily and cached in a function-local static for later calls.
 */
4652 throw_exception (void)
4654 static MonoMethod *method = NULL;
4657 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4658 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR at the current insertion point that calls
 *   SecurityManager.ThrowException (EX), raising EX at run time.
 *   NOTE(review): EX is embedded as a raw pointer constant — presumably the
 *   exception object is rooted elsewhere; confirm against the GC rules.
 */
4665 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4667 MonoMethod *thrower = throw_exception ();
4670 EMIT_NEW_PCONST (cfg, args [0], ex);
4671 mono_emit_method_call (cfg, thrower, args, NULL);
4675 * Return the original method if a wrapper is specified. We can only access
4676 * the custom attributes from the original method.
4679 get_original_method (MonoMethod *method)
/* non-wrappers are already the original method */
4681 if (method->wrapper_type == MONO_WRAPPER_NONE)
4684 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4685 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4688 /* in other cases we need to find the original method */
4689 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: emit code throwing a FieldAccessException when
 *   a Transparent CALLER accesses a field of a Critical class.
 */
4693 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4694 MonoBasicBlock *bblock, unsigned char *ip)
4696 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4697 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4700 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4701 caller = get_original_method (caller);
4705 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4706 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4707 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: emit code throwing a MethodAccessException when
 *   a Transparent CALLER calls a Critical CALLEE.
 */
4711 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4712 MonoBasicBlock *bblock, unsigned char *ip)
4714 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4715 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4718 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4719 caller = get_original_method (caller);
4723 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4724 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4725 emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * initialize_array_data:
 *   Recognize the `dup; ldtoken <fld>; call RuntimeHelpers::InitializeArray'
 *   pattern after a newarr and, when it matches, return a pointer to the
 *   static initialization data (or the RVA for AOT) so the runtime call can
 *   be replaced by a direct memory copy.
 */
4729 * Check that the IL instructions at ip are the array initialization
4730 * sequence and return the pointer to the data and the size.
4733 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4736 * newarr[System.Int32]
4738 * ldtoken field valuetype ...
4739 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4: the high byte of the ldtoken operand must be the Field table id */
4741 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4742 guint32 token = read32 (ip + 7);
4743 guint32 field_token = read32 (ip + 2);
4744 guint32 field_index = field_token & 0xffffff;
4746 const char *data_ptr;
4748 MonoMethod *cmethod;
4749 MonoClass *dummy_class;
4750 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4756 *out_field_token = field_token;
4758 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* bail out unless the called method really is corlib's RuntimeHelpers.InitializeArray */
4761 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose in-image layout matches the runtime layout are handled */
4763 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4764 case MONO_TYPE_BOOLEAN:
4768 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4769 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4770 case MONO_TYPE_CHAR:
4780 return NULL; /* stupid ARM FP swapped format */
/* the field's data must be at least as large as the array being initialized */
4790 if (size > mono_type_size (field->type, &dummy_align))
4793 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4794 if (!method->klass->image->dynamic) {
4795 field_index = read32 (ip + 2) & 0xffffff;
4796 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4797 data_ptr = mono_image_rva_map (method->klass->image, rva);
4798 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4799 /* for aot code we do the lookup on load */
4800 if (aot && data_ptr)
4801 return GUINT_TO_POINTER (rva);
4803 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images: read the data directly from the field */
4805 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG, formatting a message that
 *   includes the method name and a disassembly of the offending IL at IP
 *   (or a note when the method body is empty).
 */
4813 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4815 char *method_fname = mono_method_full_name (method, TRUE);
4818 if (mono_method_get_header (method)->code_size == 0)
4819 method_code = g_strdup ("method body is empty.");
4821 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4822 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4823 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* the formatted message owns copies, so the temporaries are freed here */
4824 g_free (method_fname);
4825 g_free (method_code);
/*
 * set_exception_object:
 *   Abort compilation with a caller-supplied exception object; the pointer
 *   is registered as a GC root before being stored in cfg->exception_ptr.
 */
4829 set_exception_object (MonoCompile *cfg, MonoException *exception)
4831 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4832 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4833 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Returns TRUE if KLASS is a reference type; under generic sharing the
 *   type is first mapped to its basic (shared) representation.
 */
4837 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4841 if (cfg->generic_sharing_context)
4842 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4844 type = &klass->byval_arg;
4845 return MONO_TYPE_IS_REFERENCE (type);
4849 * mono_decompose_array_access_opts:
4851 * Decompose array access opcodes.
4852 * This should be in decompose.c, but it emits calls so it has to stay here until
4853 * the old JIT is gone.
4856 mono_decompose_array_access_opts (MonoCompile *cfg)
4858 MonoBasicBlock *bb, *first_bb;
4861 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4862 * can be executed anytime. It should be run before decompose_long
4866 * Create a dummy bblock and emit code into it so we can use the normal
4867 * code generation macros.
4869 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4870 first_bb = cfg->cbb;
4872 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4874 MonoInst *prev = NULL;
4876 MonoInst *iargs [3];
/* skip bblocks with no array-access opcodes */
4879 if (!bb->has_array_access)
4882 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4884 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4890 for (ins = bb->code; ins; ins = ins->next) {
4891 switch (ins->opcode) {
/* array length: load MonoArray.max_length from the array object */
4893 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4894 G_STRUCT_OFFSET (MonoArray, max_length));
4895 MONO_ADD_INS (cfg->cbb, dest);
4897 case OP_BOUNDS_CHECK:
4898 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: with MONO_OPT_SHARED go through the generic mono_array_new icall */
4901 if (cfg->opt & MONO_OPT_SHARED) {
4902 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4903 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4904 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4905 iargs [2]->dreg = ins->sreg1;
4907 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4908 dest->dreg = ins->dreg;
/* otherwise use the vtable-specific allocator (managed when the GC provides one) */
4910 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4911 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (vtable, 1);
4913 g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
4914 NEW_VTABLECONST (cfg, iargs [0], vtable);
4915 MONO_ADD_INS (cfg->cbb, iargs [0]);
4916 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4917 iargs [1]->dreg = ins->sreg1;
4920 dest = mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4922 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4923 dest->dreg = ins->dreg;
/* string length: load MonoString.length */
4927 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4928 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4929 MONO_ADD_INS (cfg->cbb, dest);
4935 g_assert (cfg->cbb == first_bb);
4937 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4938 /* Replace the original instruction with the new code sequence */
4940 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4941 first_bb->code = first_bb->last_ins = NULL;
4942 first_bb->in_count = first_bb->out_count = 0;
4943 cfg->cbb = first_bb;
4950 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4960 #ifdef MONO_ARCH_SOFT_FLOAT
4963 * mono_decompose_soft_float:
4965 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4966 * similar to long support on 32 bit platforms. 32 bit float values require special
4967 * handling when used as locals, arguments, and in calls.
4968 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4971 mono_decompose_soft_float (MonoCompile *cfg)
4973 MonoBasicBlock *bb, *first_bb;
4976 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4980 * Create a dummy bblock and emit code into it so we can use the normal
4981 * code generation macros.
4983 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4984 first_bb = cfg->cbb;
4986 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4988 MonoInst *prev = NULL;
4991 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4993 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4999 for (ins = bb->code; ins; ins = ins->next) {
5000 const char *spec = INS_INFO (ins->opcode);
5002 /* Most fp operations are handled automatically by opcode emulation */
5004 switch (ins->opcode) {
/* r8 constant: materialize the 64 bit pattern as an i8 constant */
5007 d.vald = *(double*)ins->inst_p0;
5008 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
5013 /* We load the r8 value */
5014 d.vald = *(float*)ins->inst_p0;
5015 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long (vreg-pair) moves */
5019 ins->opcode = OP_LMOVE;
/* +1 / +2 select the low resp. high half of the vreg pair */
5022 ins->opcode = OP_MOVE;
5023 ins->sreg1 = ins->sreg1 + 1;
5026 ins->opcode = OP_MOVE;
5027 ins->sreg1 = ins->sreg1 + 2;
5030 int reg = ins->sreg1;
5032 ins->opcode = OP_SETLRET;
5034 ins->sreg1 = reg + 1;
5035 ins->sreg2 = reg + 2;
/* r8 memory accesses map 1:1 to i8 accesses */
5038 case OP_LOADR8_MEMBASE:
5039 ins->opcode = OP_LOADI8_MEMBASE;
5041 case OP_STORER8_MEMBASE_REG:
5042 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need a narrowing conversion, done by the mono_fstore_r4 icall */
5044 case OP_STORER4_MEMBASE_REG: {
5045 MonoInst *iargs [2];
5048 /* Arg 1 is the double value */
5049 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5050 iargs [0]->dreg = ins->sreg1;
5052 /* Arg 2 is the address to store to */
5053 addr_reg = mono_alloc_preg (cfg);
5054 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5055 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads widen to r8 through the mono_fload_r4 icall */
5059 case OP_LOADR4_MEMBASE: {
5060 MonoInst *iargs [1];
5064 addr_reg = mono_alloc_preg (cfg);
5065 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5066 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5067 conv->dreg = ins->dreg;
5072 case OP_FCALL_MEMBASE: {
5073 MonoCallInst *call = (MonoCallInst*)ins;
/* calls returning r4 return the bits in an integer register; convert after the call */
5074 if (call->signature->ret->type == MONO_TYPE_R4) {
5075 MonoCallInst *call2;
5076 MonoInst *iargs [1];
5079 /* Convert the call into a call returning an int */
5080 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5081 memcpy (call2, call, sizeof (MonoCallInst));
5082 switch (ins->opcode) {
5084 call2->inst.opcode = OP_CALL;
5087 call2->inst.opcode = OP_CALL_REG;
5089 case OP_FCALL_MEMBASE:
5090 call2->inst.opcode = OP_CALL_MEMBASE;
5093 g_assert_not_reached ();
5095 call2->inst.dreg = mono_alloc_ireg (cfg);
5096 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5098 /* FIXME: Optimize this */
5100 /* Emit an r4->r8 conversion */
5101 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5102 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5103 conv->dreg = ins->dreg;
/* calls returning r8 become long calls (result in a vreg pair) */
5105 switch (ins->opcode) {
5107 ins->opcode = OP_LCALL;
5110 ins->opcode = OP_LCALL_REG;
5112 case OP_FCALL_MEMBASE:
5113 ins->opcode = OP_LCALL_MEMBASE;
5116 g_assert_not_reached ();
5122 MonoJitICallInfo *info;
5123 MonoInst *iargs [2];
5124 MonoInst *call, *cmp, *br;
5126 /* Convert fcompare+fbcc to icall+icompare+beq */
/* the emulation icall is selected by the *following* branch opcode */
5128 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5131 /* Create dummy MonoInst's for the arguments */
5132 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5133 iargs [0]->dreg = ins->sreg1;
5134 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5135 iargs [1]->dreg = ins->sreg2;
5137 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5139 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5140 cmp->sreg1 = call->dreg;
5142 MONO_ADD_INS (cfg->cbb, cmp);
/* branch on the icall result, reusing the original branch targets */
5144 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5145 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5146 br->inst_true_bb = ins->next->inst_true_bb;
5147 br->inst_false_bb = ins->next->inst_false_bb;
5148 MONO_ADD_INS (cfg->cbb, br);
5150 /* The call sequence might include fp ins */
5153 /* Skip fbcc or fccc */
5154 NULLIFY_INS (ins->next);
5162 MonoJitICallInfo *info;
5163 MonoInst *iargs [2];
5166 /* Convert fccc to icall+icompare+iceq */
5168 info = mono_find_jit_opcode_emulation (ins->opcode);
5171 /* Create dummy MonoInst's for the arguments */
5172 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5173 iargs [0]->dreg = ins->sreg1;
5174 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5175 iargs [1]->dreg = ins->sreg2;
5177 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5180 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5182 /* The call sequence might include fp ins */
5187 MonoInst *iargs [2];
5188 MonoInst *call, *cmp;
5190 /* Convert to icall+icompare+cond_exc+move */
5192 /* Create dummy MonoInst's for the arguments */
5193 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5194 iargs [0]->dreg = ins->sreg1;
5196 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5198 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5199 cmp->sreg1 = call->dreg;
5201 MONO_ADD_INS (cfg->cbb, cmp);
5203 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5205 /* Do the assignment if the value is finite */
5206 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* sanity check: no fp vregs may survive this pass */
5212 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5213 mono_print_ins (ins);
5214 g_assert_not_reached ();
5219 g_assert (cfg->cbb == first_bb);
5221 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5222 /* Replace the original instruction with the new code sequence */
5224 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5225 first_bb->code = first_bb->last_ins = NULL;
5226 first_bb->in_count = first_bb->out_count = 0;
5227 cfg->cbb = first_bb;
5234 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes, so decompose them now */
5237 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *   Store the value on top of the stack (*sp) into local N.  When the value
 *   was just produced by an ICONST/I8CONST that is still the last ins of the
 *   current bblock, retarget its dreg to the local instead of emitting a
 *   separate move.
 */
5243 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5246 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5247 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5248 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5249 /* Optimize reg-reg moves away */
5251 * Can't optimize other opcodes, since sp[0] might point to
5252 * the last ins of a decomposed opcode.
5254 sp [0]->dreg = (cfg)->locals [n]->dreg;
5256 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for `ldloca <n>; initobj <T>': instead of taking the local's
 *   address, directly emit a NULL store (reference types) or a VZERO
 *   (value types) on the local's vreg, so the ldloca can be elided.
 *   NOTE(review): the return value handling is outside this view —
 *   presumably the IL pointer past the consumed sequence, or NULL when the
 *   pattern does not match; confirm against the full file.
 */
5261 * ldloca inhibits many optimizations so try to get rid of it in common
5264 static inline unsigned char *
5265 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5274 local = read16 (ip + 2);
/* only match when the following initobj is in the same bblock */
5278 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5279 gboolean skip = FALSE;
5281 /* From the INITOBJ case */
5282 token = read32 (ip + 2);
5283 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5284 CHECK_TYPELOAD (klass);
5285 if (generic_class_is_reference_type (cfg, klass)) {
5286 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5287 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5288 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5289 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5290 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Returns TRUE if CLASS is System.Exception or derives from it, by
 *   walking up the parent chain.
 */
5303 is_exception_class (MonoClass *class)
5306 if (class == mono_defaults.exception_class)
5308 class = class->parent;
5314 * mono_method_to_ir:
5316 * Translate the .net IL into linear IR.
5319 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5320 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5321 guint inline_offset, gboolean is_virtual_call)
5323 MonoInst *ins, **sp, **stack_start;
5324 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5325 MonoMethod *cmethod, *method_definition;
5326 MonoInst **arg_array;
5327 MonoMethodHeader *header;
5329 guint32 token, ins_flag;
5331 MonoClass *constrained_call = NULL;
5332 unsigned char *ip, *end, *target, *err_pos;
5333 static double r8_0 = 0.0;
5334 MonoMethodSignature *sig;
5335 MonoGenericContext *generic_context = NULL;
5336 MonoGenericContainer *generic_container = NULL;
5337 MonoType **param_types;
5338 int i, n, start_new_bblock, dreg;
5339 int num_calls = 0, inline_costs = 0;
5340 int breakpoint_id = 0;
5342 MonoBoolean security, pinvoke;
5343 MonoSecurityManager* secman = NULL;
5344 MonoDeclSecurityActions actions;
5345 GSList *class_inits = NULL;
5346 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5348 gboolean init_locals;
5350 /* serialization and xdomain stuff may need access to private fields and methods */
5351 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5352 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5353 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5354 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5355 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5356 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5358 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5360 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5361 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5362 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5363 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5365 image = method->klass->image;
5366 header = mono_method_get_header (method);
5367 generic_container = mono_method_get_generic_container (method);
5368 sig = mono_method_signature (method);
5369 num_args = sig->hasthis + sig->param_count;
5370 ip = (unsigned char*)header->code;
5371 cfg->cil_start = ip;
5372 end = ip + header->code_size;
5373 mono_jit_stats.cil_code_size += header->code_size;
5374 init_locals = header->init_locals;
5377 * Methods without init_locals set could cause asserts in various passes
5382 method_definition = method;
5383 while (method_definition->is_inflated) {
5384 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5385 method_definition = imethod->declaring;
5388 /* SkipVerification is not allowed if core-clr is enabled */
5389 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5391 dont_verify_stloc = TRUE;
5394 if (!dont_verify && mini_method_verify (cfg, method_definition))
5395 goto exception_exit;
5397 if (mono_debug_using_mono_debugger ())
5398 cfg->keep_cil_nops = TRUE;
5400 if (sig->is_inflated)
5401 generic_context = mono_method_get_context (method);
5402 else if (generic_container)
5403 generic_context = &generic_container->context;
5404 cfg->generic_context = generic_context;
5406 if (!cfg->generic_sharing_context)
5407 g_assert (!sig->has_type_parameters);
5409 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5410 g_assert (method->is_inflated);
5411 g_assert (mono_method_get_context (method)->method_inst);
5413 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5414 g_assert (sig->generic_param_count);
5416 if (cfg->method == method) {
5417 cfg->real_offset = 0;
5419 cfg->real_offset = inline_offset;
5422 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5423 cfg->cil_offset_to_bb_len = header->code_size;
5425 cfg->current_method = method;
5427 if (cfg->verbose_level > 2)
5428 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5430 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5432 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5433 for (n = 0; n < sig->param_count; ++n)
5434 param_types [n + sig->hasthis] = sig->params [n];
5435 cfg->arg_types = param_types;
5437 dont_inline = g_list_prepend (dont_inline, method);
5438 if (cfg->method == method) {
5440 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5441 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5444 NEW_BBLOCK (cfg, start_bblock);
5445 cfg->bb_entry = start_bblock;
5446 start_bblock->cil_code = NULL;
5447 start_bblock->cil_length = 0;
5450 NEW_BBLOCK (cfg, end_bblock);
5451 cfg->bb_exit = end_bblock;
5452 end_bblock->cil_code = NULL;
5453 end_bblock->cil_length = 0;
5454 g_assert (cfg->num_bblocks == 2);
5456 arg_array = cfg->args;
5458 if (header->num_clauses) {
5459 cfg->spvars = g_hash_table_new (NULL, NULL);
5460 cfg->exvars = g_hash_table_new (NULL, NULL);
5462 /* handle exception clauses */
5463 for (i = 0; i < header->num_clauses; ++i) {
5464 MonoBasicBlock *try_bb;
5465 MonoExceptionClause *clause = &header->clauses [i];
5466 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5467 try_bb->real_offset = clause->try_offset;
5468 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5469 tblock->real_offset = clause->handler_offset;
5470 tblock->flags |= BB_EXCEPTION_HANDLER;
5472 link_bblock (cfg, try_bb, tblock);
5474 if (*(ip + clause->handler_offset) == CEE_POP)
5475 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5478 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5479 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5480 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5481 MONO_ADD_INS (tblock, ins);
5483 /* todo: is a fault block unsafe to optimize? */
5484 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5485 tblock->flags |= BB_EXCEPTION_UNSAFE;
5489 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5491 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5493 /* catch and filter blocks get the exception object on the stack */
5494 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5495 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5496 MonoInst *dummy_use;
5498 /* mostly like handle_stack_args (), but just sets the input args */
5499 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5500 tblock->in_scount = 1;
5501 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5502 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5505 * Add a dummy use for the exvar so its liveness info will be
5509 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5511 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5512 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5513 tblock->flags |= BB_EXCEPTION_HANDLER;
5514 tblock->real_offset = clause->data.filter_offset;
5515 tblock->in_scount = 1;
5516 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5517 /* The filter block shares the exvar with the handler block */
5518 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5519 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5520 MONO_ADD_INS (tblock, ins);
5524 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5525 clause->data.catch_class &&
5526 cfg->generic_sharing_context &&
5527 mono_class_check_context_used (clause->data.catch_class)) {
5529 * In shared generic code with catch
5530 * clauses containing type variables
5531 * the exception handling code has to
5532 * be able to get to the rgctx.
5533 * Therefore we have to make sure that
5534 * the vtable/mrgctx argument (for
5535 * static or generic methods) or the
5536 * "this" argument (for non-static
5537 * methods) are live.
5539 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5540 mini_method_get_context (method)->method_inst ||
5541 method->klass->valuetype) {
5542 mono_get_vtable_var (cfg);
5544 MonoInst *dummy_use;
5546 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5551 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5552 cfg->cbb = start_bblock;
5553 cfg->args = arg_array;
5554 mono_save_args (cfg, sig, inline_args);
5557 /* FIRST CODE BLOCK */
5558 NEW_BBLOCK (cfg, bblock);
5559 bblock->cil_code = ip;
5563 ADD_BBLOCK (cfg, bblock);
5565 if (cfg->method == method) {
5566 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5567 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5568 MONO_INST_NEW (cfg, ins, OP_BREAK);
5569 MONO_ADD_INS (bblock, ins);
5573 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5574 secman = mono_security_manager_get_methods ();
5576 security = (secman && mono_method_has_declsec (method));
5577 /* at this point having security doesn't mean we have any code to generate */
5578 if (security && (cfg->method == method)) {
5579 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5580 * And we do not want to enter the next section (with allocation) if we
5581 * have nothing to generate */
5582 security = mono_declsec_get_demands (method, &actions);
5585 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5586 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5588 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5589 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5590 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5592 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5593 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5597 mono_custom_attrs_free (custom);
5600 custom = mono_custom_attrs_from_class (wrapped->klass);
5601 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5605 mono_custom_attrs_free (custom);
5608 /* not a P/Invoke after all */
5613 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5614 /* we use a separate basic block for the initialization code */
5615 NEW_BBLOCK (cfg, init_localsbb);
5616 cfg->bb_init = init_localsbb;
5617 init_localsbb->real_offset = cfg->real_offset;
5618 start_bblock->next_bb = init_localsbb;
5619 init_localsbb->next_bb = bblock;
5620 link_bblock (cfg, start_bblock, init_localsbb);
5621 link_bblock (cfg, init_localsbb, bblock);
5623 cfg->cbb = init_localsbb;
5625 start_bblock->next_bb = bblock;
5626 link_bblock (cfg, start_bblock, bblock);
5629 /* at this point we know, if security is TRUE, that some code needs to be generated */
5630 if (security && (cfg->method == method)) {
5633 mono_jit_stats.cas_demand_generation++;
5635 if (actions.demand.blob) {
5636 /* Add code for SecurityAction.Demand */
5637 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5638 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5639 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5640 mono_emit_method_call (cfg, secman->demand, args, NULL);
5642 if (actions.noncasdemand.blob) {
5643 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5644 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5645 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5646 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5647 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5648 mono_emit_method_call (cfg, secman->demand, args, NULL);
5650 if (actions.demandchoice.blob) {
5651 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5652 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5653 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5654 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5655 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5659 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5661 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5664 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5665 /* check if this is native code, e.g. an icall or a p/invoke */
5666 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5667 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5669 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5670 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5672 /* if this is a native call then it can only be JITted from platform code */
5673 if ((icall || pinvk) && method->klass && method->klass->image) {
5674 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5675 MonoException *ex = icall ? mono_get_exception_security () :
5676 mono_get_exception_method_access ();
5677 emit_throw_exception (cfg, ex);
5684 if (header->code_size == 0)
5687 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5692 if (cfg->method == method)
5693 mono_debug_init_method (cfg, bblock, breakpoint_id);
5695 for (n = 0; n < header->num_locals; ++n) {
5696 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5701 /* We force the vtable variable here for all shared methods
5702 for the possibility that they might show up in a stack
5703 trace where their exact instantiation is needed. */
5704 if (cfg->generic_sharing_context && method == cfg->method) {
5705 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5706 mini_method_get_context (method)->method_inst ||
5707 method->klass->valuetype) {
5708 mono_get_vtable_var (cfg);
5710 /* FIXME: Is there a better way to do this?
5711 We need the variable live for the duration
5712 of the whole method. */
5713 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5717 /* add a check for this != NULL to inlined methods */
5718 if (is_virtual_call) {
5721 NEW_ARGLOAD (cfg, arg_ins, 0);
5722 MONO_ADD_INS (cfg->cbb, arg_ins);
5723 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5724 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5725 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5728 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5729 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5732 start_new_bblock = 0;
5736 if (cfg->method == method)
5737 cfg->real_offset = ip - header->code;
5739 cfg->real_offset = inline_offset;
5744 if (start_new_bblock) {
5745 bblock->cil_length = ip - bblock->cil_code;
5746 if (start_new_bblock == 2) {
5747 g_assert (ip == tblock->cil_code);
5749 GET_BBLOCK (cfg, tblock, ip);
5751 bblock->next_bb = tblock;
5754 start_new_bblock = 0;
5755 for (i = 0; i < bblock->in_scount; ++i) {
5756 if (cfg->verbose_level > 3)
5757 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5758 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5762 g_slist_free (class_inits);
5765 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5766 link_bblock (cfg, bblock, tblock);
5767 if (sp != stack_start) {
5768 handle_stack_args (cfg, stack_start, sp - stack_start);
5770 CHECK_UNVERIFIABLE (cfg);
5772 bblock->next_bb = tblock;
5775 for (i = 0; i < bblock->in_scount; ++i) {
5776 if (cfg->verbose_level > 3)
5777 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5778 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5781 g_slist_free (class_inits);
5786 bblock->real_offset = cfg->real_offset;
5788 if ((cfg->method == method) && cfg->coverage_info) {
5789 guint32 cil_offset = ip - header->code;
5790 cfg->coverage_info->data [cil_offset].cil_code = ip;
5792 /* TODO: Use an increment here */
5793 #if defined(TARGET_X86)
5794 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5795 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5797 MONO_ADD_INS (cfg->cbb, ins);
5799 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5800 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5804 if (cfg->verbose_level > 3)
5805 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5809 if (cfg->keep_cil_nops)
5810 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5812 MONO_INST_NEW (cfg, ins, OP_NOP);
5814 MONO_ADD_INS (bblock, ins);
5817 MONO_INST_NEW (cfg, ins, OP_BREAK);
5819 MONO_ADD_INS (bblock, ins);
5825 CHECK_STACK_OVF (1);
5826 n = (*ip)-CEE_LDARG_0;
5828 EMIT_NEW_ARGLOAD (cfg, ins, n);
5836 CHECK_STACK_OVF (1);
5837 n = (*ip)-CEE_LDLOC_0;
5839 EMIT_NEW_LOCLOAD (cfg, ins, n);
5848 n = (*ip)-CEE_STLOC_0;
5851 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5853 emit_stloc_ir (cfg, sp, header, n);
5860 CHECK_STACK_OVF (1);
5863 EMIT_NEW_ARGLOAD (cfg, ins, n);
5869 CHECK_STACK_OVF (1);
5872 NEW_ARGLOADA (cfg, ins, n);
5873 MONO_ADD_INS (cfg->cbb, ins);
5883 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5885 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5890 CHECK_STACK_OVF (1);
5893 EMIT_NEW_LOCLOAD (cfg, ins, n);
5897 case CEE_LDLOCA_S: {
5898 unsigned char *tmp_ip;
5900 CHECK_STACK_OVF (1);
5901 CHECK_LOCAL (ip [1]);
5903 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5909 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5918 CHECK_LOCAL (ip [1]);
5919 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5921 emit_stloc_ir (cfg, sp, header, ip [1]);
5926 CHECK_STACK_OVF (1);
5927 EMIT_NEW_PCONST (cfg, ins, NULL);
5928 ins->type = STACK_OBJ;
5933 CHECK_STACK_OVF (1);
5934 EMIT_NEW_ICONST (cfg, ins, -1);
5947 CHECK_STACK_OVF (1);
5948 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5954 CHECK_STACK_OVF (1);
5956 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5962 CHECK_STACK_OVF (1);
5963 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5969 CHECK_STACK_OVF (1);
5970 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5971 ins->type = STACK_I8;
5972 ins->dreg = alloc_dreg (cfg, STACK_I8);
5974 ins->inst_l = (gint64)read64 (ip);
5975 MONO_ADD_INS (bblock, ins);
5981 gboolean use_aotconst = FALSE;
5983 #ifdef TARGET_POWERPC
5984 /* FIXME: Clean this up */
5985 if (cfg->compile_aot)
5986 use_aotconst = TRUE;
5989 /* FIXME: we should really allocate this only late in the compilation process */
5990 f = mono_domain_alloc (cfg->domain, sizeof (float));
5992 CHECK_STACK_OVF (1);
5998 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6000 dreg = alloc_freg (cfg);
6001 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6002 ins->type = STACK_R8;
6004 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6005 ins->type = STACK_R8;
6006 ins->dreg = alloc_dreg (cfg, STACK_R8);
6008 MONO_ADD_INS (bblock, ins);
6018 gboolean use_aotconst = FALSE;
6020 #ifdef TARGET_POWERPC
6021 /* FIXME: Clean this up */
6022 if (cfg->compile_aot)
6023 use_aotconst = TRUE;
6026 /* FIXME: we should really allocate this only late in the compilation process */
6027 d = mono_domain_alloc (cfg->domain, sizeof (double));
6029 CHECK_STACK_OVF (1);
6035 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6037 dreg = alloc_freg (cfg);
6038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6039 ins->type = STACK_R8;
6041 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6042 ins->type = STACK_R8;
6043 ins->dreg = alloc_dreg (cfg, STACK_R8);
6045 MONO_ADD_INS (bblock, ins);
6054 MonoInst *temp, *store;
6056 CHECK_STACK_OVF (1);
6060 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6061 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6063 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6066 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6079 if (sp [0]->type == STACK_R8)
6080 /* we need to pop the value from the x86 FP stack */
6081 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6090 if (stack_start != sp)
6092 token = read32 (ip + 1);
6093 /* FIXME: check the signature matches */
6094 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6099 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6100 GENERIC_SHARING_FAILURE (CEE_JMP);
6102 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6103 CHECK_CFG_EXCEPTION;
6105 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6107 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6110 /* Handle tail calls similarly to calls */
6111 n = fsig->param_count + fsig->hasthis;
6113 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6114 call->method = cmethod;
6115 call->tail_call = TRUE;
6116 call->signature = mono_method_signature (cmethod);
6117 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6118 call->inst.inst_p0 = cmethod;
6119 for (i = 0; i < n; ++i)
6120 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6122 mono_arch_emit_call (cfg, call);
6123 MONO_ADD_INS (bblock, (MonoInst*)call);
6126 for (i = 0; i < num_args; ++i)
6127 /* Prevent arguments from being optimized away */
6128 arg_array [i]->flags |= MONO_INST_VOLATILE;
6130 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6131 ins = (MonoInst*)call;
6132 ins->inst_p0 = cmethod;
6133 MONO_ADD_INS (bblock, ins);
6137 start_new_bblock = 1;
6142 case CEE_CALLVIRT: {
6143 MonoInst *addr = NULL;
6144 MonoMethodSignature *fsig = NULL;
6146 int virtual = *ip == CEE_CALLVIRT;
6147 int calli = *ip == CEE_CALLI;
6148 gboolean pass_imt_from_rgctx = FALSE;
6149 MonoInst *imt_arg = NULL;
6150 gboolean pass_vtable = FALSE;
6151 gboolean pass_mrgctx = FALSE;
6152 MonoInst *vtable_arg = NULL;
6153 gboolean check_this = FALSE;
6154 gboolean supported_tail_call = FALSE;
6157 token = read32 (ip + 1);
6164 if (method->wrapper_type != MONO_WRAPPER_NONE)
6165 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6167 fsig = mono_metadata_parse_signature (image, token);
6169 n = fsig->param_count + fsig->hasthis;
6171 MonoMethod *cil_method;
6173 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6174 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6175 cil_method = cmethod;
6176 } else if (constrained_call) {
6177 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6179 * This is needed since get_method_constrained can't find
6180 * the method in klass representing a type var.
6181 * The type var is guaranteed to be a reference type in this
6184 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6185 cil_method = cmethod;
6186 g_assert (!cmethod->klass->valuetype);
6188 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6191 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6192 cil_method = cmethod;
6197 if (!dont_verify && !cfg->skip_visibility) {
6198 MonoMethod *target_method = cil_method;
6199 if (method->is_inflated) {
6200 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6202 if (!mono_method_can_access_method (method_definition, target_method) &&
6203 !mono_method_can_access_method (method, cil_method))
6204 METHOD_ACCESS_FAILURE;
6207 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6208 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6210 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6211 /* MS.NET seems to silently convert this to a callvirt */
6214 if (!cmethod->klass->inited)
6215 if (!mono_class_init (cmethod->klass))
6218 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6219 mini_class_is_system_array (cmethod->klass)) {
6220 array_rank = cmethod->klass->rank;
6221 fsig = mono_method_signature (cmethod);
6223 if (mono_method_signature (cmethod)->pinvoke) {
6224 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6225 check_for_pending_exc, FALSE);
6226 fsig = mono_method_signature (wrapper);
6227 } else if (constrained_call) {
6228 fsig = mono_method_signature (cmethod);
6230 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6234 mono_save_token_info (cfg, image, token, cil_method);
6236 n = fsig->param_count + fsig->hasthis;
6238 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6239 if (check_linkdemand (cfg, method, cmethod))
6241 CHECK_CFG_EXCEPTION;
6244 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6245 g_assert_not_reached ();
6248 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6251 if (!cfg->generic_sharing_context && cmethod)
6252 g_assert (!mono_method_check_context_used (cmethod));
6256 //g_assert (!virtual || fsig->hasthis);
6260 if (constrained_call) {
6262 * We have the `constrained.' prefix opcode.
6264 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6266 * The type parameter is instantiated as a valuetype,
6267 * but that type doesn't override the method we're
6268 * calling, so we need to box `this'.
6270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6271 ins->klass = constrained_call;
6272 sp [0] = handle_box (cfg, ins, constrained_call);
6273 CHECK_CFG_EXCEPTION;
6274 } else if (!constrained_call->valuetype) {
6275 int dreg = alloc_preg (cfg);
6278 * The type parameter is instantiated as a reference
6279 * type. We have a managed pointer on the stack, so
6280 * we need to dereference it here.
6282 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6283 ins->type = STACK_OBJ;
6285 } else if (cmethod->klass->valuetype)
6287 constrained_call = NULL;
6290 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6294 * If the callee is a shared method, then its static cctor
6295 * might not get called after the call was patched.
6297 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6298 emit_generic_class_init (cfg, cmethod->klass);
6299 CHECK_TYPELOAD (cmethod->klass);
6302 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6303 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6304 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6305 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6306 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6309 * Pass vtable iff target method might
6310 * be shared, which means that sharing
6311 * is enabled for its class and its
6312 * context is sharable (and it's not a
6315 if (sharing_enabled && context_sharable &&
6316 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6320 if (cmethod && mini_method_get_context (cmethod) &&
6321 mini_method_get_context (cmethod)->method_inst) {
6322 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6323 MonoGenericContext *context = mini_method_get_context (cmethod);
6324 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6326 g_assert (!pass_vtable);
6328 if (sharing_enabled && context_sharable)
6332 if (cfg->generic_sharing_context && cmethod) {
6333 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6335 context_used = mono_method_check_context_used (cmethod);
6337 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6338 /* Generic method interface
6339 calls are resolved via a
6340 helper function and don't
6342 if (!cmethod_context || !cmethod_context->method_inst)
6343 pass_imt_from_rgctx = TRUE;
6347 * If a shared method calls another
6348 * shared method then the caller must
6349 * have a generic sharing context
6350 * because the magic trampoline
6351 * requires it. FIXME: We shouldn't
6352 * have to force the vtable/mrgctx
6353 * variable here. Instead there
6354 * should be a flag in the cfg to
6355 * request a generic sharing context.
6358 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6359 mono_get_vtable_var (cfg);
6364 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6366 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6368 CHECK_TYPELOAD (cmethod->klass);
6369 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6374 g_assert (!vtable_arg);
6377 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6379 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6382 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6383 MONO_METHOD_IS_FINAL (cmethod)) {
6390 if (pass_imt_from_rgctx) {
6391 g_assert (!pass_vtable);
6394 imt_arg = emit_get_rgctx_method (cfg, context_used,
6395 cmethod, MONO_RGCTX_INFO_METHOD);
6401 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6402 check->sreg1 = sp [0]->dreg;
6403 MONO_ADD_INS (cfg->cbb, check);
6406 /* Calling virtual generic methods */
6407 if (cmethod && virtual &&
6408 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6409 !(MONO_METHOD_IS_FINAL (cmethod) &&
6410 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6411 mono_method_signature (cmethod)->generic_param_count) {
6412 MonoInst *this_temp, *this_arg_temp, *store;
6413 MonoInst *iargs [4];
6415 g_assert (mono_method_signature (cmethod)->is_inflated);
6417 /* Prevent inlining of methods that contain indirect calls */
6420 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6421 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6422 g_assert (!imt_arg);
6424 imt_arg = emit_get_rgctx_method (cfg, context_used,
6425 cmethod, MONO_RGCTX_INFO_METHOD);
6428 g_assert (cmethod->is_inflated);
6429 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6431 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6435 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6436 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6437 MONO_ADD_INS (bblock, store);
6439 /* FIXME: This should be a managed pointer */
6440 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6442 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6444 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6445 cmethod, MONO_RGCTX_INFO_METHOD);
6446 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6447 addr = mono_emit_jit_icall (cfg,
6448 mono_helper_compile_generic_method, iargs);
6450 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6451 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6452 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6455 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6457 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6460 if (!MONO_TYPE_IS_VOID (fsig->ret))
6461 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6468 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6469 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6471 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6475 /* FIXME: runtime generic context pointer for jumps? */
6476 /* FIXME: handle this for generic sharing eventually */
6477 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6480 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6483 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6484 /* Handle tail calls similarly to calls */
6485 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6487 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6488 call->tail_call = TRUE;
6489 call->method = cmethod;
6490 call->signature = mono_method_signature (cmethod);
6493 * We implement tail calls by storing the actual arguments into the
6494 * argument variables, then emitting a CEE_JMP.
6496 for (i = 0; i < n; ++i) {
6497 /* Prevent argument from being register allocated */
6498 arg_array [i]->flags |= MONO_INST_VOLATILE;
6499 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6503 ins = (MonoInst*)call;
6504 ins->inst_p0 = cmethod;
6505 ins->inst_p1 = arg_array [0];
6506 MONO_ADD_INS (bblock, ins);
6507 link_bblock (cfg, bblock, end_bblock);
6508 start_new_bblock = 1;
6509 /* skip CEE_RET as well */
6515 /* Conversion to a JIT intrinsic */
6516 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6517 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6518 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6529 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6530 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6531 mono_method_check_inlining (cfg, cmethod) &&
6532 !g_list_find (dont_inline, cmethod)) {
6534 gboolean allways = FALSE;
6536 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6537 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6538 /* Prevent inlining of methods that call wrappers */
6540 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6544 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6546 cfg->real_offset += 5;
6549 if (!MONO_TYPE_IS_VOID (fsig->ret))
6550 /* *sp is already set by inline_method */
6553 inline_costs += costs;
6559 inline_costs += 10 * num_calls++;
6561 /* Tail recursion elimination */
6562 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6563 gboolean has_vtargs = FALSE;
6566 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6569 /* keep it simple */
6570 for (i = fsig->param_count - 1; i >= 0; i--) {
6571 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6576 for (i = 0; i < n; ++i)
6577 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6578 MONO_INST_NEW (cfg, ins, OP_BR);
6579 MONO_ADD_INS (bblock, ins);
6580 tblock = start_bblock->out_bb [0];
6581 link_bblock (cfg, bblock, tblock);
6582 ins->inst_target_bb = tblock;
6583 start_new_bblock = 1;
6585 /* skip the CEE_RET, too */
6586 if (ip_in_bb (cfg, bblock, ip + 5))
6596 /* Generic sharing */
6597 /* FIXME: only do this for generic methods if
6598 they are not shared! */
6599 if (context_used && !imt_arg && !array_rank &&
6600 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6601 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6602 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6603 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6606 g_assert (cfg->generic_sharing_context && cmethod);
6610 * We are compiling a call to a
6611 * generic method from shared code,
6612 * which means that we have to look up
6613 * the method in the rgctx and do an
6616 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6619 /* Indirect calls */
6621 g_assert (!imt_arg);
6623 if (*ip == CEE_CALL)
6624 g_assert (context_used);
6625 else if (*ip == CEE_CALLI)
6626 g_assert (!vtable_arg);
6628 /* FIXME: what the hell is this??? */
6629 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6630 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6632 /* Prevent inlining of methods with indirect calls */
6636 #ifdef MONO_ARCH_RGCTX_REG
6638 int rgctx_reg = mono_alloc_preg (cfg);
6640 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6641 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6642 call = (MonoCallInst*)ins;
6643 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6644 cfg->uses_rgctx_reg = TRUE;
6645 call->rgctx_reg = TRUE;
6650 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6652 * Instead of emitting an indirect call, emit a direct call
6653 * with the contents of the aotconst as the patch info.
6655 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6657 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6658 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6661 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6664 if (!MONO_TYPE_IS_VOID (fsig->ret))
6665 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6676 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6677 if (sp [fsig->param_count]->type == STACK_OBJ) {
6678 MonoInst *iargs [2];
6681 iargs [1] = sp [fsig->param_count];
6683 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6686 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6687 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6688 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6689 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6691 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6694 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6695 if (!cmethod->klass->element_class->valuetype && !readonly)
6696 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6697 CHECK_TYPELOAD (cmethod->klass);
6700 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6703 g_assert_not_reached ();
6711 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6713 if (!MONO_TYPE_IS_VOID (fsig->ret))
6714 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6724 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6726 } else if (imt_arg) {
6727 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6729 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6732 if (!MONO_TYPE_IS_VOID (fsig->ret))
6733 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6740 if (cfg->method != method) {
6741 /* return from inlined method */
6743 * If in_count == 0, that means the ret is unreachable due to
6744 * being preceded by a throw. In that case, inline_method () will
6745 * handle setting the return value
6746 * (test case: test_0_inline_throw ()).
6748 if (return_var && cfg->cbb->in_count) {
6752 //g_assert (returnvar != -1);
6753 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6754 cfg->ret_var_set = TRUE;
6758 MonoType *ret_type = mono_method_signature (method)->ret;
6760 g_assert (!return_var);
6763 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6766 if (!cfg->vret_addr) {
6769 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6771 EMIT_NEW_RETLOADA (cfg, ret_addr);
6773 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6774 ins->klass = mono_class_from_mono_type (ret_type);
6777 #ifdef MONO_ARCH_SOFT_FLOAT
6778 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6779 MonoInst *iargs [1];
6783 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6784 mono_arch_emit_setret (cfg, method, conv);
6786 mono_arch_emit_setret (cfg, method, *sp);
6789 mono_arch_emit_setret (cfg, method, *sp);
6794 if (sp != stack_start)
6796 MONO_INST_NEW (cfg, ins, OP_BR);
6798 ins->inst_target_bb = end_bblock;
6799 MONO_ADD_INS (bblock, ins);
6800 link_bblock (cfg, bblock, end_bblock);
6801 start_new_bblock = 1;
6805 MONO_INST_NEW (cfg, ins, OP_BR);
6807 target = ip + 1 + (signed char)(*ip);
6809 GET_BBLOCK (cfg, tblock, target);
6810 link_bblock (cfg, bblock, tblock);
6811 ins->inst_target_bb = tblock;
6812 if (sp != stack_start) {
6813 handle_stack_args (cfg, stack_start, sp - stack_start);
6815 CHECK_UNVERIFIABLE (cfg);
6817 MONO_ADD_INS (bblock, ins);
6818 start_new_bblock = 1;
6819 inline_costs += BRANCH_COST;
6833 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6835 target = ip + 1 + *(signed char*)ip;
6841 inline_costs += BRANCH_COST;
6845 MONO_INST_NEW (cfg, ins, OP_BR);
6848 target = ip + 4 + (gint32)read32(ip);
6850 GET_BBLOCK (cfg, tblock, target);
6851 link_bblock (cfg, bblock, tblock);
6852 ins->inst_target_bb = tblock;
6853 if (sp != stack_start) {
6854 handle_stack_args (cfg, stack_start, sp - stack_start);
6856 CHECK_UNVERIFIABLE (cfg);
6859 MONO_ADD_INS (bblock, ins);
6861 start_new_bblock = 1;
6862 inline_costs += BRANCH_COST;
6869 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6870 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6871 guint32 opsize = is_short ? 1 : 4;
6873 CHECK_OPSIZE (opsize);
6875 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6878 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6883 GET_BBLOCK (cfg, tblock, target);
6884 link_bblock (cfg, bblock, tblock);
6885 GET_BBLOCK (cfg, tblock, ip);
6886 link_bblock (cfg, bblock, tblock);
6888 if (sp != stack_start) {
6889 handle_stack_args (cfg, stack_start, sp - stack_start);
6890 CHECK_UNVERIFIABLE (cfg);
6893 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6894 cmp->sreg1 = sp [0]->dreg;
6895 type_from_op (cmp, sp [0], NULL);
6898 #if SIZEOF_REGISTER == 4
6899 if (cmp->opcode == OP_LCOMPARE_IMM) {
6900 /* Convert it to OP_LCOMPARE */
6901 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6902 ins->type = STACK_I8;
6903 ins->dreg = alloc_dreg (cfg, STACK_I8);
6905 MONO_ADD_INS (bblock, ins);
6906 cmp->opcode = OP_LCOMPARE;
6907 cmp->sreg2 = ins->dreg;
6910 MONO_ADD_INS (bblock, cmp);
6912 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6913 type_from_op (ins, sp [0], NULL);
6914 MONO_ADD_INS (bblock, ins);
6915 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6916 GET_BBLOCK (cfg, tblock, target);
6917 ins->inst_true_bb = tblock;
6918 GET_BBLOCK (cfg, tblock, ip);
6919 ins->inst_false_bb = tblock;
6920 start_new_bblock = 2;
6923 inline_costs += BRANCH_COST;
6938 MONO_INST_NEW (cfg, ins, *ip);
6940 target = ip + 4 + (gint32)read32(ip);
6946 inline_costs += BRANCH_COST;
6950 MonoBasicBlock **targets;
6951 MonoBasicBlock *default_bblock;
6952 MonoJumpInfoBBTable *table;
6953 int offset_reg = alloc_preg (cfg);
6954 int target_reg = alloc_preg (cfg);
6955 int table_reg = alloc_preg (cfg);
6956 int sum_reg = alloc_preg (cfg);
6957 gboolean use_op_switch;
6961 n = read32 (ip + 1);
6964 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6968 CHECK_OPSIZE (n * sizeof (guint32));
6969 target = ip + n * sizeof (guint32);
6971 GET_BBLOCK (cfg, default_bblock, target);
6973 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6974 for (i = 0; i < n; ++i) {
6975 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6976 targets [i] = tblock;
6980 if (sp != stack_start) {
6982 * Link the current bb with the targets as well, so handle_stack_args
6983 * will set their in_stack correctly.
6985 link_bblock (cfg, bblock, default_bblock);
6986 for (i = 0; i < n; ++i)
6987 link_bblock (cfg, bblock, targets [i]);
6989 handle_stack_args (cfg, stack_start, sp - stack_start);
6991 CHECK_UNVERIFIABLE (cfg);
6994 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6995 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6998 for (i = 0; i < n; ++i)
6999 link_bblock (cfg, bblock, targets [i]);
7001 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7002 table->table = targets;
7003 table->table_size = n;
7005 use_op_switch = FALSE;
7007 /* ARM implements SWITCH statements differently */
7008 /* FIXME: Make it use the generic implementation */
7009 if (!cfg->compile_aot)
7010 use_op_switch = TRUE;
7013 if (COMPILE_LLVM (cfg))
7014 use_op_switch = TRUE;
7016 cfg->cbb->has_jump_table = 1;
7018 if (use_op_switch) {
7019 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7020 ins->sreg1 = src1->dreg;
7021 ins->inst_p0 = table;
7022 ins->inst_many_bb = targets;
7023 ins->klass = GUINT_TO_POINTER (n);
7024 MONO_ADD_INS (cfg->cbb, ins);
7026 if (sizeof (gpointer) == 8)
7027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7031 #if SIZEOF_REGISTER == 8
7032 /* The upper word might not be zero, and we add it to a 64 bit address later */
7033 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7036 if (cfg->compile_aot) {
7037 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7039 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7040 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7041 ins->inst_p0 = table;
7042 ins->dreg = table_reg;
7043 MONO_ADD_INS (cfg->cbb, ins);
7046 /* FIXME: Use load_memindex */
7047 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7049 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7051 start_new_bblock = 1;
7052 inline_costs += (BRANCH_COST * 2);
7072 dreg = alloc_freg (cfg);
7075 dreg = alloc_lreg (cfg);
7078 dreg = alloc_preg (cfg);
7081 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7082 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7083 ins->flags |= ins_flag;
7085 MONO_ADD_INS (bblock, ins);
7100 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7101 ins->flags |= ins_flag;
7103 MONO_ADD_INS (bblock, ins);
7105 #if HAVE_WRITE_BARRIERS
7106 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7107 /* insert call to write barrier */
7108 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7109 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7120 MONO_INST_NEW (cfg, ins, (*ip));
7122 ins->sreg1 = sp [0]->dreg;
7123 ins->sreg2 = sp [1]->dreg;
7124 type_from_op (ins, sp [0], sp [1]);
7126 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7128 /* Use the immediate opcodes if possible */
7129 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7130 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7131 if (imm_opcode != -1) {
7132 ins->opcode = imm_opcode;
7133 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7136 sp [1]->opcode = OP_NOP;
7140 MONO_ADD_INS ((cfg)->cbb, (ins));
7142 *sp++ = mono_decompose_opcode (cfg, ins);
7159 MONO_INST_NEW (cfg, ins, (*ip));
7161 ins->sreg1 = sp [0]->dreg;
7162 ins->sreg2 = sp [1]->dreg;
7163 type_from_op (ins, sp [0], sp [1]);
7165 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7166 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7168 /* FIXME: Pass opcode to is_inst_imm */
7170 /* Use the immediate opcodes if possible */
7171 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7174 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7175 if (imm_opcode != -1) {
7176 ins->opcode = imm_opcode;
7177 if (sp [1]->opcode == OP_I8CONST) {
7178 #if SIZEOF_REGISTER == 8
7179 ins->inst_imm = sp [1]->inst_l;
7181 ins->inst_ls_word = sp [1]->inst_ls_word;
7182 ins->inst_ms_word = sp [1]->inst_ms_word;
7186 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7189 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7190 if (sp [1]->next == NULL)
7191 sp [1]->opcode = OP_NOP;
7194 MONO_ADD_INS ((cfg)->cbb, (ins));
7196 *sp++ = mono_decompose_opcode (cfg, ins);
7209 case CEE_CONV_OVF_I8:
7210 case CEE_CONV_OVF_U8:
7214 /* Special case this earlier so we have long constants in the IR */
7215 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7216 int data = sp [-1]->inst_c0;
7217 sp [-1]->opcode = OP_I8CONST;
7218 sp [-1]->type = STACK_I8;
7219 #if SIZEOF_REGISTER == 8
7220 if ((*ip) == CEE_CONV_U8)
7221 sp [-1]->inst_c0 = (guint32)data;
7223 sp [-1]->inst_c0 = data;
7225 sp [-1]->inst_ls_word = data;
7226 if ((*ip) == CEE_CONV_U8)
7227 sp [-1]->inst_ms_word = 0;
7229 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7231 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7238 case CEE_CONV_OVF_I4:
7239 case CEE_CONV_OVF_I1:
7240 case CEE_CONV_OVF_I2:
7241 case CEE_CONV_OVF_I:
7242 case CEE_CONV_OVF_U:
7245 if (sp [-1]->type == STACK_R8) {
7246 ADD_UNOP (CEE_CONV_OVF_I8);
7253 case CEE_CONV_OVF_U1:
7254 case CEE_CONV_OVF_U2:
7255 case CEE_CONV_OVF_U4:
7258 if (sp [-1]->type == STACK_R8) {
7259 ADD_UNOP (CEE_CONV_OVF_U8);
7266 case CEE_CONV_OVF_I1_UN:
7267 case CEE_CONV_OVF_I2_UN:
7268 case CEE_CONV_OVF_I4_UN:
7269 case CEE_CONV_OVF_I8_UN:
7270 case CEE_CONV_OVF_U1_UN:
7271 case CEE_CONV_OVF_U2_UN:
7272 case CEE_CONV_OVF_U4_UN:
7273 case CEE_CONV_OVF_U8_UN:
7274 case CEE_CONV_OVF_I_UN:
7275 case CEE_CONV_OVF_U_UN:
7285 case CEE_ADD_OVF_UN:
7287 case CEE_MUL_OVF_UN:
7289 case CEE_SUB_OVF_UN:
7297 token = read32 (ip + 1);
7298 klass = mini_get_class (method, token, generic_context);
7299 CHECK_TYPELOAD (klass);
7301 if (generic_class_is_reference_type (cfg, klass)) {
7302 MonoInst *store, *load;
7303 int dreg = alloc_preg (cfg);
7305 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7306 load->flags |= ins_flag;
7307 MONO_ADD_INS (cfg->cbb, load);
7309 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7310 store->flags |= ins_flag;
7311 MONO_ADD_INS (cfg->cbb, store);
7313 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7325 token = read32 (ip + 1);
7326 klass = mini_get_class (method, token, generic_context);
7327 CHECK_TYPELOAD (klass);
7329 /* Optimize the common ldobj+stloc combination */
7339 loc_index = ip [5] - CEE_STLOC_0;
7346 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7347 CHECK_LOCAL (loc_index);
7349 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7350 ins->dreg = cfg->locals [loc_index]->dreg;
7356 /* Optimize the ldobj+stobj combination */
7357 /* The reference case ends up being a load+store anyway */
7358 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7363 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7379 CHECK_STACK_OVF (1);
7381 n = read32 (ip + 1);
7383 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7384 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7385 ins->type = STACK_OBJ;
7388 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7389 MonoInst *iargs [1];
7391 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7392 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7394 if (cfg->opt & MONO_OPT_SHARED) {
7395 MonoInst *iargs [3];
7397 if (cfg->compile_aot) {
7398 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7400 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7401 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7402 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7403 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7404 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7406 if (bblock->out_of_line) {
7407 MonoInst *iargs [2];
7409 if (image == mono_defaults.corlib) {
7411 * Avoid relocations in AOT and save some space by using a
7412 * version of helper_ldstr specialized to mscorlib.
7414 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7415 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7417 /* Avoid creating the string object */
7418 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7419 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7420 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7424 if (cfg->compile_aot) {
7425 NEW_LDSTRCONST (cfg, ins, image, n);
7427 MONO_ADD_INS (bblock, ins);
7430 NEW_PCONST (cfg, ins, NULL);
7431 ins->type = STACK_OBJ;
7432 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7434 MONO_ADD_INS (bblock, ins);
7443 MonoInst *iargs [2];
7444 MonoMethodSignature *fsig;
7447 MonoInst *vtable_arg = NULL;
7450 token = read32 (ip + 1);
7451 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7454 fsig = mono_method_get_signature (cmethod, image, token);
7456 mono_save_token_info (cfg, image, token, cmethod);
7458 if (!mono_class_init (cmethod->klass))
7461 if (cfg->generic_sharing_context)
7462 context_used = mono_method_check_context_used (cmethod);
7464 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7465 if (check_linkdemand (cfg, method, cmethod))
7467 CHECK_CFG_EXCEPTION;
7468 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7469 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7472 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7473 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7474 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7476 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7477 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7479 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7483 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7484 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7486 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7488 CHECK_TYPELOAD (cmethod->klass);
7489 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7494 n = fsig->param_count;
7498 * Generate smaller code for the common newobj <exception> instruction in
7499 * argument checking code.
7501 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7502 is_exception_class (cmethod->klass) && n <= 2 &&
7503 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7504 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7505 MonoInst *iargs [3];
7507 g_assert (!vtable_arg);
7511 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7514 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7518 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7523 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7526 g_assert_not_reached ();
7534 /* move the args to allow room for 'this' in the first position */
7540 /* check_call_signature () requires sp[0] to be set */
7541 this_ins.type = STACK_OBJ;
7543 if (check_call_signature (cfg, fsig, sp))
7548 if (mini_class_is_system_array (cmethod->klass)) {
7549 g_assert (!vtable_arg);
7552 *sp = emit_get_rgctx_method (cfg, context_used,
7553 cmethod, MONO_RGCTX_INFO_METHOD);
7555 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7558 /* Avoid varargs in the common case */
7559 if (fsig->param_count == 1)
7560 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7561 else if (fsig->param_count == 2)
7562 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7564 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7565 } else if (cmethod->string_ctor) {
7566 g_assert (!context_used);
7567 g_assert (!vtable_arg);
7568 /* we simply pass a null pointer */
7569 EMIT_NEW_PCONST (cfg, *sp, NULL);
7570 /* now call the string ctor */
7571 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7573 MonoInst* callvirt_this_arg = NULL;
7575 if (cmethod->klass->valuetype) {
7576 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7577 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7578 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7583 * The code generated by mini_emit_virtual_call () expects
7584 * iargs [0] to be a boxed instance, but luckily the vcall
7585 * will be transformed into a normal call there.
7587 } else if (context_used) {
7591 if (cfg->opt & MONO_OPT_SHARED)
7592 rgctx_info = MONO_RGCTX_INFO_KLASS;
7594 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7595 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7597 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7600 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7602 CHECK_TYPELOAD (cmethod->klass);
7605 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7606 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7607 * As a workaround, we call class cctors before allocating objects.
7609 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7610 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7611 if (cfg->verbose_level > 2)
7612 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7613 class_inits = g_slist_prepend (class_inits, vtable);
7616 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7619 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7622 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7624 /* Now call the actual ctor */
7625 /* Avoid virtual calls to ctors if possible */
7626 if (cmethod->klass->marshalbyref)
7627 callvirt_this_arg = sp [0];
7629 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7630 mono_method_check_inlining (cfg, cmethod) &&
7631 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7632 !g_list_find (dont_inline, cmethod)) {
7635 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7636 cfg->real_offset += 5;
7639 inline_costs += costs - 5;
7642 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7644 } else if (context_used &&
7645 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7646 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7647 MonoInst *cmethod_addr;
7649 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7650 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7652 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7655 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7656 callvirt_this_arg, NULL, vtable_arg);
7660 if (alloc == NULL) {
7662 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7663 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7677 token = read32 (ip + 1);
7678 klass = mini_get_class (method, token, generic_context);
7679 CHECK_TYPELOAD (klass);
7680 if (sp [0]->type != STACK_OBJ)
7683 if (cfg->generic_sharing_context)
7684 context_used = mono_class_check_context_used (klass);
7693 args [1] = emit_get_rgctx_klass (cfg, context_used,
7694 klass, MONO_RGCTX_INFO_KLASS);
7696 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7700 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7701 MonoMethod *mono_castclass;
7702 MonoInst *iargs [1];
7705 mono_castclass = mono_marshal_get_castclass (klass);
7708 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7709 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7710 g_assert (costs > 0);
7713 cfg->real_offset += 5;
7718 inline_costs += costs;
7721 ins = handle_castclass (cfg, klass, *sp);
7722 CHECK_CFG_EXCEPTION;
7732 token = read32 (ip + 1);
7733 klass = mini_get_class (method, token, generic_context);
7734 CHECK_TYPELOAD (klass);
7735 if (sp [0]->type != STACK_OBJ)
7738 if (cfg->generic_sharing_context)
7739 context_used = mono_class_check_context_used (klass);
7748 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7750 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7754 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7755 MonoMethod *mono_isinst;
7756 MonoInst *iargs [1];
7759 mono_isinst = mono_marshal_get_isinst (klass);
7762 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7763 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7764 g_assert (costs > 0);
7767 cfg->real_offset += 5;
7772 inline_costs += costs;
7775 ins = handle_isinst (cfg, klass, *sp);
7776 CHECK_CFG_EXCEPTION;
7783 case CEE_UNBOX_ANY: {
7787 token = read32 (ip + 1);
7788 klass = mini_get_class (method, token, generic_context);
7789 CHECK_TYPELOAD (klass);
7791 mono_save_token_info (cfg, image, token, klass);
7793 if (cfg->generic_sharing_context)
7794 context_used = mono_class_check_context_used (klass);
7796 if (generic_class_is_reference_type (cfg, klass)) {
7797 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7799 MonoInst *iargs [2];
7804 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7805 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7809 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7810 MonoMethod *mono_castclass;
7811 MonoInst *iargs [1];
7814 mono_castclass = mono_marshal_get_castclass (klass);
7817 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7818 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7820 g_assert (costs > 0);
7823 cfg->real_offset += 5;
7827 inline_costs += costs;
7829 ins = handle_castclass (cfg, klass, *sp);
7830 CHECK_CFG_EXCEPTION;
7838 if (mono_class_is_nullable (klass)) {
7839 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7846 ins = handle_unbox (cfg, klass, sp, context_used);
7852 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7865 token = read32 (ip + 1);
7866 klass = mini_get_class (method, token, generic_context);
7867 CHECK_TYPELOAD (klass);
7869 mono_save_token_info (cfg, image, token, klass);
7871 if (cfg->generic_sharing_context)
7872 context_used = mono_class_check_context_used (klass);
7874 if (generic_class_is_reference_type (cfg, klass)) {
7880 if (klass == mono_defaults.void_class)
7882 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7884 /* frequent check in generic code: box (struct), brtrue */
7885 if (!mono_class_is_nullable (klass) &&
7886 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7887 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7889 MONO_INST_NEW (cfg, ins, OP_BR);
7890 if (*ip == CEE_BRTRUE_S) {
7893 target = ip + 1 + (signed char)(*ip);
7898 target = ip + 4 + (gint)(read32 (ip));
7901 GET_BBLOCK (cfg, tblock, target);
7902 link_bblock (cfg, bblock, tblock);
7903 ins->inst_target_bb = tblock;
7904 GET_BBLOCK (cfg, tblock, ip);
7906 * This leads to some inconsistency, since the two bblocks are
7907 * not really connected, but it is needed for handling stack
7908 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7909 * FIXME: This should only be needed if sp != stack_start, but that
7910 * doesn't work for some reason (test failure in mcs/tests on x86).
7912 link_bblock (cfg, bblock, tblock);
7913 if (sp != stack_start) {
7914 handle_stack_args (cfg, stack_start, sp - stack_start);
7916 CHECK_UNVERIFIABLE (cfg);
7918 MONO_ADD_INS (bblock, ins);
7919 start_new_bblock = 1;
7927 if (cfg->opt & MONO_OPT_SHARED)
7928 rgctx_info = MONO_RGCTX_INFO_KLASS;
7930 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7931 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7932 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7934 *sp++ = handle_box (cfg, val, klass);
7937 CHECK_CFG_EXCEPTION;
7946 token = read32 (ip + 1);
7947 klass = mini_get_class (method, token, generic_context);
7948 CHECK_TYPELOAD (klass);
7950 mono_save_token_info (cfg, image, token, klass);
7952 if (cfg->generic_sharing_context)
7953 context_used = mono_class_check_context_used (klass);
7955 if (mono_class_is_nullable (klass)) {
7958 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7959 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7963 ins = handle_unbox (cfg, klass, sp, context_used);
7973 MonoClassField *field;
7977 if (*ip == CEE_STFLD) {
7984 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7986 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7989 token = read32 (ip + 1);
7990 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7991 field = mono_method_get_wrapper_data (method, token);
7992 klass = field->parent;
7995 field = mono_field_from_token (image, token, &klass, generic_context);
7999 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8000 FIELD_ACCESS_FAILURE;
8001 mono_class_init (klass);
8003 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8004 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8005 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8006 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8009 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8010 if (*ip == CEE_STFLD) {
8011 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8013 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8014 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8015 MonoInst *iargs [5];
8018 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8019 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8020 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8024 if (cfg->opt & MONO_OPT_INLINE) {
8025 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8026 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8027 g_assert (costs > 0);
8029 cfg->real_offset += 5;
8032 inline_costs += costs;
8034 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8041 #if HAVE_WRITE_BARRIERS
8042 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8043 /* insert call to write barrier */
8044 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8045 MonoInst *iargs [2];
8048 dreg = alloc_preg (cfg);
8049 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8051 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8055 store->flags |= ins_flag;
8062 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8063 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8064 MonoInst *iargs [4];
8067 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8068 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8069 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8070 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8071 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8072 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8074 g_assert (costs > 0);
8076 cfg->real_offset += 5;
8080 inline_costs += costs;
8082 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8086 if (sp [0]->type == STACK_VTYPE) {
8089 /* Have to compute the address of the variable */
8091 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8093 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8095 g_assert (var->klass == klass);
8097 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8101 if (*ip == CEE_LDFLDA) {
8102 dreg = alloc_preg (cfg);
8104 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8105 ins->klass = mono_class_from_mono_type (field->type);
8106 ins->type = STACK_MP;
8111 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8112 load->flags |= ins_flag;
8123 MonoClassField *field;
8124 gpointer addr = NULL;
8125 gboolean is_special_static;
8128 token = read32 (ip + 1);
8130 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8131 field = mono_method_get_wrapper_data (method, token);
8132 klass = field->parent;
8135 field = mono_field_from_token (image, token, &klass, generic_context);
8138 mono_class_init (klass);
8139 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8140 FIELD_ACCESS_FAILURE;
8142 /* if the class is Critical then transparent code cannot access its fields */
8143 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8144 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8147 * We can only support shared generic static
8148 * field access on architectures where the
8149 * trampoline code has been extended to handle
8150 * the generic class init.
8152 #ifndef MONO_ARCH_VTABLE_REG
8153 GENERIC_SHARING_FAILURE (*ip);
8156 if (cfg->generic_sharing_context)
8157 context_used = mono_class_check_context_used (klass);
8159 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8161 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8162 * to be called here.
8164 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8165 mono_class_vtable (cfg->domain, klass);
8166 CHECK_TYPELOAD (klass);
8168 mono_domain_lock (cfg->domain);
8169 if (cfg->domain->special_static_fields)
8170 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8171 mono_domain_unlock (cfg->domain);
8173 is_special_static = mono_class_field_is_special_static (field);
8175 /* Generate IR to compute the field address */
8177 if ((cfg->opt & MONO_OPT_SHARED) ||
8178 (cfg->compile_aot && is_special_static) ||
8179 (context_used && is_special_static)) {
8180 MonoInst *iargs [2];
8182 g_assert (field->parent);
8183 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8185 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8186 field, MONO_RGCTX_INFO_CLASS_FIELD);
8188 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8190 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8191 } else if (context_used) {
8192 MonoInst *static_data;
8195 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8196 method->klass->name_space, method->klass->name, method->name,
8197 depth, field->offset);
8200 if (mono_class_needs_cctor_run (klass, method)) {
8204 vtable = emit_get_rgctx_klass (cfg, context_used,
8205 klass, MONO_RGCTX_INFO_VTABLE);
8207 // FIXME: This doesn't work since it tries to pass the argument
8208 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8210 * The vtable pointer is always passed in a register regardless of
8211 * the calling convention, so assign it manually, and make a call
8212 * using a signature without parameters.
8214 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8215 #ifdef MONO_ARCH_VTABLE_REG
8216 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8217 cfg->uses_vtable_reg = TRUE;
8224 * The pointer we're computing here is
8226 * super_info.static_data + field->offset
8228 static_data = emit_get_rgctx_klass (cfg, context_used,
8229 klass, MONO_RGCTX_INFO_STATIC_DATA);
8231 if (field->offset == 0) {
8234 int addr_reg = mono_alloc_preg (cfg);
8235 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8237 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8238 MonoInst *iargs [2];
8240 g_assert (field->parent);
8241 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8242 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8243 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8245 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8247 CHECK_TYPELOAD (klass);
8249 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8250 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8251 if (cfg->verbose_level > 2)
8252 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8253 class_inits = g_slist_prepend (class_inits, vtable);
8255 if (cfg->run_cctors) {
8257 /* This makes so that inline cannot trigger */
8258 /* .cctors: too many apps depend on them */
8259 /* running with a specific order... */
8260 if (! vtable->initialized)
8262 ex = mono_runtime_class_init_full (vtable, FALSE);
8264 set_exception_object (cfg, ex);
8265 goto exception_exit;
8269 addr = (char*)vtable->data + field->offset;
8271 if (cfg->compile_aot)
8272 EMIT_NEW_SFLDACONST (cfg, ins, field);
8274 EMIT_NEW_PCONST (cfg, ins, addr);
8277 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8278 * This could be later optimized to do just a couple of
8279 * memory dereferences with constant offsets.
8281 MonoInst *iargs [1];
8282 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8283 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8287 /* Generate IR to do the actual load/store operation */
8289 if (*ip == CEE_LDSFLDA) {
8290 ins->klass = mono_class_from_mono_type (field->type);
8291 ins->type = STACK_PTR;
8293 } else if (*ip == CEE_STSFLD) {
8298 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8299 store->flags |= ins_flag;
8301 gboolean is_const = FALSE;
8302 MonoVTable *vtable = NULL;
8304 if (!context_used) {
8305 vtable = mono_class_vtable (cfg->domain, klass);
8306 CHECK_TYPELOAD (klass);
8308 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8309 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8310 gpointer addr = (char*)vtable->data + field->offset;
8311 int ro_type = field->type->type;
8312 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8313 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8315 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8318 case MONO_TYPE_BOOLEAN:
8320 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8324 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8327 case MONO_TYPE_CHAR:
8329 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8333 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8338 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8342 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8345 #ifndef HAVE_MOVING_COLLECTOR
8348 case MONO_TYPE_STRING:
8349 case MONO_TYPE_OBJECT:
8350 case MONO_TYPE_CLASS:
8351 case MONO_TYPE_SZARRAY:
8353 case MONO_TYPE_FNPTR:
8354 case MONO_TYPE_ARRAY:
8355 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8356 type_to_eval_stack_type ((cfg), field->type, *sp);
8362 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8367 case MONO_TYPE_VALUETYPE:
8377 CHECK_STACK_OVF (1);
8379 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8380 load->flags |= ins_flag;
8393 token = read32 (ip + 1);
8394 klass = mini_get_class (method, token, generic_context);
8395 CHECK_TYPELOAD (klass);
8396 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8397 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8408 const char *data_ptr;
8410 guint32 field_token;
8416 token = read32 (ip + 1);
8418 klass = mini_get_class (method, token, generic_context);
8419 CHECK_TYPELOAD (klass);
8421 if (cfg->generic_sharing_context)
8422 context_used = mono_class_check_context_used (klass);
8424 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8425 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8426 ins->sreg1 = sp [0]->dreg;
8427 ins->type = STACK_I4;
8428 ins->dreg = alloc_ireg (cfg);
8429 MONO_ADD_INS (cfg->cbb, ins);
8430 *sp = mono_decompose_opcode (cfg, ins);
8435 MonoClass *array_class = mono_array_class_get (klass, 1);
8436 /* FIXME: we cannot get a managed
8437 allocator because we can't get the
8438 open generic class's vtable. We
8439 have the same problem in
8440 handle_alloc_from_inst(). This
8441 needs to be solved so that we can
8442 have managed allocs of shared
8445 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8446 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8448 MonoMethod *managed_alloc = NULL;
8450 /* FIXME: Decompose later to help abcrem */
8453 args [0] = emit_get_rgctx_klass (cfg, context_used,
8454 array_class, MONO_RGCTX_INFO_VTABLE);
8459 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8461 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8463 if (cfg->opt & MONO_OPT_SHARED) {
8464 /* Decompose now to avoid problems with references to the domainvar */
8465 MonoInst *iargs [3];
8467 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8468 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8471 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8473 /* Decompose later since it is needed by abcrem */
8474 MonoClass *array_type = mono_array_class_get (klass, 1);
8475 mono_class_vtable (cfg->domain, array_type);
8476 CHECK_TYPELOAD (array_type);
8478 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8479 ins->dreg = alloc_preg (cfg);
8480 ins->sreg1 = sp [0]->dreg;
8481 ins->inst_newa_class = klass;
8482 ins->type = STACK_OBJ;
8484 MONO_ADD_INS (cfg->cbb, ins);
8485 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8486 cfg->cbb->has_array_access = TRUE;
8488 /* Needed so mono_emit_load_get_addr () gets called */
8489 mono_get_got_var (cfg);
8499 * we inline/optimize the initialization sequence if possible.
8500 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8501 * for small sizes open code the memcpy
8502 * ensure the rva field is big enough
8504 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8505 MonoMethod *memcpy_method = get_memcpy_method ();
8506 MonoInst *iargs [3];
8507 int add_reg = alloc_preg (cfg);
8509 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8510 if (cfg->compile_aot) {
8511 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8513 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8515 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8516 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8525 if (sp [0]->type != STACK_OBJ)
8528 dreg = alloc_preg (cfg);
8529 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8530 ins->dreg = alloc_preg (cfg);
8531 ins->sreg1 = sp [0]->dreg;
8532 ins->type = STACK_I4;
8533 MONO_ADD_INS (cfg->cbb, ins);
8534 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8535 cfg->cbb->has_array_access = TRUE;
8543 if (sp [0]->type != STACK_OBJ)
8546 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8548 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8549 CHECK_TYPELOAD (klass);
8550 /* we need to make sure that this array is exactly the type it needs
8551 * to be for correctness. the wrappers are lax with their usage
8552 * so we need to ignore them here
8554 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8555 MonoClass *array_class = mono_array_class_get (klass, 1);
8556 mini_emit_check_array_type (cfg, sp [0], array_class);
8557 CHECK_TYPELOAD (array_class);
8561 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8576 case CEE_LDELEM_REF: {
8582 if (*ip == CEE_LDELEM) {
8584 token = read32 (ip + 1);
8585 klass = mini_get_class (method, token, generic_context);
8586 CHECK_TYPELOAD (klass);
8587 mono_class_init (klass);
8590 klass = array_access_to_klass (*ip);
8592 if (sp [0]->type != STACK_OBJ)
8595 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8597 if (sp [1]->opcode == OP_ICONST) {
8598 int array_reg = sp [0]->dreg;
8599 int index_reg = sp [1]->dreg;
8600 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8602 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8603 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8605 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8606 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8609 if (*ip == CEE_LDELEM)
8622 case CEE_STELEM_REF:
8629 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8631 if (*ip == CEE_STELEM) {
8633 token = read32 (ip + 1);
8634 klass = mini_get_class (method, token, generic_context);
8635 CHECK_TYPELOAD (klass);
8636 mono_class_init (klass);
8639 klass = array_access_to_klass (*ip);
8641 if (sp [0]->type != STACK_OBJ)
8644 /* storing a NULL doesn't need any of the complex checks in stelemref */
8645 if (generic_class_is_reference_type (cfg, klass) &&
8646 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8647 MonoMethod* helper = mono_marshal_get_stelemref ();
8648 MonoInst *iargs [3];
8650 if (sp [0]->type != STACK_OBJ)
8652 if (sp [2]->type != STACK_OBJ)
8659 mono_emit_method_call (cfg, helper, iargs, NULL);
8661 if (sp [1]->opcode == OP_ICONST) {
8662 int array_reg = sp [0]->dreg;
8663 int index_reg = sp [1]->dreg;
8664 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8666 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8667 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8669 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8670 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8674 if (*ip == CEE_STELEM)
8681 case CEE_CKFINITE: {
8685 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8686 ins->sreg1 = sp [0]->dreg;
8687 ins->dreg = alloc_freg (cfg);
8688 ins->type = STACK_R8;
8689 MONO_ADD_INS (bblock, ins);
8691 *sp++ = mono_decompose_opcode (cfg, ins);
8696 case CEE_REFANYVAL: {
8697 MonoInst *src_var, *src;
8699 int klass_reg = alloc_preg (cfg);
8700 int dreg = alloc_preg (cfg);
8703 MONO_INST_NEW (cfg, ins, *ip);
8706 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8707 CHECK_TYPELOAD (klass);
8708 mono_class_init (klass);
8710 if (cfg->generic_sharing_context)
8711 context_used = mono_class_check_context_used (klass);
8714 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8716 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8717 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8718 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8721 MonoInst *klass_ins;
8723 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8724 klass, MONO_RGCTX_INFO_KLASS);
8727 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8728 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8730 mini_emit_class_check (cfg, klass_reg, klass);
8732 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8733 ins->type = STACK_MP;
8738 case CEE_MKREFANY: {
8739 MonoInst *loc, *addr;
8742 MONO_INST_NEW (cfg, ins, *ip);
8745 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8746 CHECK_TYPELOAD (klass);
8747 mono_class_init (klass);
8749 if (cfg->generic_sharing_context)
8750 context_used = mono_class_check_context_used (klass);
8752 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8753 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8756 MonoInst *const_ins;
8757 int type_reg = alloc_preg (cfg);
8759 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8760 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8763 } else if (cfg->compile_aot) {
8764 int const_reg = alloc_preg (cfg);
8765 int type_reg = alloc_preg (cfg);
8767 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8769 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8772 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8773 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8775 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8777 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8778 ins->type = STACK_VTYPE;
8779 ins->klass = mono_defaults.typed_reference_class;
8786 MonoClass *handle_class;
8788 CHECK_STACK_OVF (1);
8791 n = read32 (ip + 1);
8793 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8794 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8795 handle = mono_method_get_wrapper_data (method, n);
8796 handle_class = mono_method_get_wrapper_data (method, n + 1);
8797 if (handle_class == mono_defaults.typehandle_class)
8798 handle = &((MonoClass*)handle)->byval_arg;
8801 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8805 mono_class_init (handle_class);
8806 if (cfg->generic_sharing_context) {
8807 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8808 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8809 /* This case handles ldtoken
8810 of an open type, like for
8813 } else if (handle_class == mono_defaults.typehandle_class) {
8814 /* If we get a MONO_TYPE_CLASS
8815 then we need to provide the
8817 instantiation of it. */
8818 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8821 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8822 } else if (handle_class == mono_defaults.fieldhandle_class)
8823 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8824 else if (handle_class == mono_defaults.methodhandle_class)
8825 context_used = mono_method_check_context_used (handle);
8827 g_assert_not_reached ();
8830 if ((cfg->opt & MONO_OPT_SHARED) &&
8831 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8832 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8833 MonoInst *addr, *vtvar, *iargs [3];
8834 int method_context_used;
8836 if (cfg->generic_sharing_context)
8837 method_context_used = mono_method_check_context_used (method);
8839 method_context_used = 0;
8841 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8843 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8844 EMIT_NEW_ICONST (cfg, iargs [1], n);
8845 if (method_context_used) {
8846 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8847 method, MONO_RGCTX_INFO_METHOD);
8848 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8850 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8851 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8853 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8857 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8859 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8860 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8861 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8862 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8863 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8864 MonoClass *tclass = mono_class_from_mono_type (handle);
8866 mono_class_init (tclass);
8868 ins = emit_get_rgctx_klass (cfg, context_used,
8869 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8870 } else if (cfg->compile_aot) {
8871 if (method->wrapper_type) {
8872 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8873 /* Special case for static synchronized wrappers */
8874 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8876 /* FIXME: n is not a normal token */
8877 cfg->disable_aot = TRUE;
8878 EMIT_NEW_PCONST (cfg, ins, NULL);
8881 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8884 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8886 ins->type = STACK_OBJ;
8887 ins->klass = cmethod->klass;
8890 MonoInst *addr, *vtvar;
8892 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8895 if (handle_class == mono_defaults.typehandle_class) {
8896 ins = emit_get_rgctx_klass (cfg, context_used,
8897 mono_class_from_mono_type (handle),
8898 MONO_RGCTX_INFO_TYPE);
8899 } else if (handle_class == mono_defaults.methodhandle_class) {
8900 ins = emit_get_rgctx_method (cfg, context_used,
8901 handle, MONO_RGCTX_INFO_METHOD);
8902 } else if (handle_class == mono_defaults.fieldhandle_class) {
8903 ins = emit_get_rgctx_field (cfg, context_used,
8904 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8906 g_assert_not_reached ();
8908 } else if (cfg->compile_aot) {
8909 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8911 EMIT_NEW_PCONST (cfg, ins, handle);
8913 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8915 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8925 MONO_INST_NEW (cfg, ins, OP_THROW);
8927 ins->sreg1 = sp [0]->dreg;
8929 bblock->out_of_line = TRUE;
8930 MONO_ADD_INS (bblock, ins);
8931 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8932 MONO_ADD_INS (bblock, ins);
8935 link_bblock (cfg, bblock, end_bblock);
8936 start_new_bblock = 1;
8938 case CEE_ENDFINALLY:
8939 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8940 MONO_ADD_INS (bblock, ins);
8942 start_new_bblock = 1;
8945 * Control will leave the method so empty the stack, otherwise
8946 * the next basic block will start with a nonempty stack.
8948 while (sp != stack_start) {
8956 if (*ip == CEE_LEAVE) {
8958 target = ip + 5 + (gint32)read32(ip + 1);
8961 target = ip + 2 + (signed char)(ip [1]);
8964 /* empty the stack */
8965 while (sp != stack_start) {
8970 * If this leave statement is in a catch block, check for a
8971 * pending exception, and rethrow it if necessary.
8973 for (i = 0; i < header->num_clauses; ++i) {
8974 MonoExceptionClause *clause = &header->clauses [i];
8977 * Use <= in the final comparison to handle clauses with multiple
8978 * leave statements, like in bug #78024.
8979 * The ordering of the exception clauses guarantees that we find the
8982 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8984 MonoBasicBlock *dont_throw;
8989 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8992 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8994 NEW_BBLOCK (cfg, dont_throw);
8997 * Currently, we always rethrow the abort exception, despite the
8998 * fact that this is not correct. See thread6.cs for an example.
8999 * But propagating the abort exception is more important than
9000 * getting the semantics right.
9002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9003 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9004 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9006 MONO_START_BB (cfg, dont_throw);
9011 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9013 for (tmp = handlers; tmp; tmp = tmp->next) {
9015 link_bblock (cfg, bblock, tblock);
9016 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9017 ins->inst_target_bb = tblock;
9018 MONO_ADD_INS (bblock, ins);
9019 bblock->has_call_handler = 1;
9021 g_list_free (handlers);
9024 MONO_INST_NEW (cfg, ins, OP_BR);
9025 MONO_ADD_INS (bblock, ins);
9026 GET_BBLOCK (cfg, tblock, target);
9027 link_bblock (cfg, bblock, tblock);
9028 ins->inst_target_bb = tblock;
9029 start_new_bblock = 1;
9031 if (*ip == CEE_LEAVE)
9040 * Mono specific opcodes
9042 case MONO_CUSTOM_PREFIX: {
9044 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9048 case CEE_MONO_ICALL: {
9050 MonoJitICallInfo *info;
9052 token = read32 (ip + 2);
9053 func = mono_method_get_wrapper_data (method, token);
9054 info = mono_find_jit_icall_by_addr (func);
9057 CHECK_STACK (info->sig->param_count);
9058 sp -= info->sig->param_count;
9060 ins = mono_emit_jit_icall (cfg, info->func, sp);
9061 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9065 inline_costs += 10 * num_calls++;
9069 case CEE_MONO_LDPTR: {
9072 CHECK_STACK_OVF (1);
9074 token = read32 (ip + 2);
9076 ptr = mono_method_get_wrapper_data (method, token);
9077 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9078 MonoJitICallInfo *callinfo;
9079 const char *icall_name;
9081 icall_name = method->name + strlen ("__icall_wrapper_");
9082 g_assert (icall_name);
9083 callinfo = mono_find_jit_icall_by_name (icall_name);
9084 g_assert (callinfo);
9086 if (ptr == callinfo->func) {
9087 /* Will be transformed into an AOTCONST later */
9088 EMIT_NEW_PCONST (cfg, ins, ptr);
9094 /* FIXME: Generalize this */
9095 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9096 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9101 EMIT_NEW_PCONST (cfg, ins, ptr);
9104 inline_costs += 10 * num_calls++;
9105 /* Can't embed random pointers into AOT code */
9106 cfg->disable_aot = 1;
9109 case CEE_MONO_ICALL_ADDR: {
9110 MonoMethod *cmethod;
9113 CHECK_STACK_OVF (1);
9115 token = read32 (ip + 2);
9117 cmethod = mono_method_get_wrapper_data (method, token);
9119 if (cfg->compile_aot) {
9120 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9122 ptr = mono_lookup_internal_call (cmethod);
9124 EMIT_NEW_PCONST (cfg, ins, ptr);
9130 case CEE_MONO_VTADDR: {
9131 MonoInst *src_var, *src;
9137 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9138 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9143 case CEE_MONO_NEWOBJ: {
9144 MonoInst *iargs [2];
9146 CHECK_STACK_OVF (1);
9148 token = read32 (ip + 2);
9149 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9150 mono_class_init (klass);
9151 NEW_DOMAINCONST (cfg, iargs [0]);
9152 MONO_ADD_INS (cfg->cbb, iargs [0]);
9153 NEW_CLASSCONST (cfg, iargs [1], klass);
9154 MONO_ADD_INS (cfg->cbb, iargs [1]);
9155 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9157 inline_costs += 10 * num_calls++;
9160 case CEE_MONO_OBJADDR:
9163 MONO_INST_NEW (cfg, ins, OP_MOVE);
9164 ins->dreg = alloc_preg (cfg);
9165 ins->sreg1 = sp [0]->dreg;
9166 ins->type = STACK_MP;
9167 MONO_ADD_INS (cfg->cbb, ins);
9171 case CEE_MONO_LDNATIVEOBJ:
9173 * Similar to LDOBJ, but instead load the unmanaged
9174 * representation of the vtype to the stack.
9179 token = read32 (ip + 2);
9180 klass = mono_method_get_wrapper_data (method, token);
9181 g_assert (klass->valuetype);
9182 mono_class_init (klass);
9185 MonoInst *src, *dest, *temp;
9188 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9189 temp->backend.is_pinvoke = 1;
9190 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9191 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9193 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9194 dest->type = STACK_VTYPE;
9195 dest->klass = klass;
9201 case CEE_MONO_RETOBJ: {
9203 * Same as RET, but return the native representation of a vtype
9206 g_assert (cfg->ret);
9207 g_assert (mono_method_signature (method)->pinvoke);
9212 token = read32 (ip + 2);
9213 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9215 if (!cfg->vret_addr) {
9216 g_assert (cfg->ret_var_is_local);
9218 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9220 EMIT_NEW_RETLOADA (cfg, ins);
9222 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9224 if (sp != stack_start)
9227 MONO_INST_NEW (cfg, ins, OP_BR);
9228 ins->inst_target_bb = end_bblock;
9229 MONO_ADD_INS (bblock, ins);
9230 link_bblock (cfg, bblock, end_bblock);
9231 start_new_bblock = 1;
9235 case CEE_MONO_CISINST:
9236 case CEE_MONO_CCASTCLASS: {
9241 token = read32 (ip + 2);
9242 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9243 if (ip [1] == CEE_MONO_CISINST)
9244 ins = handle_cisinst (cfg, klass, sp [0]);
9246 ins = handle_ccastclass (cfg, klass, sp [0]);
9252 case CEE_MONO_SAVE_LMF:
9253 case CEE_MONO_RESTORE_LMF:
9254 #ifdef MONO_ARCH_HAVE_LMF_OPS
9255 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9256 MONO_ADD_INS (bblock, ins);
9257 cfg->need_lmf_area = TRUE;
9261 case CEE_MONO_CLASSCONST:
9262 CHECK_STACK_OVF (1);
9264 token = read32 (ip + 2);
9265 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9268 inline_costs += 10 * num_calls++;
9270 case CEE_MONO_NOT_TAKEN:
9271 bblock->out_of_line = TRUE;
9275 CHECK_STACK_OVF (1);
9277 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9278 ins->dreg = alloc_preg (cfg);
9279 ins->inst_offset = (gint32)read32 (ip + 2);
9280 ins->type = STACK_PTR;
9281 MONO_ADD_INS (bblock, ins);
9285 case CEE_MONO_DYN_CALL: {
9288 /* It would be easier to call a trampoline, but that would put an
9289 * extra frame on the stack, confusing exception handling. So
9290 * implement it inline using an opcode for now.
9293 if (!cfg->dyn_call_var) {
9294 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9295 /* prevent it from being register allocated */
9296 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9299 /* Has to use a call inst since the local regalloc expects it */
9300 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9301 ins = (MonoInst*)call;
9303 ins->sreg1 = sp [0]->dreg;
9304 ins->sreg2 = sp [1]->dreg;
9305 MONO_ADD_INS (bblock, ins);
9307 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9308 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9312 inline_costs += 10 * num_calls++;
9317 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9327 /* somewhat similar to LDTOKEN */
9328 MonoInst *addr, *vtvar;
9329 CHECK_STACK_OVF (1);
9330 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9332 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9333 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9335 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9336 ins->type = STACK_VTYPE;
9337 ins->klass = mono_defaults.argumenthandle_class;
9350 * The following transforms:
9351 * CEE_CEQ into OP_CEQ
9352 * CEE_CGT into OP_CGT
9353 * CEE_CGT_UN into OP_CGT_UN
9354 * CEE_CLT into OP_CLT
9355 * CEE_CLT_UN into OP_CLT_UN
9357 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9359 MONO_INST_NEW (cfg, ins, cmp->opcode);
9361 cmp->sreg1 = sp [0]->dreg;
9362 cmp->sreg2 = sp [1]->dreg;
9363 type_from_op (cmp, sp [0], sp [1]);
9365 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9366 cmp->opcode = OP_LCOMPARE;
9367 else if (sp [0]->type == STACK_R8)
9368 cmp->opcode = OP_FCOMPARE;
9370 cmp->opcode = OP_ICOMPARE;
9371 MONO_ADD_INS (bblock, cmp);
9372 ins->type = STACK_I4;
9373 ins->dreg = alloc_dreg (cfg, ins->type);
9374 type_from_op (ins, sp [0], sp [1]);
9376 if (cmp->opcode == OP_FCOMPARE) {
9378 * The backends expect the fceq opcodes to do the
9381 cmp->opcode = OP_NOP;
9382 ins->sreg1 = cmp->sreg1;
9383 ins->sreg2 = cmp->sreg2;
9385 MONO_ADD_INS (bblock, ins);
9392 MonoMethod *cil_method;
9393 gboolean needs_static_rgctx_invoke;
9395 CHECK_STACK_OVF (1);
9397 n = read32 (ip + 2);
9398 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9401 mono_class_init (cmethod->klass);
9403 mono_save_token_info (cfg, image, n, cmethod);
9405 if (cfg->generic_sharing_context)
9406 context_used = mono_method_check_context_used (cmethod);
9408 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9410 cil_method = cmethod;
9411 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9412 METHOD_ACCESS_FAILURE;
9414 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9415 if (check_linkdemand (cfg, method, cmethod))
9417 CHECK_CFG_EXCEPTION;
9418 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9419 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9423 * Optimize the common case of ldftn+delegate creation
9425 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9426 /* FIXME: SGEN support */
9427 /* FIXME: handle shared static generic methods */
9428 /* FIXME: handle this in shared code */
9429 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9430 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9431 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9432 MonoInst *target_ins;
9435 invoke = mono_get_delegate_invoke (ctor_method->klass);
9436 if (!invoke || !mono_method_signature (invoke))
9440 if (cfg->verbose_level > 3)
9441 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9442 target_ins = sp [-1];
9444 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9445 CHECK_CFG_EXCEPTION;
9454 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9456 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9458 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9462 inline_costs += 10 * num_calls++;
9465 case CEE_LDVIRTFTN: {
9470 n = read32 (ip + 2);
9471 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9474 mono_class_init (cmethod->klass);
9476 if (cfg->generic_sharing_context)
9477 context_used = mono_method_check_context_used (cmethod);
9479 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9480 if (check_linkdemand (cfg, method, cmethod))
9482 CHECK_CFG_EXCEPTION;
9483 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9484 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9491 args [1] = emit_get_rgctx_method (cfg, context_used,
9492 cmethod, MONO_RGCTX_INFO_METHOD);
9493 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9495 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9496 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9500 inline_costs += 10 * num_calls++;
9504 CHECK_STACK_OVF (1);
9506 n = read16 (ip + 2);
9508 EMIT_NEW_ARGLOAD (cfg, ins, n);
9513 CHECK_STACK_OVF (1);
9515 n = read16 (ip + 2);
9517 NEW_ARGLOADA (cfg, ins, n);
9518 MONO_ADD_INS (cfg->cbb, ins);
9526 n = read16 (ip + 2);
9528 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9530 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9534 CHECK_STACK_OVF (1);
9536 n = read16 (ip + 2);
9538 EMIT_NEW_LOCLOAD (cfg, ins, n);
9543 unsigned char *tmp_ip;
9544 CHECK_STACK_OVF (1);
9546 n = read16 (ip + 2);
9549 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9555 EMIT_NEW_LOCLOADA (cfg, ins, n);
9564 n = read16 (ip + 2);
9566 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9568 emit_stloc_ir (cfg, sp, header, n);
9575 if (sp != stack_start)
9577 if (cfg->method != method)
9579 * Inlining this into a loop in a parent could lead to
9580 * stack overflows which is different behavior than the
9581 * non-inlined case, thus disable inlining in this case.
9583 goto inline_failure;
9585 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9586 ins->dreg = alloc_preg (cfg);
9587 ins->sreg1 = sp [0]->dreg;
9588 ins->type = STACK_PTR;
9589 MONO_ADD_INS (cfg->cbb, ins);
9591 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9593 ins->flags |= MONO_INST_INIT;
9598 case CEE_ENDFILTER: {
9599 MonoExceptionClause *clause, *nearest;
9600 int cc, nearest_num;
9604 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9606 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9607 ins->sreg1 = (*sp)->dreg;
9608 MONO_ADD_INS (bblock, ins);
9609 start_new_bblock = 1;
9614 for (cc = 0; cc < header->num_clauses; ++cc) {
9615 clause = &header->clauses [cc];
9616 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9617 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9618 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9624 if ((ip - header->code) != nearest->handler_offset)
9629 case CEE_UNALIGNED_:
9630 ins_flag |= MONO_INST_UNALIGNED;
9631 /* FIXME: record alignment? we can assume 1 for now */
9636 ins_flag |= MONO_INST_VOLATILE;
9640 ins_flag |= MONO_INST_TAILCALL;
9641 cfg->flags |= MONO_CFG_HAS_TAIL;
9642 /* Can't inline tail calls at this time */
9643 inline_costs += 100000;
9650 token = read32 (ip + 2);
9651 klass = mini_get_class (method, token, generic_context);
9652 CHECK_TYPELOAD (klass);
9653 if (generic_class_is_reference_type (cfg, klass))
9654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9656 mini_emit_initobj (cfg, *sp, NULL, klass);
9660 case CEE_CONSTRAINED_:
9662 token = read32 (ip + 2);
9663 if (method->wrapper_type != MONO_WRAPPER_NONE)
9664 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9666 constrained_call = mono_class_get_full (image, token, generic_context);
9667 CHECK_TYPELOAD (constrained_call);
9672 MonoInst *iargs [3];
9676 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9677 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9678 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9679 /* emit_memset only works when val == 0 */
9680 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9685 if (ip [1] == CEE_CPBLK) {
9686 MonoMethod *memcpy_method = get_memcpy_method ();
9687 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9689 MonoMethod *memset_method = get_memset_method ();
9690 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9700 ins_flag |= MONO_INST_NOTYPECHECK;
9702 ins_flag |= MONO_INST_NORANGECHECK;
9703 /* we ignore the no-nullcheck for now since we
9704 * really do it explicitly only when doing callvirt->call
9710 int handler_offset = -1;
9712 for (i = 0; i < header->num_clauses; ++i) {
9713 MonoExceptionClause *clause = &header->clauses [i];
9714 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9715 handler_offset = clause->handler_offset;
9720 bblock->flags |= BB_EXCEPTION_UNSAFE;
9722 g_assert (handler_offset != -1);
9724 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9725 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9726 ins->sreg1 = load->dreg;
9727 MONO_ADD_INS (bblock, ins);
9729 link_bblock (cfg, bblock, end_bblock);
9730 start_new_bblock = 1;
9738 CHECK_STACK_OVF (1);
9740 token = read32 (ip + 2);
9741 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9742 MonoType *type = mono_type_create_from_typespec (image, token);
9743 token = mono_type_size (type, &ialign);
9745 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9746 CHECK_TYPELOAD (klass);
9747 mono_class_init (klass);
9748 token = mono_class_value_size (klass, &align);
9750 EMIT_NEW_ICONST (cfg, ins, token);
9755 case CEE_REFANYTYPE: {
9756 MonoInst *src_var, *src;
9762 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9764 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9765 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9766 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9776 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9781 g_error ("opcode 0x%02x not handled", *ip);
9784 if (start_new_bblock != 1)
9787 bblock->cil_length = ip - bblock->cil_code;
9788 bblock->next_bb = end_bblock;
9790 if (cfg->method == method && cfg->domainvar) {
9792 MonoInst *get_domain;
9794 cfg->cbb = init_localsbb;
9796 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9797 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9800 get_domain->dreg = alloc_preg (cfg);
9801 MONO_ADD_INS (cfg->cbb, get_domain);
9803 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9804 MONO_ADD_INS (cfg->cbb, store);
9807 #ifdef TARGET_POWERPC
9808 if (cfg->compile_aot)
9809 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9810 mono_get_got_var (cfg);
9813 if (cfg->method == method && cfg->got_var)
9814 mono_emit_load_got_addr (cfg);
9819 cfg->cbb = init_localsbb;
9821 for (i = 0; i < header->num_locals; ++i) {
9822 MonoType *ptype = header->locals [i];
9823 int t = ptype->type;
9824 dreg = cfg->locals [i]->dreg;
9826 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9827 t = mono_class_enum_basetype (ptype->data.klass)->type;
9829 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9830 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9831 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9832 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9833 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9834 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9835 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9836 ins->type = STACK_R8;
9837 ins->inst_p0 = (void*)&r8_0;
9838 ins->dreg = alloc_dreg (cfg, STACK_R8);
9839 MONO_ADD_INS (init_localsbb, ins);
9840 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9841 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9842 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9843 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9845 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9852 if (cfg->method == method) {
9854 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9855 bb->region = mono_find_block_region (cfg, bb->real_offset);
9857 mono_create_spvar_for_region (cfg, bb->region);
9858 if (cfg->verbose_level > 2)
9859 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9863 g_slist_free (class_inits);
9864 dont_inline = g_list_remove (dont_inline, method);
9866 if (inline_costs < 0) {
9869 /* Method is too large */
9870 mname = mono_method_full_name (method, TRUE);
9871 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9872 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9877 if ((cfg->verbose_level > 2) && (cfg->method == method))
9878 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9880 return inline_costs;
9883 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9884 g_slist_free (class_inits);
9885 dont_inline = g_list_remove (dont_inline, method);
9889 g_slist_free (class_inits);
9890 dont_inline = g_list_remove (dont_inline, method);
9894 g_slist_free (class_inits);
9895 dont_inline = g_list_remove (dont_inline, method);
9896 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9900 g_slist_free (class_inits);
9901 dont_inline = g_list_remove (dont_inline, method);
9902 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to its OP_STORE*_MEMBASE_IMM
 * counterpart, i.e. the form which stores an immediate value instead of a
 * register.  Used when the stored value is known to be a constant so the
 * intermediate register can be eliminated (see mono_spill_global_vars).
 * Asserts if OPCODE has no immediate form.
 * NOTE(review): the switch scaffolding around these cases is elided in this
 * view of the file.
 */
9907 store_membase_reg_to_store_membase_imm (int opcode)
9910 case OP_STORE_MEMBASE_REG:
9911 return OP_STORE_MEMBASE_IMM;
9912 case OP_STOREI1_MEMBASE_REG:
9913 return OP_STOREI1_MEMBASE_IMM;
9914 case OP_STOREI2_MEMBASE_REG:
9915 return OP_STOREI2_MEMBASE_IMM;
9916 case OP_STOREI4_MEMBASE_REG:
9917 return OP_STOREI4_MEMBASE_IMM;
9918 case OP_STOREI8_MEMBASE_REG:
9919 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode has no immediate variant */
9921 g_assert_not_reached ();
9927 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its register+immediate variant, used when
 * the second operand turns out to be a constant.  Includes arch-specific
 * (x86/amd64) opcodes under the corresponding #ifdefs.
 * NOTE(review): the default return for opcodes without an immediate form is
 * elided in this view — presumably -1, as callers such as
 * mono_op_to_op_imm_noemul () treat the result as optional; confirm against
 * the full source.
 */
9930 mono_op_to_op_imm (int opcode)
9940 return OP_IDIV_UN_IMM;
9944 return OP_IREM_UN_IMM;
9958 return OP_ISHR_UN_IMM;
9975 return OP_LSHR_UN_IMM;
9978 return OP_COMPARE_IMM;
9980 return OP_ICOMPARE_IMM;
9982 return OP_LCOMPARE_IMM;
9984 case OP_STORE_MEMBASE_REG:
9985 return OP_STORE_MEMBASE_IMM;
9986 case OP_STOREI1_MEMBASE_REG:
9987 return OP_STOREI1_MEMBASE_IMM;
9988 case OP_STOREI2_MEMBASE_REG:
9989 return OP_STOREI2_MEMBASE_IMM;
9990 case OP_STOREI4_MEMBASE_REG:
9991 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes below */
9993 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9995 return OP_X86_PUSH_IMM;
9996 case OP_X86_COMPARE_MEMBASE_REG:
9997 return OP_X86_COMPARE_MEMBASE_IMM;
9999 #if defined(TARGET_AMD64)
10000 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10001 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10003 case OP_VOIDCALL_REG:
10004 return OP_VOIDCALL;
10012 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load-indirect) opcode to the corresponding
 * low-level OP_LOAD*_MEMBASE opcode.  Asserts on any opcode outside the
 * LDIND family.  Note that both CEE_LDIND_I and CEE_LDIND_REF map to the
 * pointer-sized OP_LOAD_MEMBASE.
 */
10019 ldind_to_load_membase (int opcode)
10023 return OP_LOADI1_MEMBASE;
10025 return OP_LOADU1_MEMBASE;
10027 return OP_LOADI2_MEMBASE;
10029 return OP_LOADU2_MEMBASE;
10031 return OP_LOADI4_MEMBASE;
10033 return OP_LOADU4_MEMBASE;
10035 return OP_LOAD_MEMBASE;
10036 case CEE_LDIND_REF:
10037 return OP_LOAD_MEMBASE;
10039 return OP_LOADI8_MEMBASE;
10041 return OP_LOADR4_MEMBASE;
10043 return OP_LOADR8_MEMBASE;
10045 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store-indirect) opcode to the corresponding
 * low-level OP_STORE*_MEMBASE_REG opcode.  CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Asserts on any opcode outside the
 * STIND family.
 */
10052 stind_to_store_membase (int opcode)
10056 return OP_STOREI1_MEMBASE_REG;
10058 return OP_STOREI2_MEMBASE_REG;
10060 return OP_STOREI4_MEMBASE_REG;
10062 case CEE_STIND_REF:
10063 return OP_STORE_MEMBASE_REG;
10065 return OP_STOREI8_MEMBASE_REG;
10067 return OP_STORER4_MEMBASE_REG;
10069 return OP_STORER8_MEMBASE_REG;
10071 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * form, for targets (x86/amd64 only here) that can load directly from a
 * constant address.  8-byte loads are only mapped on 64-bit targets
 * (SIZEOF_REGISTER == 8).
 * NOTE(review): the fall-through return for unsupported opcodes/targets is
 * elided in this view — presumably -1, meaning "no such form"; confirm
 * against the full source.
 */
10078 mono_load_membase_to_load_mem (int opcode)
10080 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10081 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10083 case OP_LOAD_MEMBASE:
10084 return OP_LOAD_MEM;
10085 case OP_LOADU1_MEMBASE:
10086 return OP_LOADU1_MEM;
10087 case OP_LOADU2_MEMBASE:
10088 return OP_LOADU2_MEM;
10089 case OP_LOADI4_MEMBASE:
10090 return OP_LOADI4_MEM;
10091 case OP_LOADU4_MEMBASE:
10092 return OP_LOADU4_MEM;
10093 #if SIZEOF_REGISTER == 8
10094 case OP_LOADI8_MEMBASE:
10095 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is about to be spilled with
 * STORE_OPCODE, return a fused read-modify-write *_MEMBASE opcode that
 * operates directly on the memory slot, avoiding a separate load+store.
 * Only implemented for x86 (32-bit stores) and amd64 (32- and 64-bit
 * stores); the guard at the top of each arch section rejects other store
 * widths.
 * NOTE(review): the 'return -1' paths for unsupported combinations are
 * elided in this view; callers (mono_spill_global_vars) compare the result
 * against -1.
 */
10104 op_to_op_dest_membase (int store_opcode, int opcode)
10106 #if defined(TARGET_X86)
10107 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source forms */
10112 return OP_X86_ADD_MEMBASE_REG;
10114 return OP_X86_SUB_MEMBASE_REG;
10116 return OP_X86_AND_MEMBASE_REG;
10118 return OP_X86_OR_MEMBASE_REG;
10120 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source forms */
10123 return OP_X86_ADD_MEMBASE_IMM;
10126 return OP_X86_SUB_MEMBASE_IMM;
10129 return OP_X86_AND_MEMBASE_IMM;
10132 return OP_X86_OR_MEMBASE_IMM;
10135 return OP_X86_XOR_MEMBASE_IMM;
10141 #if defined(TARGET_AMD64)
10142 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the OP_X86_* opcodes on amd64 */
10147 return OP_X86_ADD_MEMBASE_REG;
10149 return OP_X86_SUB_MEMBASE_REG;
10151 return OP_X86_AND_MEMBASE_REG;
10153 return OP_X86_OR_MEMBASE_REG;
10155 return OP_X86_XOR_MEMBASE_REG;
10157 return OP_X86_ADD_MEMBASE_IMM;
10159 return OP_X86_SUB_MEMBASE_IMM;
10161 return OP_X86_AND_MEMBASE_IMM;
10163 return OP_X86_OR_MEMBASE_IMM;
10165 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ops use the OP_AMD64_* opcodes */
10167 return OP_AMD64_ADD_MEMBASE_REG;
10169 return OP_AMD64_SUB_MEMBASE_REG;
10171 return OP_AMD64_AND_MEMBASE_REG;
10173 return OP_AMD64_OR_MEMBASE_REG;
10175 return OP_AMD64_XOR_MEMBASE_REG;
10178 return OP_AMD64_ADD_MEMBASE_IMM;
10181 return OP_AMD64_SUB_MEMBASE_IMM;
10184 return OP_AMD64_AND_MEMBASE_IMM;
10187 return OP_AMD64_OR_MEMBASE_IMM;
10190 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse an opcode whose result is stored with STORE_OPCODE directly into a
 * store form.  Currently only SETcc-style results on x86/amd64: a 1-byte
 * store of a condition result becomes OP_X86_SET{EQ,NE}_MEMBASE.
 * NOTE(review): the -1 fallback for other combinations is elided in this
 * view; callers compare the result against -1.
 */
10200 op_to_op_store_membase (int store_opcode, int opcode)
10202 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10205 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10206 return OP_X86_SETEQ_MEMBASE;
10208 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10209 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose FIRST source would be filled by a load with
 * LOAD_OPCODE, return a fused *_MEMBASE opcode that reads the operand
 * directly from memory, or fall through (elided -1 path) when no fused form
 * exists.  x86 and amd64 only; the amd64 COMPARE_IMM/LCOMPARE_IMM cases are
 * commented out in the original because they only work for 32-bit
 * immediates.
 */
10217 op_to_op_src1_membase (int load_opcode, int opcode)
10220 /* FIXME: This has sign extension issues */
10222 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10223 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/32-bit loads can be fused */
10226 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10231 return OP_X86_PUSH_MEMBASE;
10232 case OP_COMPARE_IMM:
10233 case OP_ICOMPARE_IMM:
10234 return OP_X86_COMPARE_MEMBASE_IMM;
10237 return OP_X86_COMPARE_MEMBASE_REG;
10241 #ifdef TARGET_AMD64
10242 /* FIXME: This has sign extension issues */
10244 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10245 return OP_X86_COMPARE_MEMBASE8_IMM;
10250 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10251 return OP_X86_PUSH_MEMBASE;
10253 /* FIXME: This only works for 32 bit immediates
10254 case OP_COMPARE_IMM:
10255 case OP_LCOMPARE_IMM:
10256 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10257 return OP_AMD64_COMPARE_MEMBASE_IMM;
10259 case OP_ICOMPARE_IMM:
10260 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10261 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10265 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10266 return OP_AMD64_COMPARE_MEMBASE_REG;
10269 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10270 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase () but for the SECOND source operand: return
 * a fused REG_MEMBASE opcode that reads src2 directly from memory.  On x86
 * only pointer/32-bit loads qualify (guard below); on amd64 each case
 * checks the load width itself (32-bit loads fuse into OP_X86_* forms,
 * 64-bit loads into OP_AMD64_* forms).
 * NOTE(review): the -1 fallback paths are elided in this view; callers
 * compare the result against -1.
 */
10279 op_to_op_src2_membase (int load_opcode, int opcode)
10282 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10288 return OP_X86_COMPARE_REG_MEMBASE;
10290 return OP_X86_ADD_REG_MEMBASE;
10292 return OP_X86_SUB_REG_MEMBASE;
10294 return OP_X86_AND_REG_MEMBASE;
10296 return OP_X86_OR_REG_MEMBASE;
10298 return OP_X86_XOR_REG_MEMBASE;
10302 #ifdef TARGET_AMD64
10305 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10306 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10310 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10311 return OP_AMD64_COMPARE_REG_MEMBASE;
10314 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10315 return OP_X86_ADD_REG_MEMBASE;
10317 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10318 return OP_X86_SUB_REG_MEMBASE;
10320 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10321 return OP_X86_AND_REG_MEMBASE;
10323 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10324 return OP_X86_OR_REG_MEMBASE;
10326 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10327 return OP_X86_XOR_REG_MEMBASE;
10329 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10330 return OP_AMD64_ADD_REG_MEMBASE;
10332 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10333 return OP_AMD64_SUB_REG_MEMBASE;
10335 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10336 return OP_AMD64_AND_REG_MEMBASE;
10338 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10339 return OP_AMD64_OR_REG_MEMBASE;
10341 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10342 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm () but refuse the conversion for opcodes that
 * are software-emulated on this target: long shifts on 32-bit targets
 * without native long-shift support, and mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined.  The case
 * labels for the refused opcodes are elided in this view; everything else
 * falls through to mono_op_to_op_imm ().
 */
10350 mono_op_to_op_imm_noemul (int opcode)
10353 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10358 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10366 return mono_op_to_op_imm (opcode);
10370 #ifndef DISABLE_JIT
10373 * mono_handle_global_vregs:
10375 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Two passes over the IR.  Pass 1: record, per vreg, the single bblock it
 * is used in (block_num + 1, since 0 is a valid block number), or -1 once it
 * is seen in a second bblock — such vregs are made 'global' by allocating a
 * MonoInst variable for them.  Pass 2: variables confined to one bblock are
 * flagged MONO_INST_IS_DEAD and converted back to plain local vregs, then
 * the varinfo/vars tables are compacted.
 */
10379 mono_handle_global_vregs (MonoCompile *cfg)
10381 gint32 *vreg_to_bb;
10382 MonoBasicBlock *bb;
/* NOTE(review): element type is gint32, so sizeof (gint32*) over-allocates
 * on 64-bit targets (harmless, but presumably sizeof (gint32) was meant);
 * also the '+ 1' binds to the product, not the element count — confirm. */
10385 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10387 #ifdef MONO_ARCH_SIMD_INTRINSICS
10388 if (cfg->uses_simd_intrinsics)
10389 mono_simd_simplify_indirection (cfg);
10392 /* Find local vregs used in more than one bb */
10393 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10394 MonoInst *ins = bb->code;
10395 int block_num = bb->block_num;
10397 if (cfg->verbose_level > 2)
10398 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10401 for (; ins; ins = ins->next) {
10402 const char *spec = INS_INFO (ins->opcode);
10403 int regtype = 0, regindex;
10406 if (G_UNLIKELY (cfg->verbose_level > 2))
10407 mono_print_ins (ins);
/* Only low-level opcodes are expected at this point */
10409 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three possible sources */
10411 for (regindex = 0; regindex < 4; regindex ++) {
10414 if (regindex == 0) {
10415 regtype = spec [MONO_INST_DEST];
10416 if (regtype == ' ')
10419 } else if (regindex == 1) {
10420 regtype = spec [MONO_INST_SRC1];
10421 if (regtype == ' ')
10424 } else if (regindex == 2) {
10425 regtype = spec [MONO_INST_SRC2];
10426 if (regtype == ' ')
10429 } else if (regindex == 3) {
10430 regtype = spec [MONO_INST_SRC3];
10431 if (regtype == ' ')
10436 #if SIZEOF_REGISTER == 4
10437 /* In the LLVM case, the long opcodes are not decomposed */
10438 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10440 * Since some instructions reference the original long vreg,
10441 * and some reference the two component vregs, it is quite hard
10442 * to determine when it needs to be global. So be conservative.
10444 if (!get_vreg_to_inst (cfg, vreg)) {
10445 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10447 if (cfg->verbose_level > 2)
10448 printf ("LONG VREG R%d made global.\n", vreg);
10452 * Make the component vregs volatile since the optimizations can
10453 * get confused otherwise.
10455 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10456 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10460 g_assert (vreg != -1);
10462 prev_bb = vreg_to_bb [vreg];
10463 if (prev_bb == 0) {
10464 /* 0 is a valid block num */
10465 vreg_to_bb [vreg] = block_num + 1;
10466 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global; skip them */
10467 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10470 if (!get_vreg_to_inst (cfg, vreg)) {
10471 if (G_UNLIKELY (cfg->verbose_level > 2))
10472 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10476 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10479 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10482 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10485 g_assert_not_reached ();
10489 /* Flag as having been used in more than one bb */
10490 vreg_to_bb [vreg] = -1;
10496 /* If a variable is used in only one bblock, convert it into a local vreg */
10497 for (i = 0; i < cfg->num_varinfo; i++) {
10498 MonoInst *var = cfg->varinfo [i];
10499 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10501 switch (var->type) {
10507 #if SIZEOF_REGISTER == 8
10510 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10511 /* Enabling this screws up the fp stack on x86 */
10514 /* Arguments are implicitly global */
10515 /* Putting R4 vars into registers doesn't work currently */
10516 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10518 * Make that the variable's liveness interval doesn't contain a call, since
10519 * that would cause the lvreg to be spilled, making the whole optimization
10522 /* This is too slow for JIT compilation */
/* NOTE(review): the code below indexes the gint32 array vreg_to_bb as a
 * pointer — it appears to live inside a disabled '#if 0' region whose
 * directives are elided in this view; confirm against the full source. */
10524 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10526 int def_index, call_index, ins_index;
10527 gboolean spilled = FALSE;
10532 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10533 const char *spec = INS_INFO (ins->opcode);
10535 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10536 def_index = ins_index;
/* NOTE(review): the second disjunct repeats the SRC1/sreg1 test verbatim;
 * presumably SRC2/sreg2 was intended — confirm (copy-paste suspect). */
10538 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10539 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10540 if (call_index > def_index) {
10546 if (MONO_IS_CALL (ins))
10547 call_index = ins_index;
10557 if (G_UNLIKELY (cfg->verbose_level > 2))
10558 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10559 var->flags |= MONO_INST_IS_DEAD;
10560 cfg->vreg_to_inst [var->dreg] = NULL;
10567 * Compress the varinfo and vars tables so the liveness computation is faster and
10568 * takes up less space.
10571 for (i = 0; i < cfg->num_varinfo; ++i) {
10572 MonoInst *var = cfg->varinfo [i];
10573 if (pos < i && cfg->locals_start == i)
10574 cfg->locals_start = pos;
10575 if (!(var->flags & MONO_INST_IS_DEAD)) {
10577 cfg->varinfo [pos] = cfg->varinfo [i];
10578 cfg->varinfo [pos]->inst_c0 = pos;
10579 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10580 cfg->vars [pos].idx = pos;
10581 #if SIZEOF_REGISTER == 4
10582 if (cfg->varinfo [pos]->type == STACK_I8) {
10583 /* Modify the two component vars too */
10586 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10587 var1->inst_c0 = pos;
10588 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10589 var1->inst_c0 = pos;
10596 cfg->num_varinfo = pos;
10597 if (cfg->locals_start > cfg->num_varinfo)
10598 cfg->locals_start = cfg->num_varinfo;
10602 * mono_spill_global_vars:
10604 * Generate spill code for variables which are not allocated to registers,
10605 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10606 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   For every instruction, replace references to stack-allocated variables
 * with loads/stores of fresh 'lvregs', fusing the memory access into the
 * instruction itself where the target supports it (see the op_to_op_*
 * helpers above).  Caches the last lvreg holding each variable in
 * vreg_to_lvreg[] so repeated uses within a bblock avoid reloading; the
 * cache is flushed at bblock boundaries and across calls.  Also records
 * per-vreg live-range start/end instructions for debug info.
 */
10609 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10611 MonoBasicBlock *bb;
10613 int orig_next_vreg;
10614 guint32 *vreg_to_lvreg;
10616 guint32 i, lvregs_len;
10617 gboolean dest_has_lvreg = FALSE;
10618 guint32 stacktypes [128];
10619 MonoInst **live_range_start, **live_range_end;
10620 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10622 *need_local_opts = FALSE;
10624 memset (spec2, 0, sizeof (spec2));
/* map INS_INFO regtype chars to stack types for alloc_dreg () */
10626 /* FIXME: Move this function to mini.c */
10627 stacktypes ['i'] = STACK_PTR;
10628 stacktypes ['l'] = STACK_I8;
10629 stacktypes ['f'] = STACK_R8;
10630 #ifdef MONO_ARCH_SIMD_INTRINSICS
10631 stacktypes ['x'] = STACK_VTYPE;
10634 #if SIZEOF_REGISTER == 4
10635 /* Create MonoInsts for longs */
10636 for (i = 0; i < cfg->num_varinfo; i++) {
10637 MonoInst *ins = cfg->varinfo [i];
10639 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10640 switch (ins->type) {
10641 #ifdef MONO_ARCH_SOFT_FLOAT
10647 g_assert (ins->opcode == OP_REGOFFSET);
/* dreg+1 / dreg+2 are the LS/MS 32-bit component vregs of a long */
10649 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10651 tree->opcode = OP_REGOFFSET;
10652 tree->inst_basereg = ins->inst_basereg;
10653 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10655 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10657 tree->opcode = OP_REGOFFSET;
10658 tree->inst_basereg = ins->inst_basereg;
10659 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10669 /* FIXME: widening and truncation */
10672 * As an optimization, when a variable allocated to the stack is first loaded into
10673 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10674 * the variable again.
10676 orig_next_vreg = cfg->next_vreg;
10677 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* fixed 1024-entry list of cached vregs; overflow is caught by the
 * g_assert (lvregs_len < 1024) checks below */
10678 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10682 * These arrays contain the first and last instructions accessing a given
10684 * Since we emit bblocks in the same order we process them here, and we
10685 * don't split live ranges, these will precisely describe the live range of
10686 * the variable, i.e. the instruction range where a valid value can be found
10687 * in the variables location.
10689 /* FIXME: Only do this if debugging info is requested */
10690 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10691 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10692 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10693 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10695 /* Add spill loads/stores */
10696 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10699 if (cfg->verbose_level > 2)
10700 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10702 /* Clear vreg_to_lvreg array */
10703 for (i = 0; i < lvregs_len; i++)
10704 vreg_to_lvreg [lvregs [i]] = 0;
10708 MONO_BB_FOR_EACH_INS (bb, ins) {
10709 const char *spec = INS_INFO (ins->opcode);
10710 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10711 gboolean store, no_lvreg;
10712 int sregs [MONO_MAX_SRC_REGS];
10714 if (G_UNLIKELY (cfg->verbose_level > 2))
10715 mono_print_ins (ins);
10717 if (ins->opcode == OP_NOP)
10721 * We handle LDADDR here as well, since it can only be decomposed
10722 * when variable addresses are known.
10724 if (ins->opcode == OP_LDADDR) {
10725 MonoInst *var = ins->inst_p0;
10727 if (var->opcode == OP_VTARG_ADDR) {
10728 /* Happens on SPARC/S390 where vtypes are passed by reference */
10729 MonoInst *vtaddr = var->inst_left;
10730 if (vtaddr->opcode == OP_REGVAR) {
10731 ins->opcode = OP_MOVE;
10732 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): 'var->inst_left' below is the same pointer as 'vtaddr'
 * (assigned above); the mixed spelling is confusing but equivalent */
10734 else if (var->inst_left->opcode == OP_REGOFFSET) {
10735 ins->opcode = OP_LOAD_MEMBASE;
10736 ins->inst_basereg = vtaddr->inst_basereg;
10737 ins->inst_offset = vtaddr->inst_offset;
10741 g_assert (var->opcode == OP_REGOFFSET);
/* address of a stack slot = basereg + offset */
10743 ins->opcode = OP_ADD_IMM;
10744 ins->sreg1 = var->inst_basereg;
10745 ins->inst_imm = var->inst_offset;
10748 *need_local_opts = TRUE;
10749 spec = INS_INFO (ins->opcode);
/* high-level CIL opcodes must all be decomposed by now */
10752 if (ins->opcode < MONO_CEE_LAST) {
10753 mono_print_ins (ins);
10754 g_assert_not_reached ();
10758 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10762 if (MONO_IS_STORE_MEMBASE (ins)) {
/* temporarily swap dreg/sreg2 and use spec2 so the base reg is
 * processed as a source; swapped back after the sreg loop */
10763 tmp_reg = ins->dreg;
10764 ins->dreg = ins->sreg2;
10765 ins->sreg2 = tmp_reg;
10768 spec2 [MONO_INST_DEST] = ' ';
10769 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10770 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10771 spec2 [MONO_INST_SRC3] = ' ';
10773 } else if (MONO_IS_STORE_MEMINDEX (ins))
10774 g_assert_not_reached ();
10779 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10780 printf ("\t %.3s %d", spec, ins->dreg);
10781 num_sregs = mono_inst_get_src_registers (ins, sregs);
10782 for (srcindex = 0; srcindex < 3; ++srcindex)
10783 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10790 regtype = spec [MONO_INST_DEST];
10791 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10794 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10795 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10796 MonoInst *store_ins;
10798 MonoInst *def_ins = ins;
10799 int dreg = ins->dreg; /* The original vreg */
10801 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10803 if (var->opcode == OP_REGVAR) {
10804 ins->dreg = var->dreg;
10805 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10807 * Instead of emitting a load+store, use a _membase opcode.
10809 g_assert (var->opcode == OP_REGOFFSET);
10810 if (ins->opcode == OP_MOVE) {
10814 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10815 ins->inst_basereg = var->inst_basereg;
10816 ins->inst_offset = var->inst_offset;
10819 spec = INS_INFO (ins->opcode);
10823 g_assert (var->opcode == OP_REGOFFSET);
10825 prev_dreg = ins->dreg;
10827 /* Invalidate any previous lvreg for this vreg */
10828 vreg_to_lvreg [ins->dreg] = 0;
10832 #ifdef MONO_ARCH_SOFT_FLOAT
10833 if (store_opcode == OP_STORER8_MEMBASE_REG) {
/* soft-float: doubles are stored as 8-byte integers */
10835 store_opcode = OP_STOREI8_MEMBASE_REG;
10839 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10841 if (regtype == 'l') {
/* 32-bit target: store the long as two 4-byte halves */
10842 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10843 mono_bblock_insert_after_ins (bb, ins, store_ins);
10844 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10845 mono_bblock_insert_after_ins (bb, ins, store_ins);
10846 def_ins = store_ins;
10849 g_assert (store_opcode != OP_STOREV_MEMBASE);
10851 /* Try to fuse the store into the instruction itself */
10852 /* FIXME: Add more instructions */
10853 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10854 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10855 ins->inst_imm = ins->inst_c0;
10856 ins->inst_destbasereg = var->inst_basereg;
10857 ins->inst_offset = var->inst_offset;
10858 spec = INS_INFO (ins->opcode);
10859 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* a plain move into a spilled var becomes the store itself */
10860 ins->opcode = store_opcode;
10861 ins->inst_destbasereg = var->inst_basereg;
10862 ins->inst_offset = var->inst_offset;
/* redo the dreg/sreg2 swap since the opcode is now a store */
10866 tmp_reg = ins->dreg;
10867 ins->dreg = ins->sreg2;
10868 ins->sreg2 = tmp_reg;
10871 spec2 [MONO_INST_DEST] = ' ';
10872 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10873 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10874 spec2 [MONO_INST_SRC3] = ' ';
10876 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10877 // FIXME: The backends expect the base reg to be in inst_basereg
10878 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10880 ins->inst_basereg = var->inst_basereg;
10881 ins->inst_offset = var->inst_offset;
10882 spec = INS_INFO (ins->opcode);
10884 /* printf ("INS: "); mono_print_ins (ins); */
10885 /* Create a store instruction */
10886 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10888 /* Insert it after the instruction */
10889 mono_bblock_insert_after_ins (bb, ins, store_ins);
10891 def_ins = store_ins;
10894 * We can't assign ins->dreg to var->dreg here, since the
10895 * sregs could use it. So set a flag, and do it after
10898 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10899 dest_has_lvreg = TRUE;
10904 if (def_ins && !live_range_start [dreg]) {
10905 live_range_start [dreg] = def_ins;
10906 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
10913 num_sregs = mono_inst_get_src_registers (ins, sregs);
10914 for (srcindex = 0; srcindex < 3; ++srcindex) {
10915 regtype = spec [MONO_INST_SRC1 + srcindex];
10916 sreg = sregs [srcindex];
10918 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10919 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10920 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10921 MonoInst *use_ins = ins;
10922 MonoInst *load_ins;
10923 guint32 load_opcode;
10925 if (var->opcode == OP_REGVAR) {
10926 sregs [srcindex] = var->dreg;
10927 //mono_inst_set_src_registers (ins, sregs);
10928 live_range_end [sreg] = use_ins;
10929 live_range_end_bb [sreg] = bb;
10933 g_assert (var->opcode == OP_REGOFFSET);
10935 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10937 g_assert (load_opcode != OP_LOADV_MEMBASE);
10939 if (vreg_to_lvreg [sreg]) {
10940 g_assert (vreg_to_lvreg [sreg] != -1);
10942 /* The variable is already loaded to an lvreg */
10943 if (G_UNLIKELY (cfg->verbose_level > 2))
10944 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10945 sregs [srcindex] = vreg_to_lvreg [sreg];
10946 //mono_inst_set_src_registers (ins, sregs);
10950 /* Try to fuse the load into the instruction */
10951 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10952 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10953 sregs [0] = var->inst_basereg;
10954 //mono_inst_set_src_registers (ins, sregs);
10955 ins->inst_offset = var->inst_offset;
10956 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10957 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10958 sregs [1] = var->inst_basereg;
10959 //mono_inst_set_src_registers (ins, sregs);
10960 ins->inst_offset = var->inst_offset;
10962 if (MONO_IS_REAL_MOVE (ins)) {
10963 ins->opcode = OP_NOP;
10966 //printf ("%d ", srcindex); mono_print_ins (ins);
10968 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* only cache the lvreg when the variable can be safely kept in a
 * register (no fp-stack target, not volatile/indirect) */
10970 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10971 if (var->dreg == prev_dreg) {
10973 * sreg refers to the value loaded by the load
10974 * emitted below, but we need to use ins->dreg
10975 * since it refers to the store emitted earlier.
10979 g_assert (sreg != -1);
10980 vreg_to_lvreg [var->dreg] = sreg;
10981 g_assert (lvregs_len < 1024);
10982 lvregs [lvregs_len ++] = var->dreg;
10986 sregs [srcindex] = sreg;
10987 //mono_inst_set_src_registers (ins, sregs);
10989 if (regtype == 'l') {
/* 32-bit target: load the long as two 4-byte halves */
10990 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10991 mono_bblock_insert_before_ins (bb, ins, load_ins);
10992 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10993 mono_bblock_insert_before_ins (bb, ins, load_ins);
10994 use_ins = load_ins;
10997 #if SIZEOF_REGISTER == 4
10998 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11000 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11001 mono_bblock_insert_before_ins (bb, ins, load_ins);
11002 use_ins = load_ins;
11006 if (var->dreg < orig_next_vreg) {
11007 live_range_end [var->dreg] = use_ins;
11008 live_range_end_bb [var->dreg] = bb;
11012 mono_inst_set_src_registers (ins, sregs);
/* deferred from the DREG phase: now that sregs are done, record the
 * instruction's dreg as the cached lvreg for the variable */
11014 if (dest_has_lvreg) {
11015 g_assert (ins->dreg != -1);
11016 vreg_to_lvreg [prev_dreg] = ins->dreg;
11017 g_assert (lvregs_len < 1024);
11018 lvregs [lvregs_len ++] = prev_dreg;
11019 dest_has_lvreg = FALSE;
/* undo the store dreg/sreg2 swap done before the sreg loop */
11023 tmp_reg = ins->dreg;
11024 ins->dreg = ins->sreg2;
11025 ins->sreg2 = tmp_reg;
11028 if (MONO_IS_CALL (ins)) {
/* a call clobbers the lvregs, so flush the cache */
11029 /* Clear vreg_to_lvreg array */
11030 for (i = 0; i < lvregs_len; i++)
11031 vreg_to_lvreg [lvregs [i]] = 0;
11033 } else if (ins->opcode == OP_NOP) {
11035 MONO_INST_NULLIFY_SREGS (ins);
11038 if (cfg->verbose_level > 2)
11039 mono_print_ins_index (1, ins);
11043 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11045 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11046 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11048 for (i = 0; i < cfg->num_varinfo; ++i) {
11049 int vreg = MONO_VARINFO (cfg, i)->vreg;
11052 if (live_range_start [vreg]) {
11053 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11055 ins->inst_c1 = vreg;
11056 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11058 if (live_range_end [vreg]) {
11059 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11061 ins->inst_c1 = vreg;
11062 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* live_range_* arrays are heap-allocated (g_new0/g_new above) */
11067 g_free (live_range_start);
11068 g_free (live_range_end);
11069 g_free (live_range_start_bb);
11070 g_free (live_range_end_bb);
11075 * - use 'iadd' instead of 'int_add'
11076 * - handling ovf opcodes: decompose in method_to_ir.
11077 * - unify iregs/fregs
11078 * -> partly done, the missing parts are:
11079 * - a more complete unification would involve unifying the hregs as well, so
11080 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11081 * would no longer map to the machine hregs, so the code generators would need to
11082 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11083 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11084 * fp/non-fp branches speeds it up by about 15%.
11085 * - use sext/zext opcodes instead of shifts
11087 * - get rid of TEMPLOADs if possible and use vregs instead
11088 * - clean up usage of OP_P/OP_ opcodes
11089 * - cleanup usage of DUMMY_USE
11090 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11092 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11093 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11094 * - make sure handle_stack_args () is called before the branch is emitted
11095 * - when the new IR is done, get rid of all unused stuff
11096 * - COMPARE/BEQ as separate instructions or unify them ?
11097 * - keeping them separate allows specialized compare instructions like
11098 * compare_imm, compare_membase
11099 * - most back ends unify fp compare+branch, fp compare+ceq
11100 * - integrate mono_save_args into inline_method
11101 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11102 * - handle long shift opts on 32 bit platforms somehow: they require
11103 * 3 sregs (2 for arg1 and 1 for arg2)
11104 * - make byref a 'normal' type.
11105 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11106 * variable if needed.
11107 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11108 * like inline_method.
11109 * - remove inlining restrictions
11110 * - fix LNEG and enable cfold of INEG
11111 * - generalize x86 optimizations like ldelema as a peephole optimization
11112 * - add store_mem_imm for amd64
11113 * - optimize the loading of the interruption flag in the managed->native wrappers
11114 * - avoid special handling of OP_NOP in passes
11115 * - move code inserting instructions into one function/macro.
11116 * - try a coalescing phase after liveness analysis
11117 * - add float -> vreg conversion + local optimizations on !x86
11118 * - figure out how to handle decomposed branches during optimizations, ie.
11119 * compare+branch, op_jump_table+op_br etc.
11120 * - promote RuntimeXHandles to vregs
11121 * - vtype cleanups:
11122 * - add a NEW_VARLOADA_VREG macro
11123 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11124 * accessing vtype fields.
11125 * - get rid of I8CONST on 64 bit platforms
11126 * - dealing with the increase in code size due to branches created during opcode
11128 * - use extended basic blocks
11129 * - all parts of the JIT
11130 * - handle_global_vregs () && local regalloc
11131 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11132 * - sources of increase in code size:
11135 * - isinst and castclass
11136 * - lvregs not allocated to global registers even if used multiple times
11137 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11139 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11140 * - add all micro optimizations from the old JIT
11141 * - put tree optimizations into the deadce pass
11142 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11143 * specific function.
11144 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11145 * fcompare + branchCC.
11146 * - create a helper function for allocating a stack slot, taking into account
11147 * MONO_CFG_HAS_SPILLUP.
11149 * - merge the ia64 switch changes.
11150 * - optimize mono_regstate2_alloc_int/float.
11151 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11152 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11153 * parts of the tree could be separated by other instructions, killing the tree
11154 * arguments, or stores killing loads etc. Also, should we fold loads into other
11155 * instructions if the result of the load is used multiple times ?
11156 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11157 * - LAST MERGE: 108395.
11158 * - when returning vtypes in registers, generate IR and append it to the end of the
11159 * last bb instead of doing it in the epilog.
11160 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11168 - When to decompose opcodes:
11169 - earlier: this makes some optimizations hard to implement, since the low level IR
11170 no longer contains the necessary information. But it is easier to do.
11171 - later: harder to implement, enables more optimizations.
11172 - Branches inside bblocks:
11173 - created when decomposing complex opcodes.
11174 - branches to another bblock: harmless, but not tracked by the branch
11175 optimizations, so need to branch to a label at the start of the bblock.
11176 - branches to inside the same bblock: very problematic, trips up the local
11177 reg allocator. Can be fixed by splitting the current bblock, but that is a
11178 complex operation, since some local vregs can become global vregs etc.
11179 - Local/global vregs:
11180 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11181 local register allocator.
11182 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11183 structure, created by mono_create_var (). Assigned to hregs or the stack by
11184 the global register allocator.
11185 - When to do optimizations like alu->alu_imm:
11186 - earlier -> saves work later on since the IR will be smaller/simpler
11187 - later -> can work on more instructions
11188 - Handling of valuetypes:
11189 - When a vtype is pushed on the stack, a new temporary is created, an
11190 instruction computing its address (LDADDR) is emitted and pushed on
11191 the stack. Need to optimize cases when the vtype is used immediately as in
11192 argument passing, stloc etc.
11193 - Instead of the to_end stuff in the old JIT, simply call the function handling
11194 the values on the stack before emitting the last instruction of the bb.
11197 #endif /* DISABLE_JIT */