2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
/*
 * NOTE(review): every line in this file carries a stray leading number (an
 * artifact of the extraction this chunk went through) and several
 * continuation lines of the multi-line macros below are missing, so the
 * macro bodies are incomplete as written — recover the original file before
 * building.
 */
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE: body truncated in this extraction — the visible guard only
 * fires when inlining a real (non-wrapper) method (cfg->method != method).
 */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
/* Map an opcode to its immediate-operand variant (the _noemul form presumably skips emulated opcodes — confirm against the definitions). */
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
/* Emission helpers shared with other JIT translation units (non-static). */
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
/* Each MINI_OP/MINI_OP3 expansion contributes the dest/src register kinds of one opcode; the table rows come from including mini-ops.h below. */
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
/* Redefine MINI_OP/MINI_OP3 to count source registers instead of naming them. */
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
/* Initialize liveness bookkeeping for variable (vi); remainder of the macro body is truncated in this extraction. */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove (fragment — the declarator, braces and most case
 * bodies are missing from this extraction): dispatches on @type to pick the
 * register-move handling for a value of that type.
 */
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
206 switch (type->type) {
209 case MONO_TYPE_BOOLEAN:
221 case MONO_TYPE_FNPTR:
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
231 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying integral type (basetype substituted, then presumably re-dispatched). */
240 case MONO_TYPE_VALUETYPE:
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are reduced to the container class's byval type. */
250 case MONO_TYPE_GENERICINST:
251 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
255 g_assert (cfg->generic_sharing_context);
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb (fragment): debug dump of one basic block — the predecessor
 * list, the successor list, then every instruction in the block.
 */
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Finally print each instruction (index -1 = no per-ins numbering). */
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
281 * Can't put this at the beginning, since other files reference stuff from this
/* Report unverifiable IL: trap under the debug option, otherwise bail to the 'unverified' label (which must exist in the enclosing function). */
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL offset ip. NOTE(review): continuation lines are missing in this extraction. */
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm into a fresh preg. */
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand mixed with a pointer-sized one before a binop; a no-op on 32-bit (see the empty definition below). */
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack entries, type-check via type_from_op, emit the binop and push the (possibly decomposed) result. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
/*
 * (fragment — declarator, braces and several statements are missing from
 * this extraction; the verbose printfs trace each CFG edge as it is added)
 */
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
394 if (from->cil_code) {
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 printf ("edge from entry to exit\n");
/* If the edge already exists in from's successor list, presumably nothing more is added (the early-out line is missing here). */
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
/* Grow from's successor array by one (mempool-allocated, so never freed individually). */
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
/* Mirror the same de-dup + grow logic for to's predecessor list. */
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block (fragment — declarator, braces and the return are
 * missing): collects into a GList the handler blocks of clauses of kind
 * 'type' whose protected range contains ip but not target, i.e. the
 * handlers that must run when control leaves the clause.
 */
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
/* The branch leaves this clause: ip is inside it, target is not. */
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 res = g_list_append (res, handler);
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * (fragment — declarator, braces, case labels and break statements are
 * missing from this extraction; the visible assignments show which
 * STACK_* class each MONO_TYPE_* maps to)
 */
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack (the guard line is missing here). */
567 inst->type = STACK_MP;
572 switch (type->type) {
574 inst->type = STACK_INV;
578 case MONO_TYPE_BOOLEAN:
584 inst->type = STACK_I4;
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
601 inst->type = STACK_I8;
605 inst->type = STACK_R8;
/* Enums load as their underlying integral type; other valuetypes stay STACK_VTYPE. */
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
613 inst->type = STACK_VTYPE;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
/* Generic instances are reduced to the container class's byval type. */
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [lhs][rhs]; STACK_INV marks an illegal operand pairing. */
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type (declaration line missing in this extraction). */
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Integer-only binops (and/or/xor/...): no float or reference operands allowed. */
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, non-zero = valid (values 2/3/4 presumably encode restricted/verifier-warning pairings — confirm against type_from_op's users). */
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Shift result type: value type indexed by [value][shift-amount]; the shift count must be I4/PTR. */
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to obtain the I/L/P/F-specific OP_* opcode, indexed by result stack type. */
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
/*
 * (fragment — many case labels, break statements and the braces are missing
 * from this extraction; each group below computes ins->type from the
 * validation tables and then specializes the generic opcode via the
 * *_op_map delta tables)
 */
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, opcode specialized by result type. */
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
/* Shifts: shift-count legality from shift_table. */
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
787 ins->opcode = OP_ICOMPARE;
/* Immediate compare: note it indexes bin_comp_table with src1 twice (only one stack operand). */
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and ceq/cgt/clt families. */
804 ins->opcode += beqops_op_map [src1->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare-sets: only table entries with bit 0 set are legal here. */
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not). */
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
/* not: only integral/pointer operands stay valid. */
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/I2/I4 produce STACK_I4. */
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned-to-float. */
837 ins->type = STACK_R8;
838 switch (src1->type) {
841 ins->opcode = OP_ICONV_TO_R_UN;
844 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions (signed source). */
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width result; variant depends on source width and target word size. */
872 ins->type = STACK_PTR;
873 switch (src1->type) {
875 ins->opcode = OP_ICONV_TO_U;
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
882 ins->opcode = OP_MOVE;
886 ins->opcode = OP_LCONV_TO_U;
889 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to float. */
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
914 ins->type = STACK_R8;
/* conv.u4 family via ovfops map. */
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
/* conv.i/conv.u: native-int result. */
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
/* add/sub/mul.ovf: like numeric binops, but floats are not allowed. */
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
/* Loads: the membase opcode fixes the result type directly. */
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object as a conservative klass. */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
/* Stack type per MONO_TYPE_* ldind kind (declaration line missing in this extraction). */
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Argument-compatibility matrix; its rows are missing from this extraction and the lookup below is commented out. */
973 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature (fragment): validates that the stack types of
 * 'this' and each argument are compatible with the callee signature;
 * the return statements are missing from this extraction.
 */
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
982 switch (args->type) {
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
/* Managed pointers are only valid for byref parameters, and vice versa. */
997 if (!sig->params [i]->byref)
1001 if (sig->params [i]->byref)
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
/* Floats on the stack must bind to R4/R8 parameters. */
1015 if (sig->params [i]->byref)
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1050 inline static MonoInst *
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
1080 type_from_stack_type (MonoInst *ins) {
1081 switch (ins->type) {
1082 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1083 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1084 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1085 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 return &ins->klass->this_arg;
1088 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1089 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type (fragment — the return statements for each case group
 * are missing from this extraction): maps a MonoType to the STACK_* class
 * it occupies on the eval stack, after stripping enum wrappers via
 * mono_type_get_underlying_type.
 */
1096 static G_GNUC_UNUSED int
1097 type_to_stack_type (MonoType *t)
1099 switch (mono_type_get_underlying_type (t)->type) {
1102 case MONO_TYPE_BOOLEAN:
1105 case MONO_TYPE_CHAR:
1112 case MONO_TYPE_FNPTR:
1114 case MONO_TYPE_CLASS:
1115 case MONO_TYPE_STRING:
1116 case MONO_TYPE_OBJECT:
1117 case MONO_TYPE_SZARRAY:
1118 case MONO_TYPE_ARRAY:
1126 case MONO_TYPE_VALUETYPE:
1127 case MONO_TYPE_TYPEDBYREF:
/* Generic instances split on valuetype-ness (the return lines are missing here). */
1129 case MONO_TYPE_GENERICINST:
1130 if (mono_type_generic_inst_is_valuetype (t))
1136 g_assert_not_reached ();
/*
 * array_access_to_klass (fragment — the CEE_LDELEM_*/CEE_STELEM_* case
 * labels for most returns are missing from this extraction): maps an array
 * load/store opcode to the element MonoClass it accesses.
 */
1143 array_access_to_klass (int opcode)
1147 return mono_defaults.byte_class;
1149 return mono_defaults.uint16_class;
1152 return mono_defaults.int_class;
1155 return mono_defaults.sbyte_class;
1158 return mono_defaults.int16_class;
1161 return mono_defaults.int32_class;
1163 return mono_defaults.uint32_class;
1166 return mono_defaults.int64_class;
1169 return mono_defaults.single_class;
1172 return mono_defaults.double_class;
1173 case CEE_LDELEM_REF:
1174 case CEE_STELEM_REF:
1175 return mono_defaults.object_class;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
/*
 * (fragment — declarator, braces and the STACK_* case labels are missing):
 * returns a local variable for stack slot 'slot' holding a value shaped
 * like 'ins', reusing a cached one from cfg->intvars when the slot/type
 * pair has been seen before.
 */
1186 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 /* inlining can result in deeper stacks */
1192 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1193 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1195 pos = ins->type - 1 + slot * STACK_MAX;
1197 switch (ins->type) {
1204 if ((vnum = cfg->intvars [pos]))
1205 return cfg->varinfo [vnum];
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1207 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to a fresh variable. */
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1216 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1224 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1225 jump_info_token->image = image;
1226 jump_info_token->token = token;
1227 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
1241 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* (fragment — declarator, braces and several statements are missing from this extraction) */
1244 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 MonoBasicBlock *bb = cfg->cbb;
1248 MonoBasicBlock *outb;
1249 MonoInst *inst, **locals;
1254 if (cfg->verbose_level > 3)
1255 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or allocate) the variable set used to carry the stack across the edge. */
1256 if (!bb->out_scount) {
1257 bb->out_scount = count;
1258 //printf ("bblock %d has out:", bb->block_num);
1260 for (i = 0; i < bb->out_count; ++i) {
1261 outb = bb->out_bb [i];
1262 /* exception handlers are linked, but they should not be considered for stack args */
1263 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 //printf (" %d", outb->block_num);
/* If a successor already has an in_stack, share those variables. */
1266 if (outb->in_stack) {
1268 bb->out_stack = outb->in_stack;
1274 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1275 for (i = 0; i < count; ++i) {
1277 * try to reuse temps already allocated for this purpouse, if they occupy the same
1278 * stack slot and if they are of the same type.
1279 * This won't cause conflicts since if 'local' is used to
1280 * store one of the values in the in_stack of a bblock, then
1281 * the same variable will be used for the same outgoing stack
1283 * This doesn't work when inlining methods, since the bblocks
1284 * in the inlined methods do not inherit their in_stack from
1285 * the bblock they are inlined to. See bug #58863 for an
1288 if (cfg->inlined_method)
1289 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the variable set to each successor's in_stack; mismatched depths make the method unverifiable. */
1296 for (i = 0; i < bb->out_count; ++i) {
1297 outb = bb->out_bb [i];
1298 /* exception handlers are linked, but they should not be considered for stack args */
1299 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 if (outb->in_scount) {
1302 if (outb->in_scount != bb->out_scount) {
1303 cfg->unverifiable = TRUE;
1306 continue; /* check they are the same locals */
1308 outb->in_scount = count;
1309 outb->in_stack = bb->out_stack;
/* Spill each live stack entry into its carrier variable and replace the stack slot with the variable. */
1312 locals = bb->out_stack;
1314 for (i = 0; i < count; ++i) {
1315 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1316 inst->cil_code = sp [i]->cil_code;
1317 sp [i] = locals [i];
1318 if (cfg->verbose_level > 3)
1319 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 * It is possible that the out bblocks already have in_stack assigned, and
1324 * the in_stacks differ. In this case, we will store to all the different
/* Second pass: successors whose in_stack differs from 'locals' each get their own stores. */
1331 /* Find a bblock which has a different in_stack */
1333 while (bindex < bb->out_count) {
1334 outb = bb->out_bb [bindex];
1335 /* exception handlers are linked, but they should not be considered for stack args */
1336 if (outb->flags & BB_EXCEPTION_HANDLER) {
1340 if (outb->in_stack != locals) {
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1348 locals = outb->in_stack;
1357 /* Emit code which loads interface_offsets [klass->interface_id]
1358 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load into INTF_REG the interface-offsets entry for KLASS, given the
 *   address of a MonoVTable in VTABLE_REG.  The interface_offsets array is
 *   laid out immediately before the vtable, so the entry is read at a
 *   negative offset from VTABLE_REG.
 */
1361 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1363 if (cfg->compile_aot) {
/* AOT: the interface id is not known at compile time; load it as a patched
 * constant (MONO_PATCH_INFO_ADJUSTED_IID — presumably already scaled to the
 * negative byte offset used by the JIT path below; verify against the AOT
 * patch resolver) and index the table at runtime. */
1364 int ioffset_reg = alloc_preg (cfg);
1365 int iid_reg = alloc_preg (cfg);
1367 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1368 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so the offset folds. */
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1378 * stored in "klass_reg" implements the interface "klass".
/*
 * The test reads one byte of the class' interface_bitmap and masks the bit
 * corresponding to klass->interface_id (byte index = iid >> 3, bit within
 * the byte = iid & 7).
 */
1381 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 int ibitmap_byte_reg = alloc_preg (cfg);
1386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1388 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so compute the byte
 * index and the bit mask at runtime from the patched iid constant. */
1389 int iid_reg = alloc_preg (cfg);
1390 int shifted_iid_reg = alloc_preg (cfg);
1391 int ibitmap_byte_address_reg = alloc_preg (cfg);
1392 int masked_iid_reg = alloc_preg (cfg);
1393 int iid_one_bit_reg = alloc_preg (cfg);
1394 int iid_bit_reg = alloc_preg (cfg);
1395 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index: iid >> 3 */
1396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* bit mask: 1 << (iid & 7) */
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1400 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the mask are compile-time constants. */
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1410 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1411 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bit-test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from the vtable's interface_bitmap field instead of the class'.
 */
1414 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1416 int ibitmap_reg = alloc_preg (cfg);
1417 int ibitmap_byte_reg = alloc_preg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1421 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and mask (1 << (iid & 7)) at runtime
 * from the patched interface-id constant. */
1422 int iid_reg = alloc_preg (cfg);
1423 int shifted_iid_reg = alloc_preg (cfg);
1424 int ibitmap_byte_address_reg = alloc_preg (cfg);
1425 int masked_iid_reg = alloc_preg (cfg);
1426 int iid_one_bit_reg = alloc_preg (cfg);
1427 int iid_bit_reg = alloc_preg (cfg);
1428 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1431 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1433 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask fold to constants. */
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 * Emit code which checks whether the interface id of @klass is not larger
1444 * than the value given by max_iid_reg.
/*
 * On failure: if FALSE_TARGET is non-NULL the emitted code branches there
 * (max_iid < iid, unsigned compare), otherwise an InvalidCastException is
 * raised.
 */
1447 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 if (cfg->compile_aot) {
/* AOT: the interface id comes from a patched runtime constant. */
1451 int iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1453 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1463 /* Same as above, but obtains max_iid from a vtable */
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *   Emit an "is instance" test against a non-interface KLASS using the
 *   supertypes table of the runtime class in KLASS_REG: branch to
 *   TRUE_TARGET when supertypes [klass->idepth - 1] == klass, and to
 *   FALSE_TARGET when the inheritance depth is too small.
 */
1486 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1488 int idepth_reg = alloc_preg (cfg);
1489 int stypes_reg = alloc_preg (cfg);
1490 int stype = alloc_preg (cfg);
/* Beyond the default supertable size the depth check cannot be elided,
 * so verify candidate->idepth >= klass->idepth before indexing. */
1492 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1493 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1497 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1499 if (cfg->compile_aot) {
/* AOT: the MonoClass pointer must come from a patchable constant. */
1500 int const_reg = alloc_preg (cfg);
1501 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1502 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: compare directly against the pinned MonoClass* as an immediate. */
1504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check from the vtable in VTABLE_REG: first the
 *   max-iid range check, then the interface-bitmap bit test.  With a
 *   TRUE_TARGET the result is a branch (isinst-style); without one, a zero
 *   bit raises InvalidCastException (castclass-style).
 */
1510 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int intf_reg = alloc_preg (cfg);
1514 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1515 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 * Variant of the above that takes a register to the class, not the vtable.
/*
 * Same check sequence as mini_emit_iface_cast, with the max-iid and bitmap
 * reads performed on the MonoClass in KLASS_REG instead of a vtable.
 */
1527 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1529 int intf_bit_reg = alloc_preg (cfg);
1531 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1532 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Emit an exact class-identity check: compare the MonoClass* in KLASS_REG
 *   with KLASS and raise InvalidCastException on mismatch.
 */
1541 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1543 if (cfg->compile_aot) {
/* AOT: load KLASS through a patchable class constant. */
1544 int const_reg = alloc_preg (cfg);
1545 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: compare against the MonoClass* as an immediate. */
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *   Like mini_emit_class_check, but instead of raising an exception, branch
 *   to TARGET using BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN) based on the
 *   comparison of KLASS_REG against KLASS.
 */
1554 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1556 if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emit a castclass check of the object in OBJ_REG (runtime class in
 *   KLASS_REG) against KLASS, raising InvalidCastException on failure.
 *   For array types it checks rank and then recurses on the element
 *   (cast_class); for other types it walks the supertypes table.
 *   OBJ_REG may be -1 when only the class needs checking (recursive array
 *   element case); OBJECT_IS_NULL is the block to jump to when the check
 *   can be satisfied trivially.
 */
1567 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 int rank_reg = alloc_preg (cfg);
1571 int eclass_reg = alloc_preg (cfg);
/* Array case: ranks must match exactly. */
1573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1574 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1575 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1576 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types related to System.Enum: the cast_class of an
 * enum-typed array is handled through the enum class / its parent.
 * NOTE(review): the exact enum/ValueType equivalences here follow CLI array
 * covariance rules — confirm against the non-elided branches of the full
 * source before modifying. */
1578 if (klass->cast_class == mono_defaults.object_class) {
1579 int parent_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1581 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1582 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class) {
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface element type: full interface check on the element class. */
1589 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1592 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector
 * (bounds == NULL), not a multi-dimensional array of rank 1. */
1595 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1596 /* Check that the object is a vector too */
1597 int bounds_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1600 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: depth check + supertypes-table identity check, as in
 * mini_emit_isninst_cast but raising instead of branching on failure. */
1603 int idepth_reg = alloc_preg (cfg);
1604 int stypes_reg = alloc_preg (cfg);
1605 int stype = alloc_preg (cfg);
1607 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1608 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1610 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1614 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emit inline code that sets SIZE bytes at DESTREG+OFFSET to VAL
 *   (currently only VAL == 0 is supported, see the assert).  Small aligned
 *   sizes use immediate stores; larger sizes store a zeroed register in
 *   descending power-of-two chunks, falling back to byte stores on targets
 *   where unaligned access is not allowed.
 */
1619 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is implemented; non-zero values would need val splatted
 * across the register width. */
1623 g_assert (val == 0);
/* Tiny, sufficiently aligned block: a single immediate store. */
1628 if ((size <= 4) && (size <= align)) {
1631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 #if SIZEOF_REGISTER == 8
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL in a register once, then store chunks. */
1647 val_reg = alloc_preg (cfg);
1649 if (SIZEOF_REGISTER == 8)
1650 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: byte stores until aligned. */
1655 /* This could be optimized further if necessary */
1657 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1664 #if !NO_UNALIGNED_ACCESS
1665 if (SIZEOF_REGISTER == 8) {
/* 64-bit targets: clear a leading 4-byte chunk if needed, then 8-byte
 * stores for the bulk. */
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1690 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1696 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *   Emit inline code that copies SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET through a scratch register, using the widest
 *   load/store pairs the alignment (and target) permit: 8-byte chunks on
 *   64-bit targets with unaligned access, then 4-, 2-, and 1-byte tails.
 *   The source and destination regions are assumed not to overlap —
 *   TODO confirm against callers.
 */
1699 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned prefix: copy byte-by-byte until aligned. */
1707 /* This could be optimized further if necessary */
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1718 #if !NO_UNALIGNED_ACCESS
1719 if (SIZEOF_REGISTER == 8) {
/* 64-bit bulk copy in 8-byte chunks. */
1721 cur_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail (or 32-bit bulk): 4-byte chunks... */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...then 2-byte... */
1740 cur_reg = alloc_preg (cfg);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* ...and finally single bytes. */
1748 cur_reg = alloc_preg (cfg);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return TYPE to the IR call opcode family: plain CALL for
 *   void/int/reference returns, LCALL for 64-bit, FCALL for floating point,
 *   VCALL for value types.  CALLI selects the *_REG (indirect) variant and
 *   VIRT the *VIRT variant.  Enums and generic instances are resolved to
 *   their underlying/container type and re-dispatched.
 */
1760 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, handled like a plain CALL. */
1763 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to their basic type first. */
1766 type = mini_get_basic_type_from_generic (gsctx, type);
1767 switch (type->type) {
1768 case MONO_TYPE_VOID:
1769 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1772 case MONO_TYPE_BOOLEAN:
1775 case MONO_TYPE_CHAR:
1778 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 case MONO_TYPE_FNPTR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1784 case MONO_TYPE_CLASS:
1785 case MONO_TYPE_STRING:
1786 case MONO_TYPE_OBJECT:
1787 case MONO_TYPE_SZARRAY:
1788 case MONO_TYPE_ARRAY:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1795 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1796 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying integral type. */
1797 if (type->data.klass->enumtype) {
1798 type = mono_class_enum_basetype (type->data.klass);
1801 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 case MONO_TYPE_TYPEDBYREF:
1803 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1804 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1805 type = &type->data.generic_class->container_class->byval_arg;
1808 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1814 * target_type_is_incompatible:
1815 * @cfg: MonoCompile context
1817 * Check that the item @arg on the evaluation stack can be stored
1818 * in the target type (can be a local, or field, etc).
1819 * The cfg arg can be used to check if we need verification or just
1822 * Returns: non-0 value if arg can't be stored on a target.
1825 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1827 MonoType *simple_type;
/* Managed pointers: only STACK_MP (with matching pointee class) or
 * STACK_PTR may be stored into a byref target. */
1830 if (target->byref) {
1831 /* FIXME: check that the pointed to types match */
1832 if (arg->type == STACK_MP)
1833 return arg->klass != mono_class_from_mono_type (target);
1834 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type. */
1839 simple_type = mono_type_get_underlying_type (target);
1840 switch (simple_type->type) {
1841 case MONO_TYPE_VOID:
1845 case MONO_TYPE_BOOLEAN:
1848 case MONO_TYPE_CHAR:
1851 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 /* STACK_MP is needed when setting pinned locals */
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1861 case MONO_TYPE_FNPTR:
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1872 /* FIXME: check type compatibility */
1876 if (arg->type != STACK_I8)
1881 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
1884 case MONO_TYPE_VALUETYPE:
1885 if (arg->type != STACK_VTYPE)
1887 klass = mono_class_from_mono_type (simple_type);
1888 if (klass != arg->klass)
1891 case MONO_TYPE_TYPEDBYREF:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_GENERICINST:
1899 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1900 if (arg->type != STACK_VTYPE)
1902 klass = mono_class_from_mono_type (simple_type);
1903 if (klass != arg->klass)
1907 if (arg->type != STACK_OBJ)
1909 /* FIXME: check type compatibility */
/* Shared generic params (VAR/MVAR) are treated as references under
 * generic sharing. */
1913 case MONO_TYPE_MVAR:
1914 /* FIXME: all the arguments must be references for now,
1915 * later look inside cfg and see if the arg num is
1916 * really a reference
1918 g_assert (cfg->generic_sharing_context);
1919 if (arg->type != STACK_OBJ)
1923 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer, or native ptr. */
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1954 simple_type = sig->params [i];
/* Resolve shared generic parameters before the type dispatch. */
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1963 case MONO_TYPE_BOOLEAN:
1966 case MONO_TYPE_CHAR:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1989 if (args [i]->type != STACK_I8)
1994 if (args [i]->type != STACK_R8)
1997 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_GENERICINST:
/* Re-check against the generic container's open type. */
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2014 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALLVIRT opcode to the corresponding direct *CALL opcode;
 *   aborts on any opcode outside the CALLVIRT family.
 */
2022 callvirt_to_call (int opcode)
2027 case OP_VOIDCALLVIRT:
2036 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 *   (call through a vtable/memory slot); aborts on anything else.
 */
2043 callvirt_to_call_membase (int opcode)
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2051 return OP_FCALL_MEMBASE;
2053 return OP_LCALL_MEMBASE;
2055 return OP_VCALL_MEMBASE;
2057 g_assert_not_reached ();
2063 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Set up the hidden IMT (interface method table) argument for CALL.
 *   On architectures with a dedicated IMT register the method pointer
 *   (from IMT_ARG if given, otherwise CALL->method as a constant) is moved
 *   into MONO_ARCH_IMT_REG; otherwise the arch back end decides.
 */
2065 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2067 #ifdef MONO_ARCH_IMT_REG
2068 int method_reg = alloc_preg (cfg);
2071 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2072 } else if (cfg->compile_aot) {
/* AOT: the method pointer must be a patchable constant. */
2073 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2076 MONO_INST_NEW (cfg, ins, OP_PCONST);
2077 ins->inst_p0 = call->method;
2078 ins->dreg = method_reg;
2079 MONO_ADD_INS (cfg->cbb, ins);
/* Register the fixed-register out-arg so the register allocator pins it. */
2082 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2084 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2089 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from mempool MP and fill in its fields.
 *   The returned structure is owned by the mempool — no explicit free.
 */
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2096 ji->data.target = target;
/* Forward declaration: defined below, needed by mono_emit_call_args for
 * the soft-float argument conversion. */
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2104 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   selecting the opcode from the return type (CALLI/VIRTUAL/TAIL variants),
 *   arranging the value-type return buffer, converting R4 args under
 *   soft-float, and letting the back end (or LLVM) lower the out-args.
 *   The instruction is NOT added to a basic block here — callers do that.
 */
2105 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2106 MonoInst **args, int calli, int virtual, int tail)
2109 #ifdef MONO_ARCH_SOFT_FLOAT
2114 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2116 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2119 call->signature = sig;
2121 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Value-type returns: either reuse the caller-provided vret address
 * (first branch) or allocate a temp and reference it via
 * OP_OUTARG_VTRETADDR (second branch). */
2124 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2125 call->vret_var = cfg->vret_addr;
2126 //g_assert_not_reached ();
2128 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2129 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2132 temp->backend.is_pinvoke = sig->pinvoke;
2135 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2136 * address of return value to increase optimization opportunities.
2137 * Before vtype decomposition, the dreg of the call ins itself represents the
2138 * fact the call modifies the return value. After decomposition, the call will
2139 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2140 * will be transformed into an LDADDR.
2142 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2143 loada->dreg = alloc_preg (cfg);
2144 loada->inst_p0 = temp;
2145 /* We reference the call too since call->dreg could change during optimization */
2146 loada->inst_p1 = call;
2147 MONO_ADD_INS (cfg->cbb, loada);
2149 call->inst.dreg = temp->dreg;
2151 call->vret_var = loada;
2152 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2153 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2155 #ifdef MONO_ARCH_SOFT_FLOAT
2157 * If the call has a float argument, we would need to do an r8->r4 conversion using
2158 * an icall, but that cannot be done during the call sequence since it would clobber
2159 * the call registers + the stack. So we do it before emitting the call.
2161 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2163 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; treat it as native int. */
2165 if (i >= sig->hasthis)
2166 t = sig->params [i - sig->hasthis];
2168 t = &mono_defaults.int_class->byval_arg;
2169 t = mono_type_get_underlying_type (t);
2171 if (!t->byref && t->type == MONO_TYPE_R4) {
2172 MonoInst *iargs [1];
2176 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2178 /* The result will be in an int vreg */
2179 call->args [i] = conv;
/* Hand the out-arg lowering to LLVM or the native back end. */
2185 if (COMPILE_LLVM (cfg))
2186 mono_llvm_emit_call (cfg, call);
2188 mono_arch_emit_call (cfg, call);
2190 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area needed by any call. */
2193 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2194 cfg->flags |= MONO_CFG_HAS_CALLS;
2199 inline static MonoInst*
/*
 * mono_emit_calli:
 *   Emit an indirect call through the code address in ADDR->dreg with
 *   signature SIG and arguments ARGS; returns the call instruction.
 */
2200 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2202 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2204 call->inst.sreg1 = addr->dreg;
2206 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2208 return (MonoInst*)call;
2211 inline static MonoInst*
/*
 * mono_emit_rgctx_calli:
 *   Like mono_emit_calli, but additionally passes the runtime generic
 *   context in RGCTX_ARG via the dedicated MONO_ARCH_RGCTX_REG register.
 *   Unreachable on architectures without an rgctx register.
 */
2212 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2214 #ifdef MONO_ARCH_RGCTX_REG
2219 rgctx_reg = mono_alloc_preg (cfg);
2220 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2222 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
/* Pin the rgctx into its fixed register across the call. */
2224 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2225 cfg->uses_rgctx_reg = TRUE;
2226 call->rgctx_reg = TRUE;
2228 return (MonoInst*)call;
2230 g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  A
 *   non-NULL THIS makes the call virtual; the function then tries, in
 *   order: delegate Invoke fast path, direct call to non-virtual/final
 *   methods (devirtualization), and finally a real vtable/IMT-slot
 *   indirect call.  IMT_ARG, when set, carries an explicit interface
 *   method token for the IMT dispatch.  Returns the call instruction.
 */
2236 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2237 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2239 gboolean virtual = this != NULL;
2240 gboolean enable_for_aot = TRUE;
2243 if (method->string_ctor) {
2244 /* Create the real signature */
2245 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2246 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2247 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2252 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
/* Remoting: non-virtual calls on MarshalByRef (or object) targets go
 * through a wrapper that checks for transparent proxies. */
2254 if (this && sig->hasthis &&
2255 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2256 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2257 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2259 call->method = method;
2261 call->inst.flags |= MONO_INST_HAS_METHOD;
2262 call->inst.inst_left = this;
2265 int vtable_reg, slot_reg, this_reg;
2267 this_reg = this->dreg;
2269 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2270 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2271 /* Make a call to delegate->invoke_impl */
2272 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2273 call->inst.inst_basereg = this_reg;
2274 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2275 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2277 return (MonoInst*)call;
/* Devirtualize non-virtual (or final non-remoting-wrapper) targets. */
2281 if ((!cfg->compile_aot || enable_for_aot) &&
2282 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2283 (MONO_METHOD_IS_FINAL (method) &&
2284 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2286 * the method is not virtual, we just need to ensure this is not null
2287 * and then we can call the method directly.
2289 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2290 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2293 if (!method->string_ctor) {
2294 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2295 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2296 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2299 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2301 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2303 return (MonoInst*)call;
2306 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2308 * the method is virtual, but we can statically dispatch since either
2309 * it's class or the method itself are sealed.
2310 * But first we need to ensure it's not a null reference.
2312 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2313 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2314 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2316 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2317 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2319 return (MonoInst*)call;
/* Real virtual dispatch: load the vtable, then pick an IMT slot
 * (interfaces) or a vtable slot (classes) to call through. */
2322 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 vtable_reg = alloc_preg (cfg);
2325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2326 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2328 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2330 guint32 imt_slot = mono_method_get_imt_slot (method);
2331 emit_imt_argument (cfg, call, imt_arg);
2332 slot_reg = vtable_reg;
2333 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2336 if (slot_reg == -1) {
/* No IMT: index the per-interface method table instead. */
2337 slot_reg = alloc_preg (cfg);
2338 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2339 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2342 slot_reg = vtable_reg;
2343 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2344 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2345 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT argument. */
2347 g_assert (mono_method_signature (method)->generic_param_count);
2348 emit_imt_argument (cfg, call, imt_arg);
2353 call->inst.sreg1 = slot_reg;
2354 call->virtual = TRUE;
2357 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2359 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Like mono_emit_method_call_full, but additionally passes VTABLE_ARG
 *   (the runtime generic context vtable) in the dedicated
 *   MONO_ARCH_RGCTX_REG register.
 */
2363 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2364 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2371 #ifdef MONO_ARCH_RGCTX_REG
/* Move the rgctx into a fresh vreg before emitting the call so it can be
 * pinned as a fixed-register out-arg below. */
2372 rgctx_reg = mono_alloc_preg (cfg);
2373 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2378 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2380 call = (MonoCallInst*)ins;
2382 #ifdef MONO_ARCH_RGCTX_REG
2383 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2384 cfg->uses_rgctx_reg = TRUE;
2385 call->rgctx_reg = TRUE;
2394 static inline MonoInst*
/* Convenience wrapper: direct (or virtual if THIS != NULL) call using the
 * method's own signature and no IMT argument. */
2395 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2397 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native code address FUNC with signature SIG;
 *   returns the call instruction.
 */
2401 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2408 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2411 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2413 return (MonoInst*)call;
2416 inline static MonoInst*
/*
 * mono_emit_jit_icall:
 *   Emit a call to the registered JIT icall identified by its function
 *   address FUNC, going through the icall's wrapper and signature.
 */
2417 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2419 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2423 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2427 * mono_emit_abs_call:
2429 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2431 inline static MonoInst*
2432 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2433 MonoMethodSignature *sig, MonoInst **args)
2435 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2439 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register JI in the abs_patches table (keyed by itself) so the patch
 * resolver can recognize it when it encounters the fake address. */
2442 if (cfg->abs_patches == NULL)
2443 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2444 g_hash_table_insert (cfg->abs_patches, ji, ji);
2445 ins = mono_emit_native_call (cfg, ji, sig, args);
2446 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   After a pinvoke (or LLVM) call returning a sub-register-sized integer,
 *   emit an explicit sign/zero extension of the result, since native code
 *   may leave the upper bits of the return register uninitialized.
 *   Returns the (possibly replaced) result instruction.
 */
2451 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2453 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2454 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2458 * Native code might return non register sized integers
2459 * without initializing the upper bits.
/* Pick the widening conversion matching the return's load width. */
2461 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2462 case OP_LOADI1_MEMBASE:
2463 widen_op = OP_ICONV_TO_I1;
2465 case OP_LOADU1_MEMBASE:
2466 widen_op = OP_ICONV_TO_U1;
2468 case OP_LOADI2_MEMBASE:
2469 widen_op = OP_ICONV_TO_I2;
2471 case OP_LOADU2_MEMBASE:
2472 widen_op = OP_ICONV_TO_U2;
2478 if (widen_op != -1) {
2479 int dreg = alloc_preg (cfg);
2482 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2483 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the cached managed String.memcpy(3 args) helper from corlib;
 *   aborts if the running corlib is too old to have it.
 */
2493 get_memcpy_method (void)
2495 static MonoMethod *memcpy_method = NULL;
2496 if (!memcpy_method) {
2497 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2499 g_error ("Old corlib found. Install a new one");
2501 return memcpy_method;
2505 * Emit code to copy a valuetype of type @klass whose address is stored in
2506 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects the unmanaged (marshalled) size of the struct.  When
 * write barriers are enabled and the struct contains references (and the
 * destination is not the stack), the copy goes through the mono_value_copy
 * icall so the GC sees the stores; otherwise a small inline memcpy or the
 * managed memcpy helper is used.
 */
2509 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2511 MonoInst *iargs [3];
2514 MonoMethod *memcpy_method;
2518 * This check breaks with spilled vars... need to handle it during verification anyway.
2519 * g_assert (klass && klass == src->klass && klass == dest->klass);
2523 n = mono_class_native_size (klass, &align);
2525 n = mono_class_value_size (klass, &align);
2527 #if HAVE_WRITE_BARRIERS
2528 /* if native is true there should be no references in the struct */
2529 if (klass->has_references && !native) {
2530 /* Avoid barriers when storing to the stack */
2531 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2532 (dest->opcode == OP_LDADDR))) {
2535 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* GC-safe copy: mono_value_copy performs the required write barriers. */
2537 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the helper. */
2542 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2543 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2544 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2548 EMIT_NEW_ICONST (cfg, iargs [2], n);
2550 memcpy_method = get_memcpy_method ();
2551 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed memset(dst, val, n) helper defined on corlib's String
 * class, caching it in a function-local static. Aborts with g_error if the
 * helper is missing (old corlib).
 * NOTE(review): the NULL-check line guarding g_error and the closing braces
 * are elided in this excerpt.
 */
2556 get_memset_method (void)
2558 static MonoMethod *memset_method = NULL;
2559 if (!memset_method) {
2560 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2562 g_error ("Old corlib found. Install a new one");
2564 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type @klass at the address in
 * @dest->dreg (CIL 'initobj'). Small types are inlined via mini_emit_memset;
 * larger ones call the managed memset helper with value 0.
 * NOTE(review): this excerpt elides the 'n'/'align' declarations, the early
 * return after the inline memset, the iargs[0] setup, and closing braces.
 */
2568 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2570 MonoInst *iargs [3];
2573 MonoMethod *memset_method;
2575 /* FIXME: Optimize this for the case when dest is an LDADDR */
2577 mono_class_init (klass);
2578 n = mono_class_value_size (klass, &align);
2580 if (n <= sizeof (gpointer) * 5) {
2581 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2584 memset_method = get_memset_method ();
2586 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2587 EMIT_NEW_ICONST (cfg, iargs [2], n);
2588 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for @method inside shared
 * generic code. Depending on how the method is shared, the context comes
 * from: (a) the MRGCTX variable when the method's own generic context is
 * used, (b) the vtable variable for static/valuetype methods (possibly
 * indirected through the MRGCTX's class_vtable field), or (c) the vtable
 * loaded from the 'this' argument.
 * NOTE(review): several lines are elided in this excerpt, including the
 * returns from each branch and the vtable_reg declaration in the second
 * branch; the final EMIT/return after loading from 'this' is also cut.
 */
2593 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2595 MonoInst *this = NULL;
/* Only meaningful when compiling shared generic code. */
2597 g_assert (cfg->generic_sharing_context);
/* Load 'this' only for non-static reference-type methods that do not
 * use their own method context. */
2599 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2600 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2601 !method->klass->valuetype)
2602 EMIT_NEW_ARGLOAD (cfg, this, 0);
2604 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2605 MonoInst *mrgctx_loc, *mrgctx_var;
/* A method context is only available for inflated generic methods. */
2608 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2610 mrgctx_loc = mono_get_vtable_var (cfg);
2611 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2614 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2615 MonoInst *vtable_loc, *vtable_var;
2619 vtable_loc = mono_get_vtable_var (cfg);
2620 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2622 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; fetch the vtable from it. */
2623 MonoInst *mrgctx_var = vtable_var;
2626 vtable_reg = alloc_preg (cfg);
2627 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2628 vtable_var->type = STACK_PTR;
2634 int vtable_reg, res_reg;
2636 vtable_reg = alloc_preg (cfg);
2637 res_reg = alloc_preg (cfg);
/* Fall-back case: read the vtable out of the 'this' object. */
2638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from @mp) a MonoJumpInfoRgctxEntry describing an RGCTX slot:
 * the requesting method, whether the lookup goes through the MRGCTX, a
 * nested MonoJumpInfo carrying (patch_type, patch_data), and the kind of
 * info to fetch (@info_type).
 * NOTE(review): the opening brace and the 'return res;' are elided in this
 * excerpt.
 */
2643 static MonoJumpInfoRgctxEntry *
2644 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2646 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2647 res->method = method;
2648 res->in_mrgctx = in_mrgctx;
2649 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2650 res->data->type = patch_type;
2651 res->data->data.target = patch_data;
2652 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX fetch trampoline, passing @rgctx as the
 * single argument; @entry is resolved via a MONO_PATCH_INFO_RGCTX_FETCH
 * patch at code-emission time.
 * NOTE(review): the opening brace is elided in this excerpt.
 */
2657 static inline MonoInst*
2658 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2660 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching, from the runtime generic context, the @rgctx_type info
 * (vtable, klass, ...) associated with @klass. Builds an RGCTX entry keyed
 * on a MONO_PATCH_INFO_CLASS patch and calls the lazy fetch trampoline.
 * NOTE(review): the opening brace is elided in this excerpt.
 */
2664 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2665 MonoClass *klass, int rgctx_type)
2667 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2668 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2670 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Like emit_get_rgctx_klass, but the RGCTX entry describes @cmethod via a
 * MONO_PATCH_INFO_METHODCONST patch (e.g. to fetch generic method code).
 * NOTE(review): the opening brace is elided in this excerpt.
 */
2674 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2675 MonoMethod *cmethod, int rgctx_type)
2677 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2678 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2680 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Like emit_get_rgctx_klass, but the RGCTX entry describes @field via a
 * MONO_PATCH_INFO_FIELD patch.
 * NOTE(review): the opening brace is elided in this excerpt.
 */
2684 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2685 MonoClassField *field, int rgctx_type)
2687 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2688 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2690 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that @obj is an instance of exactly @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 * The comparison strategy depends on the compilation mode:
 *   - MONO_OPT_SHARED: compare the object's MonoClass against the class
 *     (constant via CLASSCONST under AOT, immediate otherwise);
 *   - shared generic code (context_used): compare vtables, fetching the
 *     expected vtable from the RGCTX;
 *   - default: compare vtables against a constant vtable.
 * NOTE(review): the opening brace and the closing braces of the branches are
 * elided in this excerpt.
 */
2694 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2696 int vtable_reg = alloc_preg (cfg);
2697 int context_used = 0;
2699 if (cfg->generic_sharing_context)
2700 context_used = mono_class_check_context_used (array_class);
2702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2704 if (cfg->opt & MONO_OPT_SHARED) {
2705 int class_reg = alloc_preg (cfg);
2706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2707 if (cfg->compile_aot) {
/* Under AOT the class pointer cannot be embedded as an immediate. */
2708 int klass_reg = alloc_preg (cfg);
2709 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2714 } else if (context_used) {
2715 MonoInst *vtable_ins;
/* Shared code: the expected vtable is fetched from the RGCTX at runtime. */
2717 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2718 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2720 if (cfg->compile_aot) {
2721 int vt_reg = alloc_preg (cfg);
2722 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2723 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
/* Any mismatch in the compare above raises the exception. */
2729 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code that records the source class
 * (read from the object's vtable) and the target @klass into the JIT TLS
 * (class_cast_from/class_cast_to), so a failing cast can produce a detailed
 * error message.
 * NOTE(review): the opening brace, the NULL check on tls_get preceding the
 * fprintf error path, and the closing braces are elided in this excerpt.
 */
2733 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2735 if (mini_get_debug_options ()->better_cast_details) {
2736 int to_klass_reg = alloc_preg (cfg);
2737 int vtable_reg = alloc_preg (cfg);
2738 int klass_reg = alloc_preg (cfg);
2739 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2742 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2746 MONO_ADD_INS (cfg->cbb, tls_get);
2747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Stash "from" (the object's actual class) and "to" (the cast target). */
2750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2751 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details TLS state set by save_cast_details once a cast
 * has succeeded. Zeroing class_cast_from alone is sufficient to mark the
 * record invalid.
 * NOTE(review): the opening brace and closing braces are elided in this
 * excerpt.
 */
2757 reset_cast_details (MonoCompile *cfg)
2759 /* Reset the variables holding the cast details */
2760 if (mini_get_debug_options ()->better_cast_details) {
2761 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2763 MONO_ADD_INS (cfg->cbb, tls_get);
2764 /* It is enough to reset the from field */
2765 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2770 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2771 * generic code is generated.
/*
 * Calls the managed Nullable<T>.Unbox helper; in shared generic code the
 * helper's address is fetched from the RGCTX and invoked via an indirect
 * rgctx call.
 * NOTE(review): the opening brace, the 'if (context_used)' line guarding the
 * shared path, and closing braces are elided in this excerpt.
 */
2774 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2776 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2779 MonoInst *rgctx, *addr;
2781 /* FIXME: What if the class is shared? We might not
2782 have to get the address of the method from the
2784 addr = emit_get_rgctx_method (cfg, context_used, method,
2785 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2787 rgctx = emit_get_rgctx (cfg, method, context_used);
2789 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: direct call to the Unbox helper. */
2791 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for CIL 'unbox': verify at runtime that the boxed object on the
 * stack (*sp) has the expected element class (throwing InvalidCastException
 * otherwise), then compute the address of the value payload, which starts
 * right after the MonoObject header.
 * NOTE(review): this excerpt elides the opening brace, the obj_reg/add
 * declarations, the 'if (context_used)' / else structure around the two
 * check variants, the klass->rank check guarding the rank compare, and the
 * final return of 'add'.
 */
2796 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2800 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2801 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2802 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2803 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2805 obj_reg = sp [0]->dreg;
2806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2809 /* FIXME: generics */
2810 g_assert (klass->rank == 0);
/* The object must not be an array (rank 0). */
2813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2814 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2816 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the expected element class comes from the RGCTX. */
2820 MonoInst *element_class;
2822 /* This assertion is from the unboxcast insn */
2823 g_assert (klass->rank == 0);
2825 element_class = emit_get_rgctx_klass (cfg, context_used,
2826 klass->element_class, MONO_RGCTX_INFO_KLASS);
2828 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2829 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: compare against the compile-time element class,
 * recording cast details for better error reporting. */
2831 save_cast_details (cfg, klass->element_class, obj_reg);
2832 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2833 reset_cast_details (cfg);
/* The unboxed value lives immediately after the object header. */
2836 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2837 MONO_ADD_INS (cfg->cbb, add);
2838 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit IR allocating an instance of @klass (for 'newobj' or, when @for_box
 * is TRUE, for boxing). Picks one of several allocation strategies:
 *   - MONO_OPT_SHARED: generic mono_object_new (domain, class);
 *   - AOT out-of-line corlib allocations: a small helper keyed by the
 *     type token index, avoiding relocations;
 *   - a GC-provided managed allocator method, if available;
 *   - otherwise a class-specific allocation function, optionally passed the
 *     instance size in gpointer-sized words ('pass_lw').
 * NOTE(review): this excerpt elides the opening brace, the alloc_ftn and
 * pass_lw declarations, the 'if (pass_lw)' / else structure near the end,
 * and the closing braces of the branches.
 */
2845 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2847 MonoInst *iargs [2];
2850 if (cfg->opt & MONO_OPT_SHARED) {
2851 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2852 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2854 alloc_ftn = mono_object_new;
2855 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2856 /* This happens often in argument checking code, eg. throw new FooException... */
2857 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2858 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2859 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2861 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2862 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2865 if (managed_alloc) {
/* GC-specific managed allocator: call it with the vtable as argument. */
2866 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2867 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2869 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw case: pass the instance size rounded up to gpointer words. */
2871 guint32 lw = vtable->klass->instance_size;
2872 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2873 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2874 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2877 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2881 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Variant of handle_alloc for shared generic code, where the class is only
 * known at runtime: @data_inst is an IR value holding the runtime class or
 * vtable. Falls back to mono_object_new (shared) or
 * mono_object_new_specific; managed_alloc is permanently NULL here (see the
 * FIXME) so that branch is currently dead.
 * NOTE(review): this excerpt elides the opening brace, the alloc_ftn
 * declaration, the for_box parameter line (the signature is cut after
 * data_inst), and the closing braces.
 */
2885 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2888 MonoInst *iargs [2];
2889 MonoMethod *managed_alloc = NULL;
2893 FIXME: we cannot get managed_alloc here because we can't get
2894 the class's vtable (because it's not a closed class)
2896 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2897 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2900 if (cfg->opt & MONO_OPT_SHARED) {
2901 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2902 iargs [1] = data_inst;
2903 alloc_ftn = mono_object_new;
2905 if (managed_alloc) {
2906 iargs [0] = data_inst;
2907 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2910 iargs [0] = data_inst;
2911 alloc_ftn = mono_object_new_specific;
2914 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR for CIL 'box': for Nullable<T>, delegate to the managed
 * Nullable<T>.Box helper; otherwise allocate an instance of @klass and
 * store @val into its payload (right after the MonoObject header).
 * NOTE(review): the opening brace, the 'ins' usage after the store, and the
 * final 'return alloc;' are elided in this excerpt.
 */
2918 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2920 MonoInst *alloc, *ins;
2922 if (mono_class_is_nullable (klass)) {
2923 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2924 return mono_emit_method_call (cfg, method, &val, NULL);
2927 alloc = handle_alloc (cfg, klass, TRUE);
2929 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box: the runtime class/vtable is
 * supplied as @data_inst. Nullable<T> boxing goes through the Box helper
 * whose address is fetched from the RGCTX; otherwise the object is
 * allocated via handle_alloc_from_inst and the value stored after the
 * object header.
 * NOTE(review): the opening brace and the final 'return alloc;' are elided
 * in this excerpt.
 */
2935 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2937 MonoInst *alloc, *ins;
2939 if (mono_class_is_nullable (klass)) {
2940 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2941 /* FIXME: What if the class is shared? We might not
2942 have to get the method address from the RGCTX. */
2943 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2944 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2945 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2947 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2949 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2951 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit IR for CIL 'castclass': null passes through unchanged; otherwise
 * check that @src is an instance of @klass and throw InvalidCastException
 * if not. Interfaces use the interface-cast helper; sealed non-array
 * classes (JIT mode) compare vtables directly; everything else goes through
 * mini_emit_castclass on the class pointer. Cast details are recorded for
 * better diagnostics.
 * NOTE(review): the opening brace, the else keyword pairing, and the final
 * return of 'src' are elided in this excerpt.
 */
2958 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2960 MonoBasicBlock *is_null_bb;
2961 int obj_reg = src->dreg;
2962 int vtable_reg = alloc_preg (cfg);
2964 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always satisfies castclass: skip straight to the end. */
2966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2967 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2969 save_cast_details (cfg, klass, obj_reg);
2971 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2972 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2973 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2975 int klass_reg = alloc_preg (cfg);
2977 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class, JIT mode: an exact vtable compare suffices. */
2979 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2980 /* the remoting code is broken, access the class for now */
2982 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2985 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2988 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2991 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2995 MONO_START_BB (cfg, is_null_bb);
2997 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for CIL 'isinst': result is the object itself if it is an
 * instance of @klass, NULL otherwise (null input yields null). res_reg is
 * preloaded with the object and zeroed on the false path. Interfaces use
 * the interface-cast helper; arrays compare rank and dispatch on the cast
 * class (object, enum base, enum, interface, generic); nullable and sealed
 * classes get special-cased paths; everything else falls through to
 * mini_emit_isninst_cast on the class pointer.
 * NOTE(review): the opening brace, the 'ins' declaration line, several
 * else/closing-brace lines, the klass->rank dispatch structure, and the
 * final return are elided in this excerpt.
 */
3003 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3006 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3007 int obj_reg = src->dreg;
3008 int vtable_reg = alloc_preg (cfg);
3009 int res_reg = alloc_preg (cfg);
3011 NEW_BBLOCK (cfg, is_null_bb);
3012 NEW_BBLOCK (cfg, false_bb);
3013 NEW_BBLOCK (cfg, end_bb);
3015 /* Do the assignment at the beginning, so the other assignment can be if converted */
3016 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3017 ins->type = STACK_OBJ;
/* Null input: result is null (res_reg already holds obj_reg == 0). */
3020 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3021 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3023 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3025 /* the is_null_bb target simply copies the input register to the output */
3026 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3028 int klass_reg = alloc_preg (cfg);
3030 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then dispatch on the array's cast class. */
3033 int rank_reg = alloc_preg (cfg);
3034 int eclass_reg = alloc_preg (cfg);
3036 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3039 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3040 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3041 if (klass->cast_class == mono_defaults.object_class) {
3042 int parent_reg = alloc_preg (cfg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3044 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3045 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3046 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3047 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3048 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3049 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3051 } else if (klass->cast_class == mono_defaults.enum_class) {
3052 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3053 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3054 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3055 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3057 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3058 /* Check that the object is a vector too */
3059 int bounds_reg = alloc_preg (cfg);
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3065 /* the is_null_bb target simply copies the input register to the output */
3066 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3068 } else if (mono_class_is_nullable (klass)) {
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3070 /* the is_null_bb target simply copies the input register to the output */
3071 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class, JIT mode: exact vtable compare. */
3073 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3074 /* the remoting code is broken, access the class for now */
3076 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3083 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3086 /* the is_null_bb target simply copies the input register to the output */
3087 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* False path: result is NULL. */
3092 MONO_START_BB (cfg, false_bb);
3094 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3095 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3097 MONO_START_BB (cfg, is_null_bb);
3099 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst opcode. See the embedded comment
 * for the result encoding (0 = instance, 1 = not an instance, 2 = proxy of
 * undetermined type). Transparent proxies with custom type info yield 2 so
 * the caller can do a remote check; others take the regular class-check
 * path.
 * NOTE(review): this excerpt elides the opening brace, the ins/tmp_reg
 * declarations, several else/closing-brace lines, and the final return; the
 * trailing ICONST at the end presumably materializes the result (its
 * operand line is cut).
 */
3105 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3107 /* This opcode takes as input an object reference and a class, and returns:
3108 0) if the object is an instance of the class,
3109 1) if the object is not instance of the class,
3110 2) if the object is a proxy whose type cannot be determined */
3113 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3114 int obj_reg = src->dreg;
3115 int dreg = alloc_ireg (cfg);
3117 int klass_reg = alloc_preg (cfg);
3119 NEW_BBLOCK (cfg, true_bb);
3120 NEW_BBLOCK (cfg, false_bb);
3121 NEW_BBLOCK (cfg, false2_bb);
3122 NEW_BBLOCK (cfg, end_bb);
3123 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
3125 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3126 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3128 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3129 NEW_BBLOCK (cfg, interface_fail_bb);
3131 tmp_reg = alloc_preg (cfg);
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3133 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: only a proxy with custom type info can still
 * produce result 2; anything else is result 1. */
3134 MONO_START_BB (cfg, interface_fail_bb);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3137 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3139 tmp_reg = alloc_preg (cfg);
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect transparent proxies first. */
3144 tmp_reg = alloc_preg (cfg);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3148 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3149 tmp_reg = alloc_preg (cfg);
/* Proxy: check against the remote class's proxy_class instead. */
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3153 tmp_reg = alloc_preg (cfg);
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3158 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3161 MONO_START_BB (cfg, no_proxy_bb);
3163 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result materialization: 1, 2 or 0 depending on which block we reach. */
3166 MONO_START_BB (cfg, false_bb);
3168 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3171 MONO_START_BB (cfg, false2_bb);
3173 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3174 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3176 MONO_START_BB (cfg, true_bb);
3178 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3180 MONO_START_BB (cfg, end_bb);
3183 MONO_INST_NEW (cfg, ins, OP_ICONST);
3185 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass opcode. See the embedded
 * comment for the result encoding (0 = instance, 1 = proxy of undetermined
 * type; otherwise InvalidCastException is thrown). Mirrors handle_cisinst
 * but throws instead of returning a "not an instance" code.
 * NOTE(review): this excerpt elides the opening brace, the 'ins'
 * declaration, closing braces, and the final return; the trailing ICONST
 * presumably materializes the result (its operand line is cut).
 */
3191 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3193 /* This opcode takes as input an object reference and a class, and returns:
3194 0) if the object is an instance of the class,
3195 1) if the object is a proxy whose type cannot be determined
3196 an InvalidCastException exception is thrown otherwhise*/
3199 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3200 int obj_reg = src->dreg;
3201 int dreg = alloc_ireg (cfg);
3202 int tmp_reg = alloc_preg (cfg);
3203 int klass_reg = alloc_preg (cfg);
3205 NEW_BBLOCK (cfg, end_bb);
3206 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always casts successfully (result 0). */
3208 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3209 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3211 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3212 NEW_BBLOCK (cfg, interface_fail_bb);
3214 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3215 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: must be a proxy with custom type info (result
 * 1), otherwise InvalidCastException. */
3216 MONO_START_BB (cfg, interface_fail_bb);
3217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3219 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3221 tmp_reg = alloc_preg (cfg);
3222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3224 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3226 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: detect transparent proxies first. */
3230 NEW_BBLOCK (cfg, no_proxy_bb);
3232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3234 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3236 tmp_reg = alloc_preg (cfg);
/* Proxy: check against the remote class's proxy_class instead. */
3237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3238 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3240 tmp_reg = alloc_preg (cfg);
3241 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3242 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3243 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3245 NEW_BBLOCK (cfg, fail_1_bb);
3247 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3249 MONO_START_BB (cfg, fail_1_bb);
3251 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3252 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3254 MONO_START_BB (cfg, no_proxy_bb);
/* Non-proxy: ordinary castclass semantics (throws on mismatch). */
3256 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3259 MONO_START_BB (cfg, ok_result_bb);
3261 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3263 MONO_START_BB (cfg, end_bb);
3266 MONO_INST_NEW (cfg, ins, OP_ICONST);
3268 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR equivalent to mono_delegate_ctor: allocate the delegate
 * object, set its target/method fields, optionally allocate a per-domain
 * slot caching the compiled code of @method, and install the delegate
 * trampoline in invoke_impl. The remaining checks of mono_delegate_ctor ()
 * are performed by the trampoline itself.
 * NOTE(review): the opening brace, the domain/code_slot declarations, the
 * NULL-check around the code_slot lookup, closing braces and the final
 * 'return obj;' are elided in this excerpt.
 */
3273 static G_GNUC_UNUSED MonoInst*
3274 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3276 gpointer *trampoline;
3277 MonoInst *obj, *method_ins, *tramp_ins;
3281 obj = handle_alloc (cfg, klass, FALSE);
3283 /* Inline the contents of mono_delegate_ctor */
3285 /* Set target field */
3286 /* Optimize away setting of NULL target */
3287 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3290 /* Set method field */
3291 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3295 * To avoid looking up the compiled code belonging to the target method
3296 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3297 * store it, and we fill it after the method has been compiled.
/* Dynamic methods and AOT cannot use the per-domain code-slot cache. */
3299 if (!cfg->compile_aot && !method->dynamic) {
3300 MonoInst *code_slot_ins;
3302 domain = mono_domain_get ();
3303 mono_domain_lock (domain);
3304 if (!domain_jit_info (domain)->method_code_hash)
3305 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3306 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3308 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3309 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3311 mono_domain_unlock (domain);
3313 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3314 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3317 /* Set invoke_impl field */
3318 if (cfg->compile_aot) {
3319 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3321 trampoline = mono_create_delegate_trampoline (klass);
3322 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3326 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call creating a multi-dimensional array of the given @rank via
 * the vararg mono_array_new_va icall wrapper. Marks the method as having
 * varargs and disables LLVM compilation (LLVM can't handle the vararg
 * calling convention here).
 * NOTE(review): the opening brace and final closing brace are elided in
 * this excerpt.
 */
3332 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3334 MonoJitICallInfo *info;
3336 /* Need to register the icall so it gets an icall wrapper */
3337 info = mono_get_array_new_va_icall (rank);
3339 cfg->flags |= MONO_CFG_HAS_VARARGS;
3341 /* mono_array_new_va () needs a vararg calling convention */
3342 cfg->disable_llvm = TRUE;
3344 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3345 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT variable is initialized: prepend an OP_LOAD_GOTADDR to
 * the entry basic block (once per method) and add a dummy use in the exit
 * block so liveness analysis keeps the variable alive for the whole method.
 * No-op when there is no got_var or it is already allocated.
 * NOTE(review): the opening brace, the early 'return', and closing braces
 * are elided in this excerpt.
 */
3349 mono_emit_load_got_addr (MonoCompile *cfg)
3351 MonoInst *getaddr, *dummy_use;
3353 if (!cfg->got_var || cfg->got_var_allocated)
3356 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3357 getaddr->dreg = cfg->got_var->dreg;
3359 /* Add it to the start of the first bblock */
3360 if (cfg->bb_entry->code) {
3361 getaddr->next = cfg->bb_entry->code;
3362 cfg->bb_entry->code = getaddr;
3365 MONO_ADD_INS (cfg->bb_entry, getaddr);
3367 cfg->got_var_allocated = TRUE;
3370 * Add a dummy use to keep the got_var alive, since real uses might
3371 * only be generated by the back ends.
3372 * Add it to end_bblock, so the variable's lifetime covers the whole
3374 * It would be better to make the usage of the got var explicit in all
3375 * cases when the backend needs it (i.e. calls, throw etc.), so this
3376 * wouldn't be needed.
3378 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3379 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit (from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT),
 * initialized lazily by mono_method_check_inlining. */
3382 static int inline_limit;
3383 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being compiled.
 * Rejects: generic-sharing contexts; runtime/icall/noinline/synchronized
 * implementations, pinvokes, MarshalByRef classes, methods with exception
 * clauses or no header; bodies at or over the inline size limit; methods
 * whose class would need a cctor run that cannot be done (or proven done)
 * here; methods with declarative security; and, under soft float, methods
 * taking or returning R4. MONO_ARCH_HAVE_LMF_OPS allows some icall/pinvoke
 * inlining.
 * NOTE(review): this excerpt elides the opening brace, the 'vtable' and 'i'
 * declarations, most 'return TRUE/FALSE' lines and closing braces —
 * several conditions below appear without their visible outcome.
 * NOTE(review): getenv ("MONO_INLINELIMIT") is evaluated twice below;
 * harmless but could cache the result — needs the elided lines to fix.
 */
3386 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3388 MonoMethodHeader *header;
3390 #ifdef MONO_ARCH_SOFT_FLOAT
3391 MonoMethodSignature *sig = mono_method_signature (method);
3395 if (cfg->generic_sharing_context)
3398 #ifdef MONO_ARCH_HAVE_LMF_OPS
3399 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3400 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3401 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3405 if (method->is_inflated)
3406 /* Avoid inflating the header */
3407 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3409 header = mono_method_get_header (method);
3411 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3412 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3413 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3414 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3415 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3416 (method->klass->marshalbyref) ||
3417 !header || header->num_clauses)
3420 /* also consider num_locals? */
3421 /* Do the size check early to avoid creating vtables */
3422 if (!inline_limit_inited) {
3423 if (getenv ("MONO_INLINELIMIT"))
3424 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3426 inline_limit = INLINE_LENGTH_LIMIT;
3427 inline_limit_inited = TRUE;
3429 if (header->code_size >= inline_limit)
3433 * if we can initialize the class of the method right away, we do,
3434 * otherwise we don't allow inlining if the class needs initialization,
3435 * since it would mean inserting a call to mono_runtime_class_init()
3436 * inside the inlined code
3438 if (!(cfg->opt & MONO_OPT_SHARED)) {
3439 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3440 if (cfg->run_cctors && method->klass->has_cctor) {
3441 if (!method->klass->runtime_info)
3442 /* No vtable created yet */
3444 vtable = mono_class_vtable (cfg->domain, method->klass);
3447 /* This makes so that inline cannot trigger */
3448 /* .cctors: too many apps depend on them */
3449 /* running with a specific order... */
3450 if (! vtable->initialized)
3452 mono_runtime_class_init (vtable);
3454 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3455 if (!method->klass->runtime_info)
3456 /* No vtable created yet */
3458 vtable = mono_class_vtable (cfg->domain, method->klass);
3461 if (!vtable->initialized)
3466 * If we're compiling for shared code
3467 * the cctor will need to be run at aot method load time, for example,
3468 * or at the end of the compilation of the inlining method.
3470 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3475 * CAS - do not inline methods with declarative security
3476 * Note: this has to be before any possible return TRUE;
3478 if (mono_method_has_declsec (method))
3481 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: R4 parameters/returns can't be inlined. */
3483 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3485 for (i = 0; i < sig->param_count; ++i)
3486 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD requires the class
 * constructor of VTABLE's class to be run first. Already-initialized vtables
 * (outside AOT), BeforeFieldInit classes, and accesses from the class's own
 * instance methods do not need it. (Sampled extract: return statements for
 * some branches are elided.)
 */
3494 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3496 if (vtable->initialized && !cfg->compile_aot)
3499 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3502 if (!mono_class_needs_cctor_run (vtable->klass, method))
3505 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3506 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS: bounds-check against max_length, then
 * either an x86/amd64 LEA (for power-of-two element sizes) or an explicit
 * multiply+add into the MonoArray vector. Returns the address instruction
 * (STACK_PTR). (Sampled extract: some #else/#endif/return lines are elided.)
 */
3513 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3517 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3519 mono_class_init (klass);
3520 size = mono_class_array_element_size (klass);
3522 mult_reg = alloc_preg (cfg);
3523 array_reg = arr->dreg;
3524 index_reg = index->dreg;
3526 #if SIZEOF_REGISTER == 8
3527 /* The array reg is 64 bits but the index reg is only 32 */
3528 index2_reg = alloc_preg (cfg);
3529 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3531 if (index->type == STACK_I8) {
3532 index2_reg = alloc_preg (cfg);
3533 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3535 index2_reg = index_reg;
3539 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3541 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scaled index into a single LEA. */
3542 if (size == 1 || size == 2 || size == 4 || size == 8) {
3543 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3545 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3546 ins->type = STACK_PTR;
3552 add_reg = alloc_preg (cfg);
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3556 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3557 ins->type = STACK_PTR;
3558 MONO_ADD_INS (cfg->cbb, ins);
3563 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array: for each dimension, subtract the lower bound from the index
 * and range-check against the dimension length (throwing
 * IndexOutOfRangeException), then compute
 * addr = arr + ((realidx1 * len2 + realidx2) * size) + offsetof (vector).
 * Only compiled when the architecture has native mul/div (uses OP_PMUL).
 */
3565 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3567 int bounds_reg = alloc_preg (cfg);
3568 int add_reg = alloc_preg (cfg);
3569 int mult_reg = alloc_preg (cfg);
3570 int mult2_reg = alloc_preg (cfg);
3571 int low1_reg = alloc_preg (cfg);
3572 int low2_reg = alloc_preg (cfg);
3573 int high1_reg = alloc_preg (cfg);
3574 int high2_reg = alloc_preg (cfg);
3575 int realidx1_reg = alloc_preg (cfg);
3576 int realidx2_reg = alloc_preg (cfg);
3577 int sum_reg = alloc_preg (cfg);
3582 mono_class_init (klass);
3583 size = mono_class_array_element_size (klass);
3585 index1 = index_ins1->dreg;
3586 index2 = index_ins2->dreg;
3588 /* range checking */
3589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3590 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3592 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3593 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3594 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3595 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3596 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3598 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: its bounds record follows the first in memory. */
3600 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3601 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3602 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3603 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3604 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3605 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3606 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3608 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3609 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3611 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3612 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3614 ins->type = STACK_MP;
3616 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Address/Set call on
 * CMETHOD. Rank-1 arrays use the inline fast path, rank-2 the intrinsic
 * (when OP_LMUL is available and MONO_OPT_INTRINS is on); otherwise fall
 * back to a call to the generated managed Address wrapper.
 */
3623 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3627 MonoMethod *addr_method;
/* For a setter the trailing value argument is not an index. */
3630 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3633 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3635 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3636 /* emit_ldelema_2 depends on OP_LMUL */
3637 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3638 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3642 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3643 addr_method = mono_marshal_get_array_address (rank, element_size);
3644 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsic expansion: if CMETHOD is one of the well-known corlib methods
 * handled here (String get_Chars/get_Length/InternalSetChar, Object
 * GetType/InternalGetHashCode/.ctor, Array get_Rank/get_Length,
 * RuntimeHelpers get_OffsetToStringData, Thread intrinsics, Monitor
 * Enter/Exit fast paths, Array GetGenericValueImpl, Interlocked operations,
 * Debugger.Break, Environment.get_IsRunningOnWindows, SIMD intrinsics),
 * emit inline IR for it and return the result instruction instead of a
 * call; otherwise defer to mono_arch_emit_inst_for_method ().
 * (Sampled extract: various returns/braces/#else lines are elided.)
 */
3650 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3652 MonoInst *ins = NULL;
3654 static MonoClass *runtime_helpers_class = NULL;
3655 if (! runtime_helpers_class)
3656 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3657 "System.Runtime.CompilerServices", "RuntimeHelpers");
3659 if (cmethod->klass == mono_defaults.string_class) {
3660 if (strcmp (cmethod->name, "get_Chars") == 0) {
3661 int dreg = alloc_ireg (cfg);
3662 int index_reg = alloc_preg (cfg);
3663 int mult_reg = alloc_preg (cfg);
3664 int add_reg = alloc_preg (cfg);
3666 #if SIZEOF_REGISTER == 8
3667 /* The array reg is 64 bits but the index reg is only 32 */
3668 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3670 index_reg = args [1]->dreg;
3672 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3674 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3675 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3676 add_reg = ins->dreg;
3677 /* Avoid a warning */
3679 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Non-LEA path: addr = str + index * 2 + offsetof (MonoString, chars). */
3682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3683 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3684 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3685 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3687 type_from_op (ins, NULL, NULL);
3689 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3690 int dreg = alloc_ireg (cfg);
3691 /* Decompose later to allow more optimizations */
3692 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3693 ins->type = STACK_I4;
3694 cfg->cbb->has_array_access = TRUE;
3695 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3698 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3699 int mult_reg = alloc_preg (cfg);
3700 int add_reg = alloc_preg (cfg);
3702 /* The corlib functions check for oob already. */
3703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3704 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3708 } else if (cmethod->klass == mono_defaults.object_class) {
3710 if (strcmp (cmethod->name, "GetType") == 0) {
3711 int dreg = alloc_preg (cfg);
3712 int vt_reg = alloc_preg (cfg);
3713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3714 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3715 type_from_op (ins, NULL, NULL);
3718 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address (stable only with a non-moving GC). */
3719 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3720 int dreg = alloc_ireg (cfg);
3721 int t1 = alloc_ireg (cfg);
3723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3724 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3725 ins->type = STACK_I4;
3729 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3730 MONO_INST_NEW (cfg, ins, OP_NOP);
3731 MONO_ADD_INS (cfg->cbb, ins);
3735 } else if (cmethod->klass == mono_defaults.array_class) {
3736 if (cmethod->name [0] != 'g')
3739 if (strcmp (cmethod->name, "get_Rank") == 0) {
3740 int dreg = alloc_ireg (cfg);
3741 int vtable_reg = alloc_preg (cfg);
3742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3743 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3744 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3745 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3746 type_from_op (ins, NULL, NULL);
3749 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3750 int dreg = alloc_ireg (cfg);
3752 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3753 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3754 type_from_op (ins, NULL, NULL);
3759 } else if (cmethod->klass == runtime_helpers_class) {
3761 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3762 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3766 } else if (cmethod->klass == mono_defaults.thread_class) {
3767 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3768 ins->dreg = alloc_preg (cfg);
3769 ins->type = STACK_OBJ;
3770 MONO_ADD_INS (cfg->cbb, ins);
3772 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3773 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3774 MONO_ADD_INS (cfg->cbb, ins);
3776 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3777 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3778 MONO_ADD_INS (cfg->cbb, ins);
3781 } else if (cmethod->klass == mono_defaults.monitor_class) {
3782 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Trampoline-based Monitor fast path: pass the object in a fixed register. */
3783 if (strcmp (cmethod->name, "Enter") == 0) {
3786 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3787 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3788 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3789 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3791 return (MonoInst*)call;
3792 } else if (strcmp (cmethod->name, "Exit") == 0) {
3795 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3796 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3797 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3798 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3800 return (MonoInst*)call;
3802 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3803 MonoMethod *fast_method = NULL;
3805 /* Avoid infinite recursion */
3806 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3807 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3808 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3811 if (strcmp (cmethod->name, "Enter") == 0 ||
3812 strcmp (cmethod->name, "Exit") == 0)
3813 fast_method = mono_monitor_get_fast_path (cmethod);
3817 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3819 } else if (mini_class_is_system_array (cmethod->klass) &&
3820 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3821 MonoInst *addr, *store, *load;
3822 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3824 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3825 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3826 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3828 } else if (cmethod->klass->image == mono_defaults.corlib &&
3829 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3830 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3833 #if SIZEOF_REGISTER == 8
3834 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3835 /* 64 bit reads are already atomic */
3836 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3837 ins->dreg = mono_alloc_preg (cfg);
3838 ins->inst_basereg = args [0]->dreg;
3839 ins->inst_offset = 0;
3840 MONO_ADD_INS (cfg->cbb, ins);
3844 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement lower to an atomic add of +/-1. */
3845 if (strcmp (cmethod->name, "Increment") == 0) {
3846 MonoInst *ins_iconst;
3849 if (fsig->params [0]->type == MONO_TYPE_I4)
3850 opcode = OP_ATOMIC_ADD_NEW_I4;
3851 #if SIZEOF_REGISTER == 8
3852 else if (fsig->params [0]->type == MONO_TYPE_I8)
3853 opcode = OP_ATOMIC_ADD_NEW_I8;
3856 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3857 ins_iconst->inst_c0 = 1;
3858 ins_iconst->dreg = mono_alloc_ireg (cfg);
3859 MONO_ADD_INS (cfg->cbb, ins_iconst);
3861 MONO_INST_NEW (cfg, ins, opcode);
3862 ins->dreg = mono_alloc_ireg (cfg);
3863 ins->inst_basereg = args [0]->dreg;
3864 ins->inst_offset = 0;
3865 ins->sreg2 = ins_iconst->dreg;
3866 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3867 MONO_ADD_INS (cfg->cbb, ins);
3869 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3870 MonoInst *ins_iconst;
3873 if (fsig->params [0]->type == MONO_TYPE_I4)
3874 opcode = OP_ATOMIC_ADD_NEW_I4;
3875 #if SIZEOF_REGISTER == 8
3876 else if (fsig->params [0]->type == MONO_TYPE_I8)
3877 opcode = OP_ATOMIC_ADD_NEW_I8;
3880 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3881 ins_iconst->inst_c0 = -1;
3882 ins_iconst->dreg = mono_alloc_ireg (cfg);
3883 MONO_ADD_INS (cfg->cbb, ins_iconst);
3885 MONO_INST_NEW (cfg, ins, opcode);
3886 ins->dreg = mono_alloc_ireg (cfg);
3887 ins->inst_basereg = args [0]->dreg;
3888 ins->inst_offset = 0;
3889 ins->sreg2 = ins_iconst->dreg;
3890 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3891 MONO_ADD_INS (cfg->cbb, ins);
3893 } else if (strcmp (cmethod->name, "Add") == 0) {
3896 if (fsig->params [0]->type == MONO_TYPE_I4)
3897 opcode = OP_ATOMIC_ADD_NEW_I4;
3898 #if SIZEOF_REGISTER == 8
3899 else if (fsig->params [0]->type == MONO_TYPE_I8)
3900 opcode = OP_ATOMIC_ADD_NEW_I8;
3904 MONO_INST_NEW (cfg, ins, opcode);
3905 ins->dreg = mono_alloc_ireg (cfg);
3906 ins->inst_basereg = args [0]->dreg;
3907 ins->inst_offset = 0;
3908 ins->sreg2 = args [1]->dreg;
3909 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3910 MONO_ADD_INS (cfg->cbb, ins);
3913 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3915 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3916 if (strcmp (cmethod->name, "Exchange") == 0) {
3919 if (fsig->params [0]->type == MONO_TYPE_I4)
3920 opcode = OP_ATOMIC_EXCHANGE_I4;
3921 #if SIZEOF_REGISTER == 8
3922 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3923 (fsig->params [0]->type == MONO_TYPE_I) ||
3924 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3925 opcode = OP_ATOMIC_EXCHANGE_I8;
3927 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3928 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3929 opcode = OP_ATOMIC_EXCHANGE_I4;
3934 MONO_INST_NEW (cfg, ins, opcode);
3935 ins->dreg = mono_alloc_ireg (cfg);
3936 ins->inst_basereg = args [0]->dreg;
3937 ins->inst_offset = 0;
3938 ins->sreg2 = args [1]->dreg;
3939 MONO_ADD_INS (cfg->cbb, ins);
3941 switch (fsig->params [0]->type) {
3943 ins->type = STACK_I4;
3947 ins->type = STACK_I8;
3949 case MONO_TYPE_OBJECT:
3950 ins->type = STACK_OBJ;
3953 g_assert_not_reached ();
3956 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3958 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3959 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3961 if (fsig->params [1]->type == MONO_TYPE_I4)
3963 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3964 size = sizeof (gpointer);
/* NOTE(review): this branch re-tests MONO_TYPE_I4, which the first branch
 * already handled, so it can never be taken; it likely should test
 * MONO_TYPE_I8 — confirm against upstream mono history. */
3965 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
3968 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3969 ins->dreg = alloc_ireg (cfg);
3970 ins->sreg1 = args [0]->dreg;
3971 ins->sreg2 = args [1]->dreg;
3972 ins->sreg3 = args [2]->dreg;
3973 ins->type = STACK_I4;
3974 MONO_ADD_INS (cfg->cbb, ins);
3975 } else if (size == 8) {
3976 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3977 ins->dreg = alloc_ireg (cfg);
3978 ins->sreg1 = args [0]->dreg;
3979 ins->sreg2 = args [1]->dreg;
3980 ins->sreg3 = args [2]->dreg;
3981 ins->type = STACK_I8;
3982 MONO_ADD_INS (cfg->cbb, ins);
3984 /* g_assert_not_reached (); */
3987 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3991 } else if (cmethod->klass->image == mono_defaults.corlib) {
3992 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3993 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3994 MONO_INST_NEW (cfg, ins, OP_BREAK);
3995 MONO_ADD_INS (cfg->cbb, ins);
3998 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3999 && strcmp (cmethod->klass->name, "Environment") == 0) {
4000 #ifdef PLATFORM_WIN32
4001 EMIT_NEW_ICONST (cfg, ins, 1);
4003 EMIT_NEW_ICONST (cfg, ins, 0);
4007 } else if (cmethod->klass == mono_defaults.math_class) {
4009 * There is general branches code for Min/Max, but it does not work for
4011 * http://everything2.com/?node_id=1051618
4015 #ifdef MONO_ARCH_SIMD_INTRINSICS
4016 if (cfg->opt & MONO_OPT_SIMD) {
4017 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4023 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4027 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations. Currently
 * only String.InternalAllocateStr, which is rerouted to the GC's managed
 * string allocator when one is available; returns the call instruction,
 * or (per the visible control flow) falls through when no redirection
 * applies.
 */
4030 inline static MonoInst*
4031 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4032 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4034 if (method->klass == mono_defaults.string_class) {
4035 /* managed string allocation support */
4036 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4037 MonoInst *iargs [2];
4038 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4039 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4042 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4043 iargs [1] = args [0];
4044 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for every argument of SIG
 * (including the implicit this) and emit a store of the corresponding stack
 * value SP[i] into it, recording the vars in cfg->args. See the FIXME below
 * for why plain aliasing of SP is not used.
 */
4051 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4053 MonoInst *store, *temp;
4056 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The this argument's type comes from the stack entry, not the signature. */
4057 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4060 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4061 * would be different than the MonoInst's used to represent arguments, and
4062 * the ldelema implementation can't deal with that.
4063 * Solution: When ldelema is used on an inline argument, create a var for
4064 * it, emit ldelema on that var, and emit the saving code below in
4065 * inline_method () if needed.
4067 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4068 cfg->args [i] = temp;
4069 /* This uses cfg->args [i] which is set by the preceeding line */
4070 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4071 store->cil_code = sp [0]->cil_code;
/* Compile-time switches enabling the MONO_INLINE_*_METHOD_NAME_LIMIT
 * environment-variable filters used to restrict inlining while debugging. */
4076 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4077 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4079 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging filter: only allow inlining of CALLED_METHOD when its full
 * name starts with the prefix given in the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable (an empty or
 * unset variable imposes no restriction). The getenv result is cached in
 * a static on first use.
 */
4081 check_inline_called_method_name_limit (MonoMethod *called_method)
4084 static char *limit = NULL;
4086 if (limit == NULL) {
4087 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4089 if (limit_string != NULL)
4090 limit = limit_string;
4092 limit = (char *) "";
4095 if (limit [0] != '\0') {
4096 char *called_method_name = mono_method_full_name (called_method, TRUE);
4098 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4099 g_free (called_method_name);
4101 //return (strncmp_result <= 0);
4102 return (strncmp_result == 0);
4109 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging filter: only allow inlining into CALLER_METHOD when its full
 * name starts with the prefix given in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (an empty or
 * unset variable imposes no restriction). Mirrors
 * check_inline_called_method_name_limit ().
 */
4111 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4114 static char *limit = NULL;
4116 if (limit == NULL) {
4117 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4118 if (limit_string != NULL) {
4119 limit = limit_string;
4121 limit = (char *) "";
4125 if (limit [0] != '\0') {
4126 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4128 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4129 g_free (caller_method_name);
4131 //return (strncmp_result <= 0);
4132 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline CMETHOD at the current point of the compilation in CFG: save the
 * compile-state fields it will clobber, allocate a return variable and
 * locals for the callee, create start/end bblocks, recursively invoke
 * mono_method_to_ir (), then either commit (merge the new bblocks into the
 * CFG and load the return value) when the cost is acceptable or
 * INLINE_ALLWAYS is set, or abort (restore cfg->cbb, clear the exception
 * state) discarding the new bblocks. Returns via paths elided in this
 * sampled extract. If the inlined body never set the return variable (e.g.
 * it only throws), the rvar is filled with a type-appropriate dummy value.
 */
4140 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4141 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4143 MonoInst *ins, *rvar = NULL;
4144 MonoMethodHeader *cheader;
4145 MonoBasicBlock *ebblock, *sbblock;
4147 MonoMethod *prev_inlined_method;
4148 MonoInst **prev_locals, **prev_args;
4149 MonoType **prev_arg_types;
4150 guint prev_real_offset;
4151 GHashTable *prev_cbb_hash;
4152 MonoBasicBlock **prev_cil_offset_to_bb;
4153 MonoBasicBlock *prev_cbb;
4154 unsigned char* prev_cil_start;
4155 guint32 prev_cil_offset_to_bb_len;
4156 MonoMethod *prev_current_method;
4157 MonoGenericContext *prev_generic_context;
4158 gboolean ret_var_set, prev_ret_var_set;
4160 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4162 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4163 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4166 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4167 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4171 if (cfg->verbose_level > 2)
4172 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4174 if (!cmethod->inline_info) {
4175 mono_jit_stats.inlineable_methods++;
4176 cmethod->inline_info = 1;
4178 /* allocate space to store the return value */
4179 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4180 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4183 /* allocate local variables */
4184 cheader = mono_method_get_header (cmethod);
4185 prev_locals = cfg->locals;
4186 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4187 for (i = 0; i < cheader->num_locals; ++i)
4188 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4190 /* allocate start and end blocks */
4191 /* This is needed so if the inline is aborted, we can clean up */
4192 NEW_BBLOCK (cfg, sbblock);
4193 sbblock->real_offset = real_offset;
4195 NEW_BBLOCK (cfg, ebblock);
4196 ebblock->block_num = cfg->num_bblocks++;
4197 ebblock->real_offset = real_offset;
/* Save every compile-state field clobbered by the recursive call below. */
4199 prev_args = cfg->args;
4200 prev_arg_types = cfg->arg_types;
4201 prev_inlined_method = cfg->inlined_method;
4202 cfg->inlined_method = cmethod;
4203 cfg->ret_var_set = FALSE;
4204 prev_real_offset = cfg->real_offset;
4205 prev_cbb_hash = cfg->cbb_hash;
4206 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4207 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4208 prev_cil_start = cfg->cil_start;
4209 prev_cbb = cfg->cbb;
4210 prev_current_method = cfg->current_method;
4211 prev_generic_context = cfg->generic_context;
4212 prev_ret_var_set = cfg->ret_var_set;
4214 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4216 ret_var_set = cfg->ret_var_set;
4218 cfg->inlined_method = prev_inlined_method;
4219 cfg->real_offset = prev_real_offset;
4220 cfg->cbb_hash = prev_cbb_hash;
4221 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4222 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4223 cfg->cil_start = prev_cil_start;
4224 cfg->locals = prev_locals;
4225 cfg->args = prev_args;
4226 cfg->arg_types = prev_arg_types;
4227 cfg->current_method = prev_current_method;
4228 cfg->generic_context = prev_generic_context;
4229 cfg->ret_var_set = prev_ret_var_set;
/* NOTE(review): the 60 below is a separate commit threshold from
 * INLINE_LENGTH_LIMIT; presumably a cost cap from mono_method_to_ir —
 * confirm before changing either constant. */
4231 if ((costs >= 0 && costs < 60) || inline_allways) {
4232 if (cfg->verbose_level > 2)
4233 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4235 mono_jit_stats.inlined_methods++;
4237 /* always add some code to avoid block split failures */
4238 MONO_INST_NEW (cfg, ins, OP_NOP);
4239 MONO_ADD_INS (prev_cbb, ins);
4241 prev_cbb->next_bb = sbblock;
4242 link_bblock (cfg, prev_cbb, sbblock);
4245 * Get rid of the begin and end bblocks if possible to aid local
4248 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4250 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4251 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4253 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4254 MonoBasicBlock *prev = ebblock->in_bb [0];
4255 mono_merge_basic_blocks (cfg, prev, ebblock);
4257 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4258 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4259 cfg->cbb = prev_cbb;
4267 * If the inlined method contains only a throw, then the ret var is not
4268 * set, so set it to a dummy value.
4271 static double r8_0 = 0.0;
4273 switch (rvar->type) {
4275 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4278 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4283 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4286 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4287 ins->type = STACK_R8;
4288 ins->inst_p0 = (void*)&r8_0;
4289 ins->dreg = rvar->dreg;
4290 MONO_ADD_INS (cfg->cbb, ins);
4293 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4296 g_assert_not_reached ();
4300 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4305 if (cfg->verbose_level > 2)
4306 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4307 cfg->exception_type = MONO_EXCEPTION_NONE;
4308 mono_loader_clear_error ();
4310 /* This gets rid of the newly added bblocks */
4311 cfg->cbb = prev_cbb;
4317 * Some of these comments may well be out-of-date.
4318 * Design decisions: we do a single pass over the IL code (and we do bblock
4319 * splitting/merging in the few cases when it's required: a back jump to an IL
4320 * address that was not already seen as bblock starting point).
4321 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4322 * Complex operations are decomposed in simpler ones right away. We need to let the
4323 * arch-specific code peek and poke inside this process somehow (except when the
4324 * optimizations can take advantage of the full semantic info of coarse opcodes).
4325 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4326 * MonoInst->opcode initially is the IL opcode or some simplification of that
4327 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4328 * opcode with value bigger than OP_LAST.
4329 * At this point the IR can be handed over to an interpreter, a dumb code generator
4330 * or to the optimizing code generator that will translate it to SSA form.
4332 * Profiling directed optimizations.
4333 * We may compile by default with few or no optimizations and instrument the code
4334 * or the user may indicate what methods to optimize the most either in a config file
4335 * or through repeated runs where the compiler applies offline the optimizations to
4336 * each method and then decides if it was worth it.
/* Verification helpers for mono_method_to_ir (). They rely on locals in the
 * enclosing function (sp, stack_start, header, num_args, ip, end) and bail
 * out through the UNVERIFIED / load_error labels on failure. */
4339 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4340 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4341 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4342 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4343 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4344 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4345 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4346 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4348 /* offset from br.s -> br like opcodes */
4349 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. the
 * cil_offset_to_bb table maps it to BB or to no block at all.
 */
4352 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4354 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4356 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode, and for
 * every branch/switch create basic blocks (via GET_BBLOCK) at the branch
 * target and at the instruction following the branch. A bblock that ends in
 * CEE_THROW is additionally marked out_of_line. (Sampled extract: the
 * loop header, several case bodies and `break`s are elided.)
 */
4360 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4362 unsigned char *ip = start;
4363 unsigned char *target;
4366 MonoBasicBlock *bblock;
4367 const MonoOpcode *opcode;
4370 cli_addr = ip - start;
4371 i = mono_opcode_value ((const guint8 **)&ip, end);
4374 opcode = &mono_opcodes [i];
/* Advance ip by the operand size for each operand encoding. */
4375 switch (opcode->argument) {
4376 case MonoInlineNone:
4379 case MonoInlineString:
4380 case MonoInlineType:
4381 case MonoInlineField:
4382 case MonoInlineMethod:
4385 case MonoShortInlineR:
4392 case MonoShortInlineVar:
4393 case MonoShortInlineI:
4396 case MonoShortInlineBrTarget:
4397 target = start + cli_addr + 2 + (signed char)ip [1];
4398 GET_BBLOCK (cfg, bblock, target);
4401 GET_BBLOCK (cfg, bblock, ip);
4403 case MonoInlineBrTarget:
4404 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4405 GET_BBLOCK (cfg, bblock, target);
4408 GET_BBLOCK (cfg, bblock, ip);
4410 case MonoInlineSwitch: {
4411 guint32 n = read32 (ip + 1);
4414 cli_addr += 5 + 4 * n;
4415 target = start + cli_addr;
4416 GET_BBLOCK (cfg, bblock, target);
4418 for (j = 0; j < n; ++j) {
4419 target = start + cli_addr + (gint32)read32 (ip);
4420 GET_BBLOCK (cfg, bblock, target);
4430 g_assert_not_reached ();
4433 if (i == CEE_THROW) {
4434 unsigned char *bb_start = ip - 1;
4436 /* Find the start of the bblock containing the throw */
4438 while ((bb_start >= start) && !bblock) {
4439 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4443 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M; for wrapper methods
 * the token indexes the wrapper's data instead of the metadata tables.
 * Open constructed types are permitted (contrast mini_get_method ()).
 */
4452 static inline MonoMethod *
4453 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4457 if (m->wrapper_type != MONO_WRAPPER_NONE)
4458 return mono_method_get_wrapper_data (m, token);
4460 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods whose class is still an open constructed type
 * (the handling after the check is elided in this sampled extract).
 */
4465 static inline MonoMethod *
4466 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4468 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4470 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD (wrapper data for
 * wrapper methods, metadata lookup otherwise) and initialize it.
 */
4476 static inline MonoClass*
4477 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4481 if (method->wrapper_type != MONO_WRAPPER_NONE)
4482 klass = mono_method_get_wrapper_data (method, token);
4484 klass = mono_class_get_full (method->klass->image, token, context);
4486 mono_class_init (klass);
4491 * Returns TRUE if the JIT should abort inlining because "callee"
4492 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate declarative-security link demands for a CALLER -> CALLEE call.
 * An ECMA link demand emits code that throws a SecurityException before the
 * call; any other failure is recorded on the cfg as
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (without overwriting an earlier
 * exception).
 */
4495 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4499 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4503 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4504 if (result == MONO_JIT_SECURITY_OK)
4507 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4508 /* Generate code to throw a SecurityException before the actual call/link */
4509 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4512 NEW_ICONST (cfg, args [0], 4);
4513 NEW_METHODCONST (cfg, args [1], caller);
4514 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4515 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4516 /* don't hide previous results */
4517 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4518 cfg->exception_data = result;
/*
 * method_access_exception:
 *
 *   Return (lazily looking up and caching) the managed
 * SecurityManager.MethodAccessException (2 args) helper used to raise
 * CoreCLR method-access violations.
 * NOTE(review): the cache is a plain static with no visible locking in
 * this chunk; racing initializers would be benign only if the lookup is
 * idempotent — confirm against the full source.
 */
4526 method_access_exception (void)
4528 static MonoMethod *method = NULL;
4531 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4532 method = mono_class_get_method_from_name (secman->securitymanager,
4533 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call to the managed SecurityManager.MethodAccessException helper
 * with (caller, callee) as arguments, so a MethodAccessException is raised
 * at runtime in place of the disallowed call.
 */
4540 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4541 MonoBasicBlock *bblock, unsigned char *ip)
4543 MonoMethod *thrower = method_access_exception ();
4546 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4547 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4548 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * field_access_exception:
 *
 *   Return (lazily looking up and caching) the managed
 * SecurityManager.FieldAccessException (2 args) helper used to raise
 * CoreCLR field-access violations. Mirrors method_access_exception ().
 */
4552 field_access_exception (void)
4554 static MonoMethod *method = NULL;
4557 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4558 method = mono_class_get_method_from_name (secman->securitymanager,
4559 "FieldAccessException", 2);
/*
 * emit_throw_field_access_exception:
 *
 *   Emit a call to the managed SecurityManager.FieldAccessException helper
 * with (caller, field) as arguments, so a FieldAccessException is raised
 * at runtime in place of the disallowed field access.
 */
4566 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4567 MonoBasicBlock *bblock, unsigned char *ip)
4569 MonoMethod *thrower = field_access_exception ();
4572 EMIT_NEW_METHODCONST (cfg, args [0], caller);
/* NOTE(review): args [1] is a MonoClassField* passed through
 * EMIT_NEW_METHODCONST — apparently the const macro is reused for any
 * runtime handle; confirm the macro accepts non-method pointers. */
4573 EMIT_NEW_METHODCONST (cfg, args [1], field);
4574 mono_emit_method_call (cfg, thrower, args, NULL);
4578 * Return the original method if a wrapper is specified. We can only access
4579 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Non-wrappers are returned as-is; NATIVE_TO_MANAGED wrappers are treated
 * like native code (which behaves as Critical) and short-circuited; all
 * other wrappers are mapped back to the managed method they wrap, since
 * wrappers carry no custom attributes of their own.
 */
4582 get_original_method (MonoMethod *method)
4584 if (method->wrapper_type == MONO_WRAPPER_NONE)
4587 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4588 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4591 /* in other cases we need to find the original method */
4592 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for a field access: if FIELD's declaring class
 * is Critical and the (unwrapped) CALLER is Transparent, emit code that
 * throws a FieldAccessException at the access site. Transparent and
 * SafeCritical fields are always accessible, as are accesses from
 * SafeCritical/Critical callers.
 */
4596 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4597 MonoBasicBlock *bblock, unsigned char *ip)
4599 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4600 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4603 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4604 caller = get_original_method (caller);
/* NOTE(review): lines between here and the level check are elided —
 * presumably a NULL-caller early return; confirm in the full source. */
4608 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4609 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4610 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for a call: if CALLEE is Critical and the
 * (unwrapped) CALLER is Transparent, emit code that throws a
 * MethodAccessException at the call site. Structured identically to
 * ensure_method_is_allowed_to_access_field () above.
 */
4614 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4615 MonoBasicBlock *bblock, unsigned char *ip)
4617 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4618 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4621 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4622 caller = get_original_method (caller);
/* NOTE(review): lines between here and the level check are elided —
 * presumably a NULL-caller early return; confirm in the full source. */
4626 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4627 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4628 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4632 * Check that the IL instructions at ip are the array initialization
4633 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical array-initialization IL sequence emitted by
 * compilers after a newarr (dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray) and, when it matches, return a pointer
 * to the static initializer data so the JIT can inline the initialization.
 * OUT_SIZE receives the data size and OUT_FIELD_TOKEN the ldtoken operand.
 * For AOT compilation the field RVA is returned instead of a direct
 * pointer, since the lookup must happen at load time.
 * NOTE(review): many interior lines (element-size computation per element
 * type, the non-dynamic-image else branch) are elided in this chunk.
 */
4636 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4639 * newarr[System.Int32]
4641 * ldtoken field valuetype ...
4642 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4: high byte of the ldtoken operand must be the Field
 * table id, i.e. the token is a field token. */
4644 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4645 guint32 token = read32 (ip + 7);
4646 guint32 field_token = read32 (ip + 2);
/* Strip the metadata table id, keeping only the row index. */
4647 guint32 field_index = field_token & 0xffffff;
4649 const char *data_ptr;
4651 MonoMethod *cmethod;
4652 MonoClass *dummy_class;
4653 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4659 *out_field_token = field_token;
/* cfg is NULL here: we only need token resolution, not a compile. */
4661 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must be exactly corlib's RuntimeHelpers::InitializeArray. */
4664 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4666 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4667 case MONO_TYPE_BOOLEAN:
4671 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4672 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4673 case MONO_TYPE_CHAR:
4683 return NULL; /* stupid ARM FP swapped format */
/* The computed array data size must fit inside the RVA field. */
4693 if (size > mono_type_size (field->type, &dummy_align))
4696 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4697 if (!method->klass->image->dynamic) {
4698 field_index = read32 (ip + 2) & 0xffffff;
/* Field rows are 1-based in the metadata tables, hence the - 1. */
4699 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4700 data_ptr = mono_image_rva_map (method->klass->image, rva);
4701 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4702 /* for aot code we do the lookup on load */
4703 if (aot && data_ptr)
4704 return GUINT_TO_POINTER (rva);
4706 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images: fetch the data directly. */
4708 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for invalid IL found at IP in
 * METHOD, building a message that includes either the disassembled
 * offending instruction or a note that the method body is empty.
 */
4716 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4718 char *method_fname = mono_method_full_name (method, TRUE);
4721 if (mono_method_get_header (method)->code_size == 0)
4722 method_code = g_strdup ("method body is empty.");
4724 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4725 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4726 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The message was copied by g_strdup_printf; free the pieces. */
4727 g_free (method_fname);
4728 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG to be thrown instead
 * of compiling; the pointer is registered as a GC root since it lives in a
 * non-managed structure.
 */
4732 set_exception_object (MonoCompile *cfg, MonoException *exception)
4734 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4735 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4736 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving generic type
 * variables through the generic sharing context when one is active (a type
 * parameter's basic type decides reference-ness under sharing).
 */
4740 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4744 if (cfg->generic_sharing_context)
4745 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4747 type = &klass->byval_arg;
4748 return MONO_TYPE_IS_REFERENCE (type);
4752 * mono_decompose_array_access_opts:
4754 * Decompose array access opcodes.
4755 * This should be in decompose.c, but it emits calls so it has to stay here until
4756 * the old JIT is gone.
/*
 * For each bblock flagged has_array_access, the pass walks the instruction
 * list and lowers array opcodes into loads/checks/icalls, emitting the
 * replacement sequence into a scratch bblock (first_bb) and splicing it in
 * with mono_replace_ins (). It does not alter the CFG.
 * NOTE(review): the case labels for some visible bodies (e.g. the
 * max_length load, presumably OP_LDLEN) are elided in this chunk.
 */
4759 mono_decompose_array_access_opts (MonoCompile *cfg)
4761 MonoBasicBlock *bb, *first_bb;
4764 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4765 * can be executed anytime. It should be run before decompose_long
4769 * Create a dummy bblock and emit code into it so we can use the normal
4770 * code generation macros.
4772 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4773 first_bb = cfg->cbb;
4775 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4777 MonoInst *prev = NULL;
4779 MonoInst *iargs [3];
/* Skip bblocks with no array-access opcodes to decompose. */
4782 if (!bb->has_array_access)
4785 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
/* Reset the scratch bblock for this bb's replacements. */
4787 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4793 for (ins = bb->code; ins; ins = ins->next) {
4794 switch (ins->opcode) {
/* Array length: load MonoArray::max_length from the array pointer. */
4796 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4797 G_STRUCT_OFFSET (MonoArray, max_length));
4798 MONO_ADD_INS (cfg->cbb, dest);
4800 case OP_BOUNDS_CHECK:
/* Arch-specific bounds check: (array, length offset, index). */
4801 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* Array allocation: with MONO_OPT_SHARED the domain is passed
 * explicitly; otherwise the vtable is baked in as a constant. */
4804 if (cfg->opt & MONO_OPT_SHARED) {
4805 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4806 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4807 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4808 iargs [2]->dreg = ins->sreg1;
4810 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4811 dest->dreg = ins->dreg;
4813 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4816 NEW_VTABLECONST (cfg, iargs [0], vtable);
4817 MONO_ADD_INS (cfg->cbb, iargs [0]);
4818 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4819 iargs [1]->dreg = ins->sreg1;
4821 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4822 dest->dreg = ins->dreg;
/* String length: load MonoString::length (case label elided). */
4826 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4827 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4828 MONO_ADD_INS (cfg->cbb, dest);
4834 g_assert (cfg->cbb == first_bb);
4836 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4837 /* Replace the original instruction with the new code sequence */
4839 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
4840 first_bb->code = first_bb->last_ins = NULL;
4841 first_bb->in_count = first_bb->out_count = 0;
4842 cfg->cbb = first_bb;
4849 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4859 #ifdef MONO_ARCH_SOFT_FLOAT
4862 * mono_decompose_soft_float:
4864 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4865 * similar to long support on 32 bit platforms. 32 bit float values require special
4866 * handling when used as locals, arguments, and in calls.
4867 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Like mono_decompose_array_access_opts () above, replacement sequences are
 * emitted into a scratch bblock (first_bb) and spliced in with
 * mono_replace_ins (). R8 values become I8 (vreg-pair) operations; R4
 * values go through mono_fstore_r4/mono_fload_r4 icalls; fp calls are
 * rewritten as long/int calls; fp compares and checks become emulation
 * icalls plus integer compares.
 * NOTE(review): many case labels and surrounding lines are elided in this
 * chunk — the per-statement comments below cover only visible code.
 */
4870 mono_decompose_soft_float (MonoCompile *cfg)
4872 MonoBasicBlock *bb, *first_bb;
4875 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4879 * Create a dummy bblock and emit code into it so we can use the normal
4880 * code generation macros.
4882 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4883 first_bb = cfg->cbb;
4885 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4887 MonoInst *prev = NULL;
4890 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4892 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4898 for (ins = bb->code; ins; ins = ins->next) {
4899 const char *spec = INS_INFO (ins->opcode);
4901 /* Most fp operations are handled automatically by opcode emulation */
4903 switch (ins->opcode) {
/* R8 constant: reinterpret the double bits as an I8 constant. */
4906 d.vald = *(double*)ins->inst_p0;
4907 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4912 /* We load the r8 value */
/* NOTE(review): inst_p0 is read as float here but stored to a
 * double — apparently R4 constants are widened to R8; the
 * assignment to d.vald presumably performs the conversion. */
4913 d.vald = *(float*)ins->inst_p0;
4914 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long (vreg-pair) or int moves. */
4918 ins->opcode = OP_LMOVE;
4921 ins->opcode = OP_MOVE;
/* + 1 / + 2: select the low/high halves of the fp vreg pair. */
4922 ins->sreg1 = ins->sreg1 + 1;
4925 ins->opcode = OP_MOVE;
4926 ins->sreg1 = ins->sreg1 + 2;
/* fp return: set both halves of the long return register pair. */
4929 int reg = ins->sreg1;
4931 ins->opcode = OP_SETLRET;
4933 ins->sreg1 = reg + 1;
4934 ins->sreg2 = reg + 2;
/* R8 loads/stores become plain I8 loads/stores. */
4937 case OP_LOADR8_MEMBASE:
4938 ins->opcode = OP_LOADI8_MEMBASE;
4940 case OP_STORER8_MEMBASE_REG:
4941 ins->opcode = OP_STOREI8_MEMBASE_REG;
4943 case OP_STORER4_MEMBASE_REG: {
4944 MonoInst *iargs [2];
4947 /* Arg 1 is the double value */
4948 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4949 iargs [0]->dreg = ins->sreg1;
4951 /* Arg 2 is the address to store to */
4952 addr_reg = mono_alloc_preg (cfg);
4953 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
/* mono_fstore_r4 narrows r8 -> r4 and stores it. */
4954 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4958 case OP_LOADR4_MEMBASE: {
4959 MonoInst *iargs [1];
4963 addr_reg = mono_alloc_preg (cfg);
4964 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
/* mono_fload_r4 loads r4 and widens it to r8. */
4965 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4966 conv->dreg = ins->dreg;
4971 case OP_FCALL_MEMBASE: {
4972 MonoCallInst *call = (MonoCallInst*)ins;
4973 if (call->signature->ret->type == MONO_TYPE_R4) {
4974 MonoCallInst *call2;
4975 MonoInst *iargs [1];
4978 /* Convert the call into a call returning an int */
4979 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4980 memcpy (call2, call, sizeof (MonoCallInst));
4981 switch (ins->opcode) {
4983 call2->inst.opcode = OP_CALL;
4986 call2->inst.opcode = OP_CALL_REG;
4988 case OP_FCALL_MEMBASE:
4989 call2->inst.opcode = OP_CALL_MEMBASE;
4992 g_assert_not_reached ();
4994 call2->inst.dreg = mono_alloc_ireg (cfg);
4995 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4997 /* FIXME: Optimize this */
4999 /* Emit an r4->r8 conversion */
5000 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5001 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5002 conv->dreg = ins->dreg;
/* R8-returning fp calls: just retype to the long call variants. */
5004 switch (ins->opcode) {
5006 ins->opcode = OP_LCALL;
5009 ins->opcode = OP_LCALL_REG;
5011 case OP_FCALL_MEMBASE:
5012 ins->opcode = OP_LCALL_MEMBASE;
5015 g_assert_not_reached ();
5021 MonoJitICallInfo *info;
5022 MonoInst *iargs [2];
5023 MonoInst *call, *cmp, *br;
5025 /* Convert fcompare+fbcc to icall+icompare+beq */
/* Look up the emulation icall registered for the fp branch that
 * follows the compare. */
5027 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5030 /* Create dummy MonoInst's for the arguments */
5031 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5032 iargs [0]->dreg = ins->sreg1;
5033 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5034 iargs [1]->dreg = ins->sreg2;
5036 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5038 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5039 cmp->sreg1 = call->dreg;
5041 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall's boolean result, reusing the original
 * branch targets. */
5043 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5044 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5045 br->inst_true_bb = ins->next->inst_true_bb;
5046 br->inst_false_bb = ins->next->inst_false_bb;
5047 MONO_ADD_INS (cfg->cbb, br);
5049 /* The call sequence might include fp ins */
5052 /* Skip fbcc or fccc */
5053 NULLIFY_INS (ins->next);
5061 MonoJitICallInfo *info;
5062 MonoInst *iargs [2];
5065 /* Convert fccc to icall+icompare+iceq */
5067 info = mono_find_jit_opcode_emulation (ins->opcode);
5070 /* Create dummy MonoInst's for the arguments */
5071 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5072 iargs [0]->dreg = ins->sreg1;
5073 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5074 iargs [1]->dreg = ins->sreg2;
5076 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the comparison result as 0/1 in the original dreg. */
5078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5079 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5081 /* The call sequence might include fp ins */
5086 MonoInst *iargs [2];
5087 MonoInst *call, *cmp;
5089 /* Convert to icall+icompare+cond_exc+move */
5091 /* Create dummy MonoInst's for the arguments */
5092 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5093 iargs [0]->dreg = ins->sreg1;
/* finiteness check (CKFINITE): throw ArithmeticException on NaN/Inf. */
5095 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5097 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5098 cmp->sreg1 = call->dreg;
5100 MONO_ADD_INS (cfg->cbb, cmp);
5102 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5104 /* Do the assignment if the value is finite */
5105 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* Sanity check: after decomposition no fp vregs may remain. */
5111 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5112 mono_print_ins (ins);
5113 g_assert_not_reached ();
5118 g_assert (cfg->cbb == first_bb);
5120 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5121 /* Replace the original instruction with the new code sequence */
5123 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5124 first_bb->code = first_bb->last_ins = NULL;
5125 first_bb->in_count = first_bb->out_count = 0;
5126 cfg->cbb = first_bb;
5133 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass creates long opcodes; lower them now. */
5136 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N. When the store would be
 * a reg-reg move and the value on top of the stack is the last emitted
 * instruction AND is a plain constant (ICONST/I8CONST), redirect that
 * instruction's dreg to the local instead of emitting a move.
 */
5142 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5145 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5146 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5147 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5148 /* Optimize reg-reg moves away */
5150 * Can't optimize other opcodes, since sp[0] might point to
5151 * the last ins of a decomposed opcode.
5153 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit a normal local store. */
5155 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5160 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj T": instead of taking the local's
 * address (which would make the local address-taken), directly emit the
 * equivalent initialization — a NULL store for reference types, VZERO for
 * value types — and return the advanced IL pointer.
 * NOTE(review): the return statements and the remaining branches are
 * elided in this chunk; presumably NULL is returned when the pattern does
 * not match — confirm in the full source.
 */
5163 static inline unsigned char *
5164 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* SIZE selects the wide (ldloca, u16 index) vs short encoding. */
5173 local = read16 (ip + 2);
/* The following initobj must be in the same bblock for the peephole
 * to be valid. */
5177 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5178 gboolean skip = FALSE;
5180 /* From the INITOBJ case */
5181 token = read32 (ip + 2);
5182 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5183 CHECK_TYPELOAD (klass);
/* initobj on a reference type zeroes the reference. */
5184 if (generic_class_is_reference_type (cfg, klass)) {
5185 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5186 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5187 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* Value types are zero-filled in place. */
5188 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5189 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk up the parent chain of CLASS checking whether it derives from
 * (or is) System.Exception.
 * NOTE(review): the loop construct and return statements are elided in
 * this chunk; only the comparison and the parent step are visible.
 */
5202 is_exception_class (MonoClass *class)
5205 if (class == mono_defaults.exception_class)
5207 class = class->parent;
5213 * mono_method_to_ir:
5215 * Translate the .net IL into linear IR.
5218 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5219 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5220 guint inline_offset, gboolean is_virtual_call)
5222 MonoInst *ins, **sp, **stack_start;
5223 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5224 MonoMethod *cmethod, *method_definition;
5225 MonoInst **arg_array;
5226 MonoMethodHeader *header;
5228 guint32 token, ins_flag;
5230 MonoClass *constrained_call = NULL;
5231 unsigned char *ip, *end, *target, *err_pos;
5232 static double r8_0 = 0.0;
5233 MonoMethodSignature *sig;
5234 MonoGenericContext *generic_context = NULL;
5235 MonoGenericContainer *generic_container = NULL;
5236 MonoType **param_types;
5237 int i, n, start_new_bblock, dreg;
5238 int num_calls = 0, inline_costs = 0;
5239 int breakpoint_id = 0;
5241 MonoBoolean security, pinvoke;
5242 MonoSecurityManager* secman = NULL;
5243 MonoDeclSecurityActions actions;
5244 GSList *class_inits = NULL;
5245 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5247 gboolean init_locals;
5249 /* serialization and xdomain stuff may need access to private fields and methods */
5250 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5251 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5252 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5253 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5254 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5255 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5257 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5259 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5260 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5261 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5262 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5264 image = method->klass->image;
5265 header = mono_method_get_header (method);
5266 generic_container = mono_method_get_generic_container (method);
5267 sig = mono_method_signature (method);
5268 num_args = sig->hasthis + sig->param_count;
5269 ip = (unsigned char*)header->code;
5270 cfg->cil_start = ip;
5271 end = ip + header->code_size;
5272 mono_jit_stats.cil_code_size += header->code_size;
5273 init_locals = header->init_locals;
5276 * Methods without init_locals set could cause asserts in various passes
5281 method_definition = method;
5282 while (method_definition->is_inflated) {
5283 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5284 method_definition = imethod->declaring;
5287 /* SkipVerification is not allowed if core-clr is enabled */
5288 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5290 dont_verify_stloc = TRUE;
5293 if (!dont_verify && mini_method_verify (cfg, method_definition))
5294 goto exception_exit;
5296 if (mono_debug_using_mono_debugger ())
5297 cfg->keep_cil_nops = TRUE;
5299 if (sig->is_inflated)
5300 generic_context = mono_method_get_context (method);
5301 else if (generic_container)
5302 generic_context = &generic_container->context;
5303 cfg->generic_context = generic_context;
5305 if (!cfg->generic_sharing_context)
5306 g_assert (!sig->has_type_parameters);
5308 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5309 g_assert (method->is_inflated);
5310 g_assert (mono_method_get_context (method)->method_inst);
5312 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5313 g_assert (sig->generic_param_count);
5315 if (cfg->method == method) {
5316 cfg->real_offset = 0;
5318 cfg->real_offset = inline_offset;
5321 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5322 cfg->cil_offset_to_bb_len = header->code_size;
5324 cfg->current_method = method;
5326 if (cfg->verbose_level > 2)
5327 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5329 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5331 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5332 for (n = 0; n < sig->param_count; ++n)
5333 param_types [n + sig->hasthis] = sig->params [n];
5334 cfg->arg_types = param_types;
5336 dont_inline = g_list_prepend (dont_inline, method);
5337 if (cfg->method == method) {
5339 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5340 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5343 NEW_BBLOCK (cfg, start_bblock);
5344 cfg->bb_entry = start_bblock;
5345 start_bblock->cil_code = NULL;
5346 start_bblock->cil_length = 0;
5349 NEW_BBLOCK (cfg, end_bblock);
5350 cfg->bb_exit = end_bblock;
5351 end_bblock->cil_code = NULL;
5352 end_bblock->cil_length = 0;
5353 g_assert (cfg->num_bblocks == 2);
5355 arg_array = cfg->args;
5357 if (header->num_clauses) {
5358 cfg->spvars = g_hash_table_new (NULL, NULL);
5359 cfg->exvars = g_hash_table_new (NULL, NULL);
5361 /* handle exception clauses */
5362 for (i = 0; i < header->num_clauses; ++i) {
5363 MonoBasicBlock *try_bb;
5364 MonoExceptionClause *clause = &header->clauses [i];
5365 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5366 try_bb->real_offset = clause->try_offset;
5367 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5368 tblock->real_offset = clause->handler_offset;
5369 tblock->flags |= BB_EXCEPTION_HANDLER;
5371 link_bblock (cfg, try_bb, tblock);
5373 if (*(ip + clause->handler_offset) == CEE_POP)
5374 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5376 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5377 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5378 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5379 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5380 MONO_ADD_INS (tblock, ins);
5382 /* todo: is a fault block unsafe to optimize? */
5383 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5384 tblock->flags |= BB_EXCEPTION_UNSAFE;
5388 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5390 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5392 /* catch and filter blocks get the exception object on the stack */
5393 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5394 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5395 MonoInst *dummy_use;
5397 /* mostly like handle_stack_args (), but just sets the input args */
5398 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5399 tblock->in_scount = 1;
5400 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5401 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5404 * Add a dummy use for the exvar so its liveness info will be
5408 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5410 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5411 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5412 tblock->flags |= BB_EXCEPTION_HANDLER;
5413 tblock->real_offset = clause->data.filter_offset;
5414 tblock->in_scount = 1;
5415 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5416 /* The filter block shares the exvar with the handler block */
5417 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5418 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5419 MONO_ADD_INS (tblock, ins);
5423 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5424 clause->data.catch_class &&
5425 cfg->generic_sharing_context &&
5426 mono_class_check_context_used (clause->data.catch_class)) {
5428 * In shared generic code with catch
5429 * clauses containing type variables
5430 * the exception handling code has to
5431 * be able to get to the rgctx.
5432 * Therefore we have to make sure that
5433 * the vtable/mrgctx argument (for
5434 * static or generic methods) or the
5435 * "this" argument (for non-static
5436 * methods) are live.
5438 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5439 mini_method_get_context (method)->method_inst ||
5440 method->klass->valuetype) {
5441 mono_get_vtable_var (cfg);
5443 MonoInst *dummy_use;
5445 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5450 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5451 cfg->cbb = start_bblock;
5452 cfg->args = arg_array;
5453 mono_save_args (cfg, sig, inline_args);
5456 /* FIRST CODE BLOCK */
5457 NEW_BBLOCK (cfg, bblock);
5458 bblock->cil_code = ip;
5462 ADD_BBLOCK (cfg, bblock);
5464 if (cfg->method == method) {
5465 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5466 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5467 MONO_INST_NEW (cfg, ins, OP_BREAK);
5468 MONO_ADD_INS (bblock, ins);
5472 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5473 secman = mono_security_manager_get_methods ();
5475 security = (secman && mono_method_has_declsec (method));
5476 /* at this point having security doesn't mean we have any code to generate */
5477 if (security && (cfg->method == method)) {
5478 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5479 * And we do not want to enter the next section (with allocation) if we
5480 * have nothing to generate */
5481 security = mono_declsec_get_demands (method, &actions);
5484 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5485 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5487 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5488 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5489 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5491 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5492 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5496 mono_custom_attrs_free (custom);
5499 custom = mono_custom_attrs_from_class (wrapped->klass);
5500 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5504 mono_custom_attrs_free (custom);
5507 /* not a P/Invoke after all */
5512 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5513 /* we use a separate basic block for the initialization code */
5514 NEW_BBLOCK (cfg, init_localsbb);
5515 cfg->bb_init = init_localsbb;
5516 init_localsbb->real_offset = cfg->real_offset;
5517 start_bblock->next_bb = init_localsbb;
5518 init_localsbb->next_bb = bblock;
5519 link_bblock (cfg, start_bblock, init_localsbb);
5520 link_bblock (cfg, init_localsbb, bblock);
5522 cfg->cbb = init_localsbb;
5524 start_bblock->next_bb = bblock;
5525 link_bblock (cfg, start_bblock, bblock);
5528 /* at this point we know, if security is TRUE, that some code needs to be generated */
5529 if (security && (cfg->method == method)) {
5532 mono_jit_stats.cas_demand_generation++;
5534 if (actions.demand.blob) {
5535 /* Add code for SecurityAction.Demand */
5536 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5537 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5538 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5539 mono_emit_method_call (cfg, secman->demand, args, NULL);
5541 if (actions.noncasdemand.blob) {
5542 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5543 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5544 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5545 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5546 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5547 mono_emit_method_call (cfg, secman->demand, args, NULL);
5549 if (actions.demandchoice.blob) {
5550 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5551 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5552 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5553 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5554 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5558 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5560 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5563 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5564 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5565 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5566 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5567 if (!(method->klass && method->klass->image &&
5568 mono_security_core_clr_is_platform_image (method->klass->image))) {
5569 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5575 if (header->code_size == 0)
5578 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5583 if (cfg->method == method)
5584 mono_debug_init_method (cfg, bblock, breakpoint_id);
5586 for (n = 0; n < header->num_locals; ++n) {
5587 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5592 /* We force the vtable variable here for all shared methods
5593 for the possibility that they might show up in a stack
5594 trace where their exact instantiation is needed. */
5595 if (cfg->generic_sharing_context && method == cfg->method) {
5596 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5597 mini_method_get_context (method)->method_inst ||
5598 method->klass->valuetype) {
5599 mono_get_vtable_var (cfg);
5601 /* FIXME: Is there a better way to do this?
5602 We need the variable live for the duration
5603 of the whole method. */
5604 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5608 /* add a check for this != NULL to inlined methods */
5609 if (is_virtual_call) {
5612 NEW_ARGLOAD (cfg, arg_ins, 0);
5613 MONO_ADD_INS (cfg->cbb, arg_ins);
5614 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5615 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5616 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5619 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5620 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5623 start_new_bblock = 0;
5627 if (cfg->method == method)
5628 cfg->real_offset = ip - header->code;
5630 cfg->real_offset = inline_offset;
5635 if (start_new_bblock) {
5636 bblock->cil_length = ip - bblock->cil_code;
5637 if (start_new_bblock == 2) {
5638 g_assert (ip == tblock->cil_code);
5640 GET_BBLOCK (cfg, tblock, ip);
5642 bblock->next_bb = tblock;
5645 start_new_bblock = 0;
5646 for (i = 0; i < bblock->in_scount; ++i) {
5647 if (cfg->verbose_level > 3)
5648 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5649 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5653 g_slist_free (class_inits);
5656 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5657 link_bblock (cfg, bblock, tblock);
5658 if (sp != stack_start) {
5659 handle_stack_args (cfg, stack_start, sp - stack_start);
5661 CHECK_UNVERIFIABLE (cfg);
5663 bblock->next_bb = tblock;
5666 for (i = 0; i < bblock->in_scount; ++i) {
5667 if (cfg->verbose_level > 3)
5668 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5669 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5672 g_slist_free (class_inits);
5677 bblock->real_offset = cfg->real_offset;
5679 if ((cfg->method == method) && cfg->coverage_info) {
5680 guint32 cil_offset = ip - header->code;
5681 cfg->coverage_info->data [cil_offset].cil_code = ip;
5683 /* TODO: Use an increment here */
5684 #if defined(TARGET_X86)
5685 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5686 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5688 MONO_ADD_INS (cfg->cbb, ins);
5690 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5695 if (cfg->verbose_level > 3)
5696 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5700 if (cfg->keep_cil_nops)
5701 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5703 MONO_INST_NEW (cfg, ins, OP_NOP);
5705 MONO_ADD_INS (bblock, ins);
5708 MONO_INST_NEW (cfg, ins, OP_BREAK);
5710 MONO_ADD_INS (bblock, ins);
5716 CHECK_STACK_OVF (1);
5717 n = (*ip)-CEE_LDARG_0;
5719 EMIT_NEW_ARGLOAD (cfg, ins, n);
5727 CHECK_STACK_OVF (1);
5728 n = (*ip)-CEE_LDLOC_0;
5730 EMIT_NEW_LOCLOAD (cfg, ins, n);
5739 n = (*ip)-CEE_STLOC_0;
5742 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5744 emit_stloc_ir (cfg, sp, header, n);
5751 CHECK_STACK_OVF (1);
5754 EMIT_NEW_ARGLOAD (cfg, ins, n);
5760 CHECK_STACK_OVF (1);
5763 NEW_ARGLOADA (cfg, ins, n);
5764 MONO_ADD_INS (cfg->cbb, ins);
5774 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5776 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5781 CHECK_STACK_OVF (1);
5784 EMIT_NEW_LOCLOAD (cfg, ins, n);
5788 case CEE_LDLOCA_S: {
5789 unsigned char *tmp_ip;
5791 CHECK_STACK_OVF (1);
5792 CHECK_LOCAL (ip [1]);
5794 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5800 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5809 CHECK_LOCAL (ip [1]);
5810 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5812 emit_stloc_ir (cfg, sp, header, ip [1]);
5817 CHECK_STACK_OVF (1);
5818 EMIT_NEW_PCONST (cfg, ins, NULL);
5819 ins->type = STACK_OBJ;
5824 CHECK_STACK_OVF (1);
5825 EMIT_NEW_ICONST (cfg, ins, -1);
5838 CHECK_STACK_OVF (1);
5839 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5845 CHECK_STACK_OVF (1);
5847 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5853 CHECK_STACK_OVF (1);
5854 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5860 CHECK_STACK_OVF (1);
5861 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5862 ins->type = STACK_I8;
5863 ins->dreg = alloc_dreg (cfg, STACK_I8);
5865 ins->inst_l = (gint64)read64 (ip);
5866 MONO_ADD_INS (bblock, ins);
5872 /* FIXME: we should really allocate this only late in the compilation process */
5873 f = mono_domain_alloc (cfg->domain, sizeof (float));
5875 CHECK_STACK_OVF (1);
5876 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5877 ins->type = STACK_R8;
5878 ins->dreg = alloc_dreg (cfg, STACK_R8);
5882 MONO_ADD_INS (bblock, ins);
5890 /* FIXME: we should really allocate this only late in the compilation process */
5891 d = mono_domain_alloc (cfg->domain, sizeof (double));
5893 CHECK_STACK_OVF (1);
5894 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5895 ins->type = STACK_R8;
5896 ins->dreg = alloc_dreg (cfg, STACK_R8);
5900 MONO_ADD_INS (bblock, ins);
5907 MonoInst *temp, *store;
5909 CHECK_STACK_OVF (1);
5913 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5914 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5916 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5919 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5932 if (sp [0]->type == STACK_R8)
5933 /* we need to pop the value from the x86 FP stack */
5934 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5943 if (stack_start != sp)
5945 token = read32 (ip + 1);
5946 /* FIXME: check the signature matches */
5947 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5952 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5953 GENERIC_SHARING_FAILURE (CEE_JMP);
5955 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5956 CHECK_CFG_EXCEPTION;
5958 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5960 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5963 /* Handle tail calls similarly to calls */
5964 n = fsig->param_count + fsig->hasthis;
5966 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5967 call->method = cmethod;
5968 call->tail_call = TRUE;
5969 call->signature = mono_method_signature (cmethod);
5970 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5971 call->inst.inst_p0 = cmethod;
5972 for (i = 0; i < n; ++i)
5973 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5975 mono_arch_emit_call (cfg, call);
5976 MONO_ADD_INS (bblock, (MonoInst*)call);
5979 for (i = 0; i < num_args; ++i)
5980 /* Prevent arguments from being optimized away */
5981 arg_array [i]->flags |= MONO_INST_VOLATILE;
5983 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5984 ins = (MonoInst*)call;
5985 ins->inst_p0 = cmethod;
5986 MONO_ADD_INS (bblock, ins);
5990 start_new_bblock = 1;
5995 case CEE_CALLVIRT: {
5996 MonoInst *addr = NULL;
5997 MonoMethodSignature *fsig = NULL;
5999 int virtual = *ip == CEE_CALLVIRT;
6000 int calli = *ip == CEE_CALLI;
6001 gboolean pass_imt_from_rgctx = FALSE;
6002 MonoInst *imt_arg = NULL;
6003 gboolean pass_vtable = FALSE;
6004 gboolean pass_mrgctx = FALSE;
6005 MonoInst *vtable_arg = NULL;
6006 gboolean check_this = FALSE;
6007 gboolean supported_tail_call = FALSE;
6010 token = read32 (ip + 1);
6017 if (method->wrapper_type != MONO_WRAPPER_NONE)
6018 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6020 fsig = mono_metadata_parse_signature (image, token);
6022 n = fsig->param_count + fsig->hasthis;
6024 MonoMethod *cil_method;
6026 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6027 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6028 cil_method = cmethod;
6029 } else if (constrained_call) {
6030 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6032 * This is needed since get_method_constrained can't find
6033 * the method in klass representing a type var.
6034 * The type var is guaranteed to be a reference type in this
6037 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6038 cil_method = cmethod;
6039 g_assert (!cmethod->klass->valuetype);
6041 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6044 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6045 cil_method = cmethod;
6050 if (!dont_verify && !cfg->skip_visibility) {
6051 MonoMethod *target_method = cil_method;
6052 if (method->is_inflated) {
6053 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6055 if (!mono_method_can_access_method (method_definition, target_method) &&
6056 !mono_method_can_access_method (method, cil_method))
6057 METHOD_ACCESS_FAILURE;
6060 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6061 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6063 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6064 /* MS.NET seems to silently convert this to a callvirt */
6067 if (!cmethod->klass->inited)
6068 if (!mono_class_init (cmethod->klass))
6071 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6072 mini_class_is_system_array (cmethod->klass)) {
6073 array_rank = cmethod->klass->rank;
6074 fsig = mono_method_signature (cmethod);
6076 if (mono_method_signature (cmethod)->pinvoke) {
6077 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6078 check_for_pending_exc, FALSE);
6079 fsig = mono_method_signature (wrapper);
6080 } else if (constrained_call) {
6081 fsig = mono_method_signature (cmethod);
6083 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6087 mono_save_token_info (cfg, image, token, cil_method);
6089 n = fsig->param_count + fsig->hasthis;
6091 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6092 if (check_linkdemand (cfg, method, cmethod))
6094 CHECK_CFG_EXCEPTION;
6097 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6098 g_assert_not_reached ();
6101 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6104 if (!cfg->generic_sharing_context && cmethod)
6105 g_assert (!mono_method_check_context_used (cmethod));
6109 //g_assert (!virtual || fsig->hasthis);
6113 if (constrained_call) {
6115 * We have the `constrained.' prefix opcode.
6117 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6121 * The type parameter is instantiated as a valuetype,
6122 * but that type doesn't override the method we're
6123 * calling, so we need to box `this'.
6125 dreg = alloc_dreg (cfg, STACK_VTYPE);
6126 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6127 ins->klass = constrained_call;
6128 sp [0] = handle_box (cfg, ins, constrained_call);
6129 } else if (!constrained_call->valuetype) {
6130 int dreg = alloc_preg (cfg);
6133 * The type parameter is instantiated as a reference
6134 * type. We have a managed pointer on the stack, so
6135 * we need to dereference it here.
6137 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6138 ins->type = STACK_OBJ;
6140 } else if (cmethod->klass->valuetype)
6142 constrained_call = NULL;
6145 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6149 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6150 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6151 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6152 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6153 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6156 * Pass vtable iff target method might
6157 * be shared, which means that sharing
6158 * is enabled for its class and its
6159 * context is sharable (and it's not a
6162 if (sharing_enabled && context_sharable &&
6163 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6167 if (cmethod && mini_method_get_context (cmethod) &&
6168 mini_method_get_context (cmethod)->method_inst) {
6169 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6170 MonoGenericContext *context = mini_method_get_context (cmethod);
6171 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6173 g_assert (!pass_vtable);
6175 if (sharing_enabled && context_sharable)
6179 if (cfg->generic_sharing_context && cmethod) {
6180 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6182 context_used = mono_method_check_context_used (cmethod);
6184 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6185 /* Generic method interface
6186 calls are resolved via a
6187 helper function and don't
6189 if (!cmethod_context || !cmethod_context->method_inst)
6190 pass_imt_from_rgctx = TRUE;
6194 * If a shared method calls another
6195 * shared method then the caller must
6196 * have a generic sharing context
6197 * because the magic trampoline
6198 * requires it. FIXME: We shouldn't
6199 * have to force the vtable/mrgctx
6200 * variable here. Instead there
6201 * should be a flag in the cfg to
6202 * request a generic sharing context.
6205 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6206 mono_get_vtable_var (cfg);
6211 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6213 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6215 CHECK_TYPELOAD (cmethod->klass);
6216 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6221 g_assert (!vtable_arg);
6224 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6226 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6229 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6230 MONO_METHOD_IS_FINAL (cmethod)) {
6237 if (pass_imt_from_rgctx) {
6238 g_assert (!pass_vtable);
6241 imt_arg = emit_get_rgctx_method (cfg, context_used,
6242 cmethod, MONO_RGCTX_INFO_METHOD);
6248 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6249 check->sreg1 = sp [0]->dreg;
6250 MONO_ADD_INS (cfg->cbb, check);
6253 /* Calling virtual generic methods */
6254 if (cmethod && virtual &&
6255 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6256 !(MONO_METHOD_IS_FINAL (cmethod) &&
6257 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6258 mono_method_signature (cmethod)->generic_param_count) {
6259 MonoInst *this_temp, *this_arg_temp, *store;
6260 MonoInst *iargs [4];
6262 g_assert (mono_method_signature (cmethod)->is_inflated);
6264 /* Prevent inlining of methods that contain indirect calls */
6267 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6268 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6269 g_assert (!imt_arg);
6271 imt_arg = emit_get_rgctx_method (cfg, context_used,
6272 cmethod, MONO_RGCTX_INFO_METHOD);
6275 g_assert (cmethod->is_inflated);
6276 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6278 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6282 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6283 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6284 MONO_ADD_INS (bblock, store);
6286 /* FIXME: This should be a managed pointer */
6287 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6289 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6291 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6292 cmethod, MONO_RGCTX_INFO_METHOD);
6293 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6294 addr = mono_emit_jit_icall (cfg,
6295 mono_helper_compile_generic_method, iargs);
6297 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6298 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6299 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6302 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6304 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6307 if (!MONO_TYPE_IS_VOID (fsig->ret))
6308 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6315 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6316 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6318 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6322 /* FIXME: runtime generic context pointer for jumps? */
6323 /* FIXME: handle this for generic sharing eventually */
6324 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6327 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6330 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6331 /* Handle tail calls similarly to calls */
6332 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6334 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6335 call->tail_call = TRUE;
6336 call->method = cmethod;
6337 call->signature = mono_method_signature (cmethod);
6340 * We implement tail calls by storing the actual arguments into the
6341 * argument variables, then emitting a CEE_JMP.
6343 for (i = 0; i < n; ++i) {
6344 /* Prevent argument from being register allocated */
6345 arg_array [i]->flags |= MONO_INST_VOLATILE;
6346 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6350 ins = (MonoInst*)call;
6351 ins->inst_p0 = cmethod;
6352 ins->inst_p1 = arg_array [0];
6353 MONO_ADD_INS (bblock, ins);
6354 link_bblock (cfg, bblock, end_bblock);
6355 start_new_bblock = 1;
6356 /* skip CEE_RET as well */
6362 /* Conversion to a JIT intrinsic */
6363 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6364 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6365 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6376 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6377 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6378 mono_method_check_inlining (cfg, cmethod) &&
6379 !g_list_find (dont_inline, cmethod)) {
6381 gboolean allways = FALSE;
6383 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6384 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6385 /* Prevent inlining of methods that call wrappers */
6387 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6391 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6393 cfg->real_offset += 5;
6396 if (!MONO_TYPE_IS_VOID (fsig->ret))
6397 /* *sp is already set by inline_method */
6400 inline_costs += costs;
6406 inline_costs += 10 * num_calls++;
6408 /* Tail recursion elimination */
6409 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6410 gboolean has_vtargs = FALSE;
6413 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6416 /* keep it simple */
6417 for (i = fsig->param_count - 1; i >= 0; i--) {
6418 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6423 for (i = 0; i < n; ++i)
6424 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6425 MONO_INST_NEW (cfg, ins, OP_BR);
6426 MONO_ADD_INS (bblock, ins);
6427 tblock = start_bblock->out_bb [0];
6428 link_bblock (cfg, bblock, tblock);
6429 ins->inst_target_bb = tblock;
6430 start_new_bblock = 1;
6432 /* skip the CEE_RET, too */
6433 if (ip_in_bb (cfg, bblock, ip + 5))
6443 /* Generic sharing */
6444 /* FIXME: only do this for generic methods if
6445 they are not shared! */
6446 if (context_used && !imt_arg && !array_rank &&
6447 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6448 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6449 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6450 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6453 g_assert (cfg->generic_sharing_context && cmethod);
6457 * We are compiling a call to a
6458 * generic method from shared code,
6459 * which means that we have to look up
6460 * the method in the rgctx and do an
6463 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6466 /* Indirect calls */
6468 g_assert (!imt_arg);
6470 if (*ip == CEE_CALL)
6471 g_assert (context_used);
6472 else if (*ip == CEE_CALLI)
6473 g_assert (!vtable_arg);
6475 /* FIXME: what the hell is this??? */
6476 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6477 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6479 /* Prevent inlining of methods with indirect calls */
6483 #ifdef MONO_ARCH_RGCTX_REG
6485 int rgctx_reg = mono_alloc_preg (cfg);
6487 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6488 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6489 call = (MonoCallInst*)ins;
6490 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6491 cfg->uses_rgctx_reg = TRUE;
6492 call->rgctx_reg = TRUE;
6497 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6499 * Instead of emitting an indirect call, emit a direct call
6500 * with the contents of the aotconst as the patch info.
6502 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6505 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6508 if (!MONO_TYPE_IS_VOID (fsig->ret))
6509 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6520 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6521 if (sp [fsig->param_count]->type == STACK_OBJ) {
6522 MonoInst *iargs [2];
6525 iargs [1] = sp [fsig->param_count];
6527 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6530 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6531 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6532 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6533 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6535 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6538 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6539 if (!cmethod->klass->element_class->valuetype && !readonly)
6540 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6543 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6546 g_assert_not_reached ();
6554 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6556 if (!MONO_TYPE_IS_VOID (fsig->ret))
6557 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6567 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6569 } else if (imt_arg) {
6570 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6572 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6575 if (!MONO_TYPE_IS_VOID (fsig->ret))
6576 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6583 if (cfg->method != method) {
6584 /* return from inlined method */
6586 * If in_count == 0, that means the ret is unreachable due to
6587 * being preceeded by a throw. In that case, inline_method () will
6588 * handle setting the return value
6589 * (test case: test_0_inline_throw ()).
6591 if (return_var && cfg->cbb->in_count) {
6595 //g_assert (returnvar != -1);
6596 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6597 cfg->ret_var_set = TRUE;
6601 MonoType *ret_type = mono_method_signature (method)->ret;
6603 g_assert (!return_var);
6606 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6609 if (!cfg->vret_addr) {
6612 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6614 EMIT_NEW_RETLOADA (cfg, ret_addr);
6616 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6617 ins->klass = mono_class_from_mono_type (ret_type);
6620 #ifdef MONO_ARCH_SOFT_FLOAT
6621 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6622 MonoInst *iargs [1];
6626 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6627 mono_arch_emit_setret (cfg, method, conv);
6629 mono_arch_emit_setret (cfg, method, *sp);
6632 mono_arch_emit_setret (cfg, method, *sp);
6637 if (sp != stack_start)
6639 MONO_INST_NEW (cfg, ins, OP_BR);
6641 ins->inst_target_bb = end_bblock;
6642 MONO_ADD_INS (bblock, ins);
6643 link_bblock (cfg, bblock, end_bblock);
6644 start_new_bblock = 1;
6648 MONO_INST_NEW (cfg, ins, OP_BR);
6650 target = ip + 1 + (signed char)(*ip);
6652 GET_BBLOCK (cfg, tblock, target);
6653 link_bblock (cfg, bblock, tblock);
6654 ins->inst_target_bb = tblock;
6655 if (sp != stack_start) {
6656 handle_stack_args (cfg, stack_start, sp - stack_start);
6658 CHECK_UNVERIFIABLE (cfg);
6660 MONO_ADD_INS (bblock, ins);
6661 start_new_bblock = 1;
6662 inline_costs += BRANCH_COST;
6676 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6678 target = ip + 1 + *(signed char*)ip;
6684 inline_costs += BRANCH_COST;
6688 MONO_INST_NEW (cfg, ins, OP_BR);
6691 target = ip + 4 + (gint32)read32(ip);
6693 GET_BBLOCK (cfg, tblock, target);
6694 link_bblock (cfg, bblock, tblock);
6695 ins->inst_target_bb = tblock;
6696 if (sp != stack_start) {
6697 handle_stack_args (cfg, stack_start, sp - stack_start);
6699 CHECK_UNVERIFIABLE (cfg);
6702 MONO_ADD_INS (bblock, ins);
6704 start_new_bblock = 1;
6705 inline_costs += BRANCH_COST;
6712 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6713 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6714 guint32 opsize = is_short ? 1 : 4;
6716 CHECK_OPSIZE (opsize);
6718 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6721 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6726 GET_BBLOCK (cfg, tblock, target);
6727 link_bblock (cfg, bblock, tblock);
6728 GET_BBLOCK (cfg, tblock, ip);
6729 link_bblock (cfg, bblock, tblock);
6731 if (sp != stack_start) {
6732 handle_stack_args (cfg, stack_start, sp - stack_start);
6733 CHECK_UNVERIFIABLE (cfg);
6736 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6737 cmp->sreg1 = sp [0]->dreg;
6738 type_from_op (cmp, sp [0], NULL);
6741 #if SIZEOF_REGISTER == 4
6742 if (cmp->opcode == OP_LCOMPARE_IMM) {
6743 /* Convert it to OP_LCOMPARE */
6744 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6745 ins->type = STACK_I8;
6746 ins->dreg = alloc_dreg (cfg, STACK_I8);
6748 MONO_ADD_INS (bblock, ins);
6749 cmp->opcode = OP_LCOMPARE;
6750 cmp->sreg2 = ins->dreg;
6753 MONO_ADD_INS (bblock, cmp);
6755 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6756 type_from_op (ins, sp [0], NULL);
6757 MONO_ADD_INS (bblock, ins);
6758 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6759 GET_BBLOCK (cfg, tblock, target);
6760 ins->inst_true_bb = tblock;
6761 GET_BBLOCK (cfg, tblock, ip);
6762 ins->inst_false_bb = tblock;
6763 start_new_bblock = 2;
6766 inline_costs += BRANCH_COST;
6781 MONO_INST_NEW (cfg, ins, *ip);
6783 target = ip + 4 + (gint32)read32(ip);
6789 inline_costs += BRANCH_COST;
6793 MonoBasicBlock **targets;
6794 MonoBasicBlock *default_bblock;
6795 MonoJumpInfoBBTable *table;
6796 int offset_reg = alloc_preg (cfg);
6797 int target_reg = alloc_preg (cfg);
6798 int table_reg = alloc_preg (cfg);
6799 int sum_reg = alloc_preg (cfg);
6800 gboolean use_op_switch;
6804 n = read32 (ip + 1);
6807 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6811 CHECK_OPSIZE (n * sizeof (guint32));
6812 target = ip + n * sizeof (guint32);
6814 GET_BBLOCK (cfg, default_bblock, target);
6816 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6817 for (i = 0; i < n; ++i) {
6818 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6819 targets [i] = tblock;
6823 if (sp != stack_start) {
6825 * Link the current bb with the targets as well, so handle_stack_args
6826 * will set their in_stack correctly.
6828 link_bblock (cfg, bblock, default_bblock);
6829 for (i = 0; i < n; ++i)
6830 link_bblock (cfg, bblock, targets [i]);
6832 handle_stack_args (cfg, stack_start, sp - stack_start);
6834 CHECK_UNVERIFIABLE (cfg);
6837 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6841 for (i = 0; i < n; ++i)
6842 link_bblock (cfg, bblock, targets [i]);
6844 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6845 table->table = targets;
6846 table->table_size = n;
6848 use_op_switch = FALSE;
6850 /* ARM implements SWITCH statements differently */
6851 /* FIXME: Make it use the generic implementation */
6852 if (!cfg->compile_aot)
6853 use_op_switch = TRUE;
6856 if (COMPILE_LLVM (cfg))
6857 use_op_switch = TRUE;
6859 if (use_op_switch) {
6860 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6861 ins->sreg1 = src1->dreg;
6862 ins->inst_p0 = table;
6863 ins->inst_many_bb = targets;
6864 ins->klass = GUINT_TO_POINTER (n);
6865 MONO_ADD_INS (cfg->cbb, ins);
6867 if (sizeof (gpointer) == 8)
6868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6872 #if SIZEOF_REGISTER == 8
6873 /* The upper word might not be zero, and we add it to a 64 bit address later */
6874 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6877 if (cfg->compile_aot) {
6878 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6880 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6881 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6882 ins->inst_p0 = table;
6883 ins->dreg = table_reg;
6884 MONO_ADD_INS (cfg->cbb, ins);
6887 /* FIXME: Use load_memindex */
6888 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6889 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6890 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6892 start_new_bblock = 1;
6893 inline_costs += (BRANCH_COST * 2);
6913 dreg = alloc_freg (cfg);
6916 dreg = alloc_lreg (cfg);
6919 dreg = alloc_preg (cfg);
6922 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6923 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6924 ins->flags |= ins_flag;
6926 MONO_ADD_INS (bblock, ins);
6941 #if HAVE_WRITE_BARRIERS
6942 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6943 /* insert call to write barrier */
6944 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6945 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6952 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6953 ins->flags |= ins_flag;
6955 MONO_ADD_INS (bblock, ins);
6963 MONO_INST_NEW (cfg, ins, (*ip));
6965 ins->sreg1 = sp [0]->dreg;
6966 ins->sreg2 = sp [1]->dreg;
6967 type_from_op (ins, sp [0], sp [1]);
6969 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6971 /* Use the immediate opcodes if possible */
6972 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6973 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6974 if (imm_opcode != -1) {
6975 ins->opcode = imm_opcode;
6976 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6979 sp [1]->opcode = OP_NOP;
6983 MONO_ADD_INS ((cfg)->cbb, (ins));
6985 *sp++ = mono_decompose_opcode (cfg, ins);
7002 MONO_INST_NEW (cfg, ins, (*ip));
7004 ins->sreg1 = sp [0]->dreg;
7005 ins->sreg2 = sp [1]->dreg;
7006 type_from_op (ins, sp [0], sp [1]);
7008 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7009 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7011 /* FIXME: Pass opcode to is_inst_imm */
7013 /* Use the immediate opcodes if possible */
7014 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7017 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7018 if (imm_opcode != -1) {
7019 ins->opcode = imm_opcode;
7020 if (sp [1]->opcode == OP_I8CONST) {
7021 #if SIZEOF_REGISTER == 8
7022 ins->inst_imm = sp [1]->inst_l;
7024 ins->inst_ls_word = sp [1]->inst_ls_word;
7025 ins->inst_ms_word = sp [1]->inst_ms_word;
7029 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7032 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7033 if (sp [1]->next == NULL)
7034 sp [1]->opcode = OP_NOP;
7037 MONO_ADD_INS ((cfg)->cbb, (ins));
7039 *sp++ = mono_decompose_opcode (cfg, ins);
7052 case CEE_CONV_OVF_I8:
7053 case CEE_CONV_OVF_U8:
7057 /* Special case this earlier so we have long constants in the IR */
7058 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7059 int data = sp [-1]->inst_c0;
7060 sp [-1]->opcode = OP_I8CONST;
7061 sp [-1]->type = STACK_I8;
7062 #if SIZEOF_REGISTER == 8
7063 if ((*ip) == CEE_CONV_U8)
7064 sp [-1]->inst_c0 = (guint32)data;
7066 sp [-1]->inst_c0 = data;
7068 sp [-1]->inst_ls_word = data;
7069 if ((*ip) == CEE_CONV_U8)
7070 sp [-1]->inst_ms_word = 0;
7072 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7074 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7081 case CEE_CONV_OVF_I4:
7082 case CEE_CONV_OVF_I1:
7083 case CEE_CONV_OVF_I2:
7084 case CEE_CONV_OVF_I:
7085 case CEE_CONV_OVF_U:
7088 if (sp [-1]->type == STACK_R8) {
7089 ADD_UNOP (CEE_CONV_OVF_I8);
7096 case CEE_CONV_OVF_U1:
7097 case CEE_CONV_OVF_U2:
7098 case CEE_CONV_OVF_U4:
7101 if (sp [-1]->type == STACK_R8) {
7102 ADD_UNOP (CEE_CONV_OVF_U8);
7109 case CEE_CONV_OVF_I1_UN:
7110 case CEE_CONV_OVF_I2_UN:
7111 case CEE_CONV_OVF_I4_UN:
7112 case CEE_CONV_OVF_I8_UN:
7113 case CEE_CONV_OVF_U1_UN:
7114 case CEE_CONV_OVF_U2_UN:
7115 case CEE_CONV_OVF_U4_UN:
7116 case CEE_CONV_OVF_U8_UN:
7117 case CEE_CONV_OVF_I_UN:
7118 case CEE_CONV_OVF_U_UN:
7128 case CEE_ADD_OVF_UN:
7130 case CEE_MUL_OVF_UN:
7132 case CEE_SUB_OVF_UN:
7140 token = read32 (ip + 1);
7141 klass = mini_get_class (method, token, generic_context);
7142 CHECK_TYPELOAD (klass);
7144 if (generic_class_is_reference_type (cfg, klass)) {
7145 MonoInst *store, *load;
7146 int dreg = alloc_preg (cfg);
7148 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7149 load->flags |= ins_flag;
7150 MONO_ADD_INS (cfg->cbb, load);
7152 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7153 store->flags |= ins_flag;
7154 MONO_ADD_INS (cfg->cbb, store);
7156 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7168 token = read32 (ip + 1);
7169 klass = mini_get_class (method, token, generic_context);
7170 CHECK_TYPELOAD (klass);
7172 /* Optimize the common ldobj+stloc combination */
7182 loc_index = ip [5] - CEE_STLOC_0;
7189 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7190 CHECK_LOCAL (loc_index);
7192 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7193 ins->dreg = cfg->locals [loc_index]->dreg;
7199 /* Optimize the ldobj+stobj combination */
7200 /* The reference case ends up being a load+store anyway */
7201 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7206 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7213 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7222 CHECK_STACK_OVF (1);
7224 n = read32 (ip + 1);
7226 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7227 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7228 ins->type = STACK_OBJ;
7231 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7232 MonoInst *iargs [1];
7234 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7235 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7237 if (cfg->opt & MONO_OPT_SHARED) {
7238 MonoInst *iargs [3];
7240 if (cfg->compile_aot) {
7241 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7243 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7244 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7245 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7246 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7247 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7249 if (bblock->out_of_line) {
7250 MonoInst *iargs [2];
7252 if (image == mono_defaults.corlib) {
7254 * Avoid relocations in AOT and save some space by using a
7255 * version of helper_ldstr specialized to mscorlib.
7257 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7258 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7260 /* Avoid creating the string object */
7261 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7262 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7263 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7267 if (cfg->compile_aot) {
7268 NEW_LDSTRCONST (cfg, ins, image, n);
7270 MONO_ADD_INS (bblock, ins);
7273 NEW_PCONST (cfg, ins, NULL);
7274 ins->type = STACK_OBJ;
7275 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7277 MONO_ADD_INS (bblock, ins);
7286 MonoInst *iargs [2];
7287 MonoMethodSignature *fsig;
7290 MonoInst *vtable_arg = NULL;
7293 token = read32 (ip + 1);
7294 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7297 fsig = mono_method_get_signature (cmethod, image, token);
7299 mono_save_token_info (cfg, image, token, cmethod);
7301 if (!mono_class_init (cmethod->klass))
7304 if (cfg->generic_sharing_context)
7305 context_used = mono_method_check_context_used (cmethod);
7307 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7308 if (check_linkdemand (cfg, method, cmethod))
7310 CHECK_CFG_EXCEPTION;
7311 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7312 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7315 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7316 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7317 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7319 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7320 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7322 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7326 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7327 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7329 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7331 CHECK_TYPELOAD (cmethod->klass);
7332 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7337 n = fsig->param_count;
7341 * Generate smaller code for the common newobj <exception> instruction in
7342 * argument checking code.
7344 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7345 is_exception_class (cmethod->klass) && n <= 2 &&
7346 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7347 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7348 MonoInst *iargs [3];
7350 g_assert (!vtable_arg);
7354 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7357 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7361 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7366 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7369 g_assert_not_reached ();
7377 /* move the args to allow room for 'this' in the first position */
7383 /* check_call_signature () requires sp[0] to be set */
7384 this_ins.type = STACK_OBJ;
7386 if (check_call_signature (cfg, fsig, sp))
7391 if (mini_class_is_system_array (cmethod->klass)) {
7392 g_assert (!vtable_arg);
7395 *sp = emit_get_rgctx_method (cfg, context_used,
7396 cmethod, MONO_RGCTX_INFO_METHOD);
7398 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7401 /* Avoid varargs in the common case */
7402 if (fsig->param_count == 1)
7403 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7404 else if (fsig->param_count == 2)
7405 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7407 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7408 } else if (cmethod->string_ctor) {
7409 g_assert (!context_used);
7410 g_assert (!vtable_arg);
7411 /* we simply pass a null pointer */
7412 EMIT_NEW_PCONST (cfg, *sp, NULL);
7413 /* now call the string ctor */
7414 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7416 MonoInst* callvirt_this_arg = NULL;
7418 if (cmethod->klass->valuetype) {
7419 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7420 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7421 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7426 * The code generated by mini_emit_virtual_call () expects
7427 * iargs [0] to be a boxed instance, but luckily the vcall
7428 * will be transformed into a normal call there.
7430 } else if (context_used) {
7434 if (cfg->opt & MONO_OPT_SHARED)
7435 rgctx_info = MONO_RGCTX_INFO_KLASS;
7437 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7438 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7440 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7443 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7445 CHECK_TYPELOAD (cmethod->klass);
7448 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7449 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7450 * As a workaround, we call class cctors before allocating objects.
7452 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7453 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7454 if (cfg->verbose_level > 2)
7455 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7456 class_inits = g_slist_prepend (class_inits, vtable);
7459 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7464 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7466 /* Now call the actual ctor */
7467 /* Avoid virtual calls to ctors if possible */
7468 if (cmethod->klass->marshalbyref)
7469 callvirt_this_arg = sp [0];
7471 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7472 mono_method_check_inlining (cfg, cmethod) &&
7473 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7474 !g_list_find (dont_inline, cmethod)) {
7477 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7478 cfg->real_offset += 5;
7481 inline_costs += costs - 5;
7484 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7486 } else if (context_used &&
7487 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7488 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7489 MonoInst *cmethod_addr;
7491 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7492 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7494 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7497 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7498 callvirt_this_arg, NULL, vtable_arg);
7499 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7500 GENERIC_SHARING_FAILURE (*ip);
7504 if (alloc == NULL) {
7506 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7507 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7521 token = read32 (ip + 1);
7522 klass = mini_get_class (method, token, generic_context);
7523 CHECK_TYPELOAD (klass);
7524 if (sp [0]->type != STACK_OBJ)
7527 if (cfg->generic_sharing_context)
7528 context_used = mono_class_check_context_used (klass);
7537 args [1] = emit_get_rgctx_klass (cfg, context_used,
7538 klass, MONO_RGCTX_INFO_KLASS);
7540 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7544 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7545 MonoMethod *mono_castclass;
7546 MonoInst *iargs [1];
7549 mono_castclass = mono_marshal_get_castclass (klass);
7552 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7553 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7554 g_assert (costs > 0);
7557 cfg->real_offset += 5;
7562 inline_costs += costs;
7565 ins = handle_castclass (cfg, klass, *sp);
7575 token = read32 (ip + 1);
7576 klass = mini_get_class (method, token, generic_context);
7577 CHECK_TYPELOAD (klass);
7578 if (sp [0]->type != STACK_OBJ)
7581 if (cfg->generic_sharing_context)
7582 context_used = mono_class_check_context_used (klass);
7591 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7593 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7597 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7598 MonoMethod *mono_isinst;
7599 MonoInst *iargs [1];
7602 mono_isinst = mono_marshal_get_isinst (klass);
7605 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7606 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7607 g_assert (costs > 0);
7610 cfg->real_offset += 5;
7615 inline_costs += costs;
7618 ins = handle_isinst (cfg, klass, *sp);
7625 case CEE_UNBOX_ANY: {
7629 token = read32 (ip + 1);
7630 klass = mini_get_class (method, token, generic_context);
7631 CHECK_TYPELOAD (klass);
7633 mono_save_token_info (cfg, image, token, klass);
7635 if (cfg->generic_sharing_context)
7636 context_used = mono_class_check_context_used (klass);
7638 if (generic_class_is_reference_type (cfg, klass)) {
7641 MonoInst *iargs [2];
7646 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7647 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7651 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7652 MonoMethod *mono_castclass;
7653 MonoInst *iargs [1];
7656 mono_castclass = mono_marshal_get_castclass (klass);
7659 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7660 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7662 g_assert (costs > 0);
7665 cfg->real_offset += 5;
7669 inline_costs += costs;
7671 ins = handle_castclass (cfg, klass, *sp);
7679 if (mono_class_is_nullable (klass)) {
7680 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7687 ins = handle_unbox (cfg, klass, sp, context_used);
7693 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7706 token = read32 (ip + 1);
7707 klass = mini_get_class (method, token, generic_context);
7708 CHECK_TYPELOAD (klass);
7710 mono_save_token_info (cfg, image, token, klass);
7712 if (cfg->generic_sharing_context)
7713 context_used = mono_class_check_context_used (klass);
7715 if (generic_class_is_reference_type (cfg, klass)) {
7721 if (klass == mono_defaults.void_class)
7723 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7725 /* frequent check in generic code: box (struct), brtrue */
7726 if (!mono_class_is_nullable (klass) &&
7727 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7728 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7730 MONO_INST_NEW (cfg, ins, OP_BR);
7731 if (*ip == CEE_BRTRUE_S) {
7734 target = ip + 1 + (signed char)(*ip);
7739 target = ip + 4 + (gint)(read32 (ip));
7742 GET_BBLOCK (cfg, tblock, target);
7743 link_bblock (cfg, bblock, tblock);
7744 ins->inst_target_bb = tblock;
7745 GET_BBLOCK (cfg, tblock, ip);
7747 * This leads to some inconsistency, since the two bblocks are
7748 * not really connected, but it is needed for handling stack
7749 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7750 * FIXME: This should only be needed if sp != stack_start, but that
7751 * doesn't work for some reason (test failure in mcs/tests on x86).
7753 link_bblock (cfg, bblock, tblock);
7754 if (sp != stack_start) {
7755 handle_stack_args (cfg, stack_start, sp - stack_start);
7757 CHECK_UNVERIFIABLE (cfg);
7759 MONO_ADD_INS (bblock, ins);
7760 start_new_bblock = 1;
7768 if (cfg->opt & MONO_OPT_SHARED)
7769 rgctx_info = MONO_RGCTX_INFO_KLASS;
7771 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7772 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7773 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7775 *sp++ = handle_box (cfg, val, klass);
7786 token = read32 (ip + 1);
7787 klass = mini_get_class (method, token, generic_context);
7788 CHECK_TYPELOAD (klass);
7790 mono_save_token_info (cfg, image, token, klass);
7792 if (cfg->generic_sharing_context)
7793 context_used = mono_class_check_context_used (klass);
7795 if (mono_class_is_nullable (klass)) {
7798 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7799 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7803 ins = handle_unbox (cfg, klass, sp, context_used);
7813 MonoClassField *field;
7817 if (*ip == CEE_STFLD) {
7824 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7826 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7829 token = read32 (ip + 1);
7830 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7831 field = mono_method_get_wrapper_data (method, token);
7832 klass = field->parent;
7835 field = mono_field_from_token (image, token, &klass, generic_context);
7839 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7840 FIELD_ACCESS_FAILURE;
7841 mono_class_init (klass);
7843 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7844 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7845 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7846 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7849 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7850 if (*ip == CEE_STFLD) {
7851 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7853 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7854 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7855 MonoInst *iargs [5];
7858 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7859 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7860 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7864 if (cfg->opt & MONO_OPT_INLINE) {
7865 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7866 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7867 g_assert (costs > 0);
7869 cfg->real_offset += 5;
7872 inline_costs += costs;
7874 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7879 #if HAVE_WRITE_BARRIERS
7880 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7881 /* insert call to write barrier */
7882 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7883 MonoInst *iargs [2];
7886 dreg = alloc_preg (cfg);
7887 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7889 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7893 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7895 store->flags |= ins_flag;
7902 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7903 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7904 MonoInst *iargs [4];
7907 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7908 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7909 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7910 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7911 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7912 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7914 g_assert (costs > 0);
7916 cfg->real_offset += 5;
7920 inline_costs += costs;
7922 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7926 if (sp [0]->type == STACK_VTYPE) {
7929 /* Have to compute the address of the variable */
7931 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7933 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7935 g_assert (var->klass == klass);
7937 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7941 if (*ip == CEE_LDFLDA) {
7942 dreg = alloc_preg (cfg);
7944 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7945 ins->klass = mono_class_from_mono_type (field->type);
7946 ins->type = STACK_MP;
7951 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7952 load->flags |= ins_flag;
7963 MonoClassField *field;
7964 gpointer addr = NULL;
7965 gboolean is_special_static;
7968 token = read32 (ip + 1);
7970 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7971 field = mono_method_get_wrapper_data (method, token);
7972 klass = field->parent;
7975 field = mono_field_from_token (image, token, &klass, generic_context);
7978 mono_class_init (klass);
7979 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7980 FIELD_ACCESS_FAILURE;
7982 /* if the class is Critical then transparent code cannot access its fields */
7983 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7984 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7987 * We can only support shared generic static
7988 * field access on architectures where the
7989 * trampoline code has been extended to handle
7990 * the generic class init.
7992 #ifndef MONO_ARCH_VTABLE_REG
7993 GENERIC_SHARING_FAILURE (*ip);
7996 if (cfg->generic_sharing_context)
7997 context_used = mono_class_check_context_used (klass);
7999 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8001 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8002 * to be called here.
8004 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8005 mono_class_vtable (cfg->domain, klass);
8006 CHECK_TYPELOAD (klass);
8008 mono_domain_lock (cfg->domain);
8009 if (cfg->domain->special_static_fields)
8010 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8011 mono_domain_unlock (cfg->domain);
8013 is_special_static = mono_class_field_is_special_static (field);
8015 /* Generate IR to compute the field address */
8017 if ((cfg->opt & MONO_OPT_SHARED) ||
8018 (cfg->compile_aot && is_special_static) ||
8019 (context_used && is_special_static)) {
8020 MonoInst *iargs [2];
8022 g_assert (field->parent);
8023 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8025 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8026 field, MONO_RGCTX_INFO_CLASS_FIELD);
8028 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8030 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8031 } else if (context_used) {
8032 MonoInst *static_data;
8035 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8036 method->klass->name_space, method->klass->name, method->name,
8037 depth, field->offset);
8040 if (mono_class_needs_cctor_run (klass, method)) {
8044 vtable = emit_get_rgctx_klass (cfg, context_used,
8045 klass, MONO_RGCTX_INFO_VTABLE);
8047 // FIXME: This doesn't work since it tries to pass the argument
8048 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8050 * The vtable pointer is always passed in a register regardless of
8051 * the calling convention, so assign it manually, and make a call
8052 * using a signature without parameters.
8054 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8055 #ifdef MONO_ARCH_VTABLE_REG
8056 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8057 cfg->uses_vtable_reg = TRUE;
8064 * The pointer we're computing here is
8066 * super_info.static_data + field->offset
8068 static_data = emit_get_rgctx_klass (cfg, context_used,
8069 klass, MONO_RGCTX_INFO_STATIC_DATA);
8071 if (field->offset == 0) {
8074 int addr_reg = mono_alloc_preg (cfg);
8075 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8077 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8078 MonoInst *iargs [2];
8080 g_assert (field->parent);
8081 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8082 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8083 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8085 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8087 CHECK_TYPELOAD (klass);
8089 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8090 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8091 if (cfg->verbose_level > 2)
8092 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8093 class_inits = g_slist_prepend (class_inits, vtable);
8095 if (cfg->run_cctors) {
8097 /* This makes it so that inlining cannot trigger */
8098 /* .cctors: too many apps depend on them */
8099 /* running with a specific order... */
8100 if (! vtable->initialized)
8102 ex = mono_runtime_class_init_full (vtable, FALSE);
8104 set_exception_object (cfg, ex);
8105 goto exception_exit;
8109 addr = (char*)vtable->data + field->offset;
8111 if (cfg->compile_aot)
8112 EMIT_NEW_SFLDACONST (cfg, ins, field);
8114 EMIT_NEW_PCONST (cfg, ins, addr);
8117 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8118 * This could be later optimized to do just a couple of
8119 * memory dereferences with constant offsets.
8121 MonoInst *iargs [1];
8122 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8123 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8127 /* Generate IR to do the actual load/store operation */
8129 if (*ip == CEE_LDSFLDA) {
8130 ins->klass = mono_class_from_mono_type (field->type);
8131 ins->type = STACK_PTR;
8133 } else if (*ip == CEE_STSFLD) {
8138 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8139 store->flags |= ins_flag;
8141 gboolean is_const = FALSE;
8142 MonoVTable *vtable = NULL;
8144 if (!context_used) {
8145 vtable = mono_class_vtable (cfg->domain, klass);
8146 CHECK_TYPELOAD (klass);
8148 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8149 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8150 gpointer addr = (char*)vtable->data + field->offset;
8151 int ro_type = field->type->type;
8152 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8153 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8155 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8158 case MONO_TYPE_BOOLEAN:
8160 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8164 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8167 case MONO_TYPE_CHAR:
8169 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8173 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8178 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8182 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8185 #ifndef HAVE_MOVING_COLLECTOR
8188 case MONO_TYPE_STRING:
8189 case MONO_TYPE_OBJECT:
8190 case MONO_TYPE_CLASS:
8191 case MONO_TYPE_SZARRAY:
8193 case MONO_TYPE_FNPTR:
8194 case MONO_TYPE_ARRAY:
8195 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8196 type_to_eval_stack_type ((cfg), field->type, *sp);
8202 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8207 case MONO_TYPE_VALUETYPE:
8217 CHECK_STACK_OVF (1);
8219 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8220 load->flags |= ins_flag;
8233 token = read32 (ip + 1);
8234 klass = mini_get_class (method, token, generic_context);
8235 CHECK_TYPELOAD (klass);
8236 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8237 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8248 const char *data_ptr;
8250 guint32 field_token;
8256 token = read32 (ip + 1);
8258 klass = mini_get_class (method, token, generic_context);
8259 CHECK_TYPELOAD (klass);
8261 if (cfg->generic_sharing_context)
8262 context_used = mono_class_check_context_used (klass);
8267 /* FIXME: Decompose later to help abcrem */
8270 args [0] = emit_get_rgctx_klass (cfg, context_used,
8271 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8276 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8278 if (cfg->opt & MONO_OPT_SHARED) {
8279 /* Decompose now to avoid problems with references to the domainvar */
8280 MonoInst *iargs [3];
8282 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8283 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8286 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8288 /* Decompose later since it is needed by abcrem */
8289 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8290 ins->dreg = alloc_preg (cfg);
8291 ins->sreg1 = sp [0]->dreg;
8292 ins->inst_newa_class = klass;
8293 ins->type = STACK_OBJ;
8295 MONO_ADD_INS (cfg->cbb, ins);
8296 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8297 cfg->cbb->has_array_access = TRUE;
8299 /* Needed so mono_emit_load_get_addr () gets called */
8300 mono_get_got_var (cfg);
8310 * we inline/optimize the initialization sequence if possible.
8311 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8312 * for small sizes open code the memcpy
8313 * ensure the rva field is big enough
8315 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8316 MonoMethod *memcpy_method = get_memcpy_method ();
8317 MonoInst *iargs [3];
8318 int add_reg = alloc_preg (cfg);
8320 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8321 if (cfg->compile_aot) {
8322 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8324 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8326 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8327 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8336 if (sp [0]->type != STACK_OBJ)
8339 dreg = alloc_preg (cfg);
8340 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8341 ins->dreg = alloc_preg (cfg);
8342 ins->sreg1 = sp [0]->dreg;
8343 ins->type = STACK_I4;
8344 MONO_ADD_INS (cfg->cbb, ins);
8345 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8346 cfg->cbb->has_array_access = TRUE;
8354 if (sp [0]->type != STACK_OBJ)
8357 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8359 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8360 CHECK_TYPELOAD (klass);
8361 /* we need to make sure that this array is exactly the type it needs
8362 * to be for correctness. the wrappers are lax with their usage
8363 * so we need to ignore them here
8365 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8366 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8369 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8373 case CEE_LDELEM_ANY:
8384 case CEE_LDELEM_REF: {
8390 if (*ip == CEE_LDELEM_ANY) {
8392 token = read32 (ip + 1);
8393 klass = mini_get_class (method, token, generic_context);
8394 CHECK_TYPELOAD (klass);
8395 mono_class_init (klass);
8398 klass = array_access_to_klass (*ip);
8400 if (sp [0]->type != STACK_OBJ)
8403 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8405 if (sp [1]->opcode == OP_ICONST) {
8406 int array_reg = sp [0]->dreg;
8407 int index_reg = sp [1]->dreg;
8408 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8410 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8411 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8413 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8414 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8417 if (*ip == CEE_LDELEM_ANY)
8430 case CEE_STELEM_REF:
8431 case CEE_STELEM_ANY: {
8437 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8439 if (*ip == CEE_STELEM_ANY) {
8441 token = read32 (ip + 1);
8442 klass = mini_get_class (method, token, generic_context);
8443 CHECK_TYPELOAD (klass);
8444 mono_class_init (klass);
8447 klass = array_access_to_klass (*ip);
8449 if (sp [0]->type != STACK_OBJ)
8452 /* storing a NULL doesn't need any of the complex checks in stelemref */
8453 if (generic_class_is_reference_type (cfg, klass) &&
8454 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8455 MonoMethod* helper = mono_marshal_get_stelemref ();
8456 MonoInst *iargs [3];
8458 if (sp [0]->type != STACK_OBJ)
8460 if (sp [2]->type != STACK_OBJ)
8467 mono_emit_method_call (cfg, helper, iargs, NULL);
8469 if (sp [1]->opcode == OP_ICONST) {
8470 int array_reg = sp [0]->dreg;
8471 int index_reg = sp [1]->dreg;
8472 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8474 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8475 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8477 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8478 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8482 if (*ip == CEE_STELEM_ANY)
8489 case CEE_CKFINITE: {
8493 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8494 ins->sreg1 = sp [0]->dreg;
8495 ins->dreg = alloc_freg (cfg);
8496 ins->type = STACK_R8;
8497 MONO_ADD_INS (bblock, ins);
8499 *sp++ = mono_decompose_opcode (cfg, ins);
8504 case CEE_REFANYVAL: {
8505 MonoInst *src_var, *src;
8507 int klass_reg = alloc_preg (cfg);
8508 int dreg = alloc_preg (cfg);
8511 MONO_INST_NEW (cfg, ins, *ip);
8514 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8515 CHECK_TYPELOAD (klass);
8516 mono_class_init (klass);
8518 if (cfg->generic_sharing_context)
8519 context_used = mono_class_check_context_used (klass);
8522 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8524 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8525 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8526 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8529 MonoInst *klass_ins;
8531 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8532 klass, MONO_RGCTX_INFO_KLASS);
8535 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8536 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8538 mini_emit_class_check (cfg, klass_reg, klass);
8540 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8541 ins->type = STACK_MP;
8546 case CEE_MKREFANY: {
8547 MonoInst *loc, *addr;
8550 MONO_INST_NEW (cfg, ins, *ip);
8553 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8554 CHECK_TYPELOAD (klass);
8555 mono_class_init (klass);
8557 if (cfg->generic_sharing_context)
8558 context_used = mono_class_check_context_used (klass);
8560 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8561 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8564 MonoInst *const_ins;
8565 int type_reg = alloc_preg (cfg);
8567 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8568 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8570 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8571 } else if (cfg->compile_aot) {
8572 int const_reg = alloc_preg (cfg);
8573 int type_reg = alloc_preg (cfg);
8575 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8576 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8578 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8580 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8581 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8583 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8585 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8586 ins->type = STACK_VTYPE;
8587 ins->klass = mono_defaults.typed_reference_class;
8594 MonoClass *handle_class;
8596 CHECK_STACK_OVF (1);
8599 n = read32 (ip + 1);
8601 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8602 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8603 handle = mono_method_get_wrapper_data (method, n);
8604 handle_class = mono_method_get_wrapper_data (method, n + 1);
8605 if (handle_class == mono_defaults.typehandle_class)
8606 handle = &((MonoClass*)handle)->byval_arg;
8609 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8613 mono_class_init (handle_class);
8614 if (cfg->generic_sharing_context) {
8615 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8616 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8617 /* This case handles ldtoken
8618 of an open type, like for
8621 } else if (handle_class == mono_defaults.typehandle_class) {
8622 /* If we get a MONO_TYPE_CLASS
8623 then we need to provide the
8625 instantiation of it. */
8626 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8629 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8630 } else if (handle_class == mono_defaults.fieldhandle_class)
8631 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8632 else if (handle_class == mono_defaults.methodhandle_class)
8633 context_used = mono_method_check_context_used (handle);
8635 g_assert_not_reached ();
8638 if ((cfg->opt & MONO_OPT_SHARED) &&
8639 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8640 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8641 MonoInst *addr, *vtvar, *iargs [3];
8642 int method_context_used;
8644 if (cfg->generic_sharing_context)
8645 method_context_used = mono_method_check_context_used (method);
8647 method_context_used = 0;
8649 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8651 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8652 EMIT_NEW_ICONST (cfg, iargs [1], n);
8653 if (method_context_used) {
8654 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8655 method, MONO_RGCTX_INFO_METHOD);
8656 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8658 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8659 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8661 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8665 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8667 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8668 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8669 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8670 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8671 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8672 MonoClass *tclass = mono_class_from_mono_type (handle);
8674 mono_class_init (tclass);
8676 ins = emit_get_rgctx_klass (cfg, context_used,
8677 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8678 } else if (cfg->compile_aot) {
8679 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8681 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8683 ins->type = STACK_OBJ;
8684 ins->klass = cmethod->klass;
8687 MonoInst *addr, *vtvar;
8689 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8692 if (handle_class == mono_defaults.typehandle_class) {
8693 ins = emit_get_rgctx_klass (cfg, context_used,
8694 mono_class_from_mono_type (handle),
8695 MONO_RGCTX_INFO_TYPE);
8696 } else if (handle_class == mono_defaults.methodhandle_class) {
8697 ins = emit_get_rgctx_method (cfg, context_used,
8698 handle, MONO_RGCTX_INFO_METHOD);
8699 } else if (handle_class == mono_defaults.fieldhandle_class) {
8700 ins = emit_get_rgctx_field (cfg, context_used,
8701 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8703 g_assert_not_reached ();
8705 } else if (cfg->compile_aot) {
8706 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8708 EMIT_NEW_PCONST (cfg, ins, handle);
8710 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8712 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8722 MONO_INST_NEW (cfg, ins, OP_THROW);
8724 ins->sreg1 = sp [0]->dreg;
8726 bblock->out_of_line = TRUE;
8727 MONO_ADD_INS (bblock, ins);
8728 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8729 MONO_ADD_INS (bblock, ins);
8732 link_bblock (cfg, bblock, end_bblock);
8733 start_new_bblock = 1;
8735 case CEE_ENDFINALLY:
8736 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8737 MONO_ADD_INS (bblock, ins);
8739 start_new_bblock = 1;
8742 * Control will leave the method so empty the stack, otherwise
8743 * the next basic block will start with a nonempty stack.
8745 while (sp != stack_start) {
8753 if (*ip == CEE_LEAVE) {
8755 target = ip + 5 + (gint32)read32(ip + 1);
8758 target = ip + 2 + (signed char)(ip [1]);
8761 /* empty the stack */
8762 while (sp != stack_start) {
8767 * If this leave statement is in a catch block, check for a
8768 * pending exception, and rethrow it if necessary.
8770 for (i = 0; i < header->num_clauses; ++i) {
8771 MonoExceptionClause *clause = &header->clauses [i];
8774 * Use <= in the final comparison to handle clauses with multiple
8775 * leave statements, like in bug #78024.
8776 * The ordering of the exception clauses guarantees that we find the
8779 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8781 MonoBasicBlock *dont_throw;
8786 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8789 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8791 NEW_BBLOCK (cfg, dont_throw);
8794 * Currently, we always rethrow the abort exception, despite the
8795 * fact that this is not correct. See thread6.cs for an example.
8796 * But propagating the abort exception is more important than
8797 * getting the semantics right.
8799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8800 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8801 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8803 MONO_START_BB (cfg, dont_throw);
8808 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8810 for (tmp = handlers; tmp; tmp = tmp->next) {
8812 link_bblock (cfg, bblock, tblock);
8813 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8814 ins->inst_target_bb = tblock;
8815 MONO_ADD_INS (bblock, ins);
8817 g_list_free (handlers);
8820 MONO_INST_NEW (cfg, ins, OP_BR);
8821 MONO_ADD_INS (bblock, ins);
8822 GET_BBLOCK (cfg, tblock, target);
8823 link_bblock (cfg, bblock, tblock);
8824 ins->inst_target_bb = tblock;
8825 start_new_bblock = 1;
8827 if (*ip == CEE_LEAVE)
8836 * Mono specific opcodes
8838 case MONO_CUSTOM_PREFIX: {
8840 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8844 case CEE_MONO_ICALL: {
8846 MonoJitICallInfo *info;
8848 token = read32 (ip + 2);
8849 func = mono_method_get_wrapper_data (method, token);
8850 info = mono_find_jit_icall_by_addr (func);
8853 CHECK_STACK (info->sig->param_count);
8854 sp -= info->sig->param_count;
8856 ins = mono_emit_jit_icall (cfg, info->func, sp);
8857 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8861 inline_costs += 10 * num_calls++;
8865 case CEE_MONO_LDPTR: {
8868 CHECK_STACK_OVF (1);
8870 token = read32 (ip + 2);
8872 ptr = mono_method_get_wrapper_data (method, token);
8873 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8874 MonoJitICallInfo *callinfo;
8875 const char *icall_name;
8877 icall_name = method->name + strlen ("__icall_wrapper_");
8878 g_assert (icall_name);
8879 callinfo = mono_find_jit_icall_by_name (icall_name);
8880 g_assert (callinfo);
8882 if (ptr == callinfo->func) {
8883 /* Will be transformed into an AOTCONST later */
8884 EMIT_NEW_PCONST (cfg, ins, ptr);
8890 /* FIXME: Generalize this */
8891 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8892 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8897 EMIT_NEW_PCONST (cfg, ins, ptr);
8900 inline_costs += 10 * num_calls++;
8901 /* Can't embed random pointers into AOT code */
8902 cfg->disable_aot = 1;
8905 case CEE_MONO_ICALL_ADDR: {
8906 MonoMethod *cmethod;
8909 CHECK_STACK_OVF (1);
8911 token = read32 (ip + 2);
8913 cmethod = mono_method_get_wrapper_data (method, token);
8915 if (cfg->compile_aot) {
8916 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8918 ptr = mono_lookup_internal_call (cmethod);
8920 EMIT_NEW_PCONST (cfg, ins, ptr);
8926 case CEE_MONO_VTADDR: {
8927 MonoInst *src_var, *src;
8933 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8934 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8939 case CEE_MONO_NEWOBJ: {
8940 MonoInst *iargs [2];
8942 CHECK_STACK_OVF (1);
8944 token = read32 (ip + 2);
8945 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8946 mono_class_init (klass);
8947 NEW_DOMAINCONST (cfg, iargs [0]);
8948 MONO_ADD_INS (cfg->cbb, iargs [0]);
8949 NEW_CLASSCONST (cfg, iargs [1], klass);
8950 MONO_ADD_INS (cfg->cbb, iargs [1]);
8951 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8953 inline_costs += 10 * num_calls++;
8956 case CEE_MONO_OBJADDR:
8959 MONO_INST_NEW (cfg, ins, OP_MOVE);
8960 ins->dreg = alloc_preg (cfg);
8961 ins->sreg1 = sp [0]->dreg;
8962 ins->type = STACK_MP;
8963 MONO_ADD_INS (cfg->cbb, ins);
8967 case CEE_MONO_LDNATIVEOBJ:
8969 * Similar to LDOBJ, but instead load the unmanaged
8970 * representation of the vtype to the stack.
8975 token = read32 (ip + 2);
8976 klass = mono_method_get_wrapper_data (method, token);
8977 g_assert (klass->valuetype);
8978 mono_class_init (klass);
8981 MonoInst *src, *dest, *temp;
8984 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8985 temp->backend.is_pinvoke = 1;
8986 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8987 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8989 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8990 dest->type = STACK_VTYPE;
8991 dest->klass = klass;
8997 case CEE_MONO_RETOBJ: {
8999 * Same as RET, but return the native representation of a vtype
9002 g_assert (cfg->ret);
9003 g_assert (mono_method_signature (method)->pinvoke);
9008 token = read32 (ip + 2);
9009 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9011 if (!cfg->vret_addr) {
9012 g_assert (cfg->ret_var_is_local);
9014 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9016 EMIT_NEW_RETLOADA (cfg, ins);
9018 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9020 if (sp != stack_start)
9023 MONO_INST_NEW (cfg, ins, OP_BR);
9024 ins->inst_target_bb = end_bblock;
9025 MONO_ADD_INS (bblock, ins);
9026 link_bblock (cfg, bblock, end_bblock);
9027 start_new_bblock = 1;
9031 case CEE_MONO_CISINST:
9032 case CEE_MONO_CCASTCLASS: {
9037 token = read32 (ip + 2);
9038 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9039 if (ip [1] == CEE_MONO_CISINST)
9040 ins = handle_cisinst (cfg, klass, sp [0]);
9042 ins = handle_ccastclass (cfg, klass, sp [0]);
9048 case CEE_MONO_SAVE_LMF:
9049 case CEE_MONO_RESTORE_LMF:
9050 #ifdef MONO_ARCH_HAVE_LMF_OPS
9051 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9052 MONO_ADD_INS (bblock, ins);
9053 cfg->need_lmf_area = TRUE;
9057 case CEE_MONO_CLASSCONST:
9058 CHECK_STACK_OVF (1);
9060 token = read32 (ip + 2);
9061 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9064 inline_costs += 10 * num_calls++;
9066 case CEE_MONO_NOT_TAKEN:
9067 bblock->out_of_line = TRUE;
9071 CHECK_STACK_OVF (1);
9073 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9074 ins->dreg = alloc_preg (cfg);
9075 ins->inst_offset = (gint32)read32 (ip + 2);
9076 ins->type = STACK_PTR;
9077 MONO_ADD_INS (bblock, ins);
9082 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9092 /* somewhat similar to LDTOKEN */
9093 MonoInst *addr, *vtvar;
9094 CHECK_STACK_OVF (1);
9095 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9097 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9098 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9100 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9101 ins->type = STACK_VTYPE;
9102 ins->klass = mono_defaults.argumenthandle_class;
9115 * The following transforms:
9116 * CEE_CEQ into OP_CEQ
9117 * CEE_CGT into OP_CGT
9118 * CEE_CGT_UN into OP_CGT_UN
9119 * CEE_CLT into OP_CLT
9120 * CEE_CLT_UN into OP_CLT_UN
9122 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9124 MONO_INST_NEW (cfg, ins, cmp->opcode);
9126 cmp->sreg1 = sp [0]->dreg;
9127 cmp->sreg2 = sp [1]->dreg;
9128 type_from_op (cmp, sp [0], sp [1]);
9130 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9131 cmp->opcode = OP_LCOMPARE;
9132 else if (sp [0]->type == STACK_R8)
9133 cmp->opcode = OP_FCOMPARE;
9135 cmp->opcode = OP_ICOMPARE;
9136 MONO_ADD_INS (bblock, cmp);
9137 ins->type = STACK_I4;
9138 ins->dreg = alloc_dreg (cfg, ins->type);
9139 type_from_op (ins, sp [0], sp [1]);
9141 if (cmp->opcode == OP_FCOMPARE) {
9143 * The backends expect the fceq opcodes to do the
9146 cmp->opcode = OP_NOP;
9147 ins->sreg1 = cmp->sreg1;
9148 ins->sreg2 = cmp->sreg2;
9150 MONO_ADD_INS (bblock, ins);
9157 MonoMethod *cil_method;
9158 gboolean needs_static_rgctx_invoke;
9160 CHECK_STACK_OVF (1);
9162 n = read32 (ip + 2);
9163 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9166 mono_class_init (cmethod->klass);
9168 mono_save_token_info (cfg, image, n, cmethod);
9170 if (cfg->generic_sharing_context)
9171 context_used = mono_method_check_context_used (cmethod);
9173 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9175 cil_method = cmethod;
9176 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9177 METHOD_ACCESS_FAILURE;
9179 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9180 if (check_linkdemand (cfg, method, cmethod))
9182 CHECK_CFG_EXCEPTION;
9183 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9184 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9188 * Optimize the common case of ldftn+delegate creation
9190 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9191 /* FIXME: SGEN support */
9192 /* FIXME: handle shared static generic methods */
9193 /* FIXME: handle this in shared code */
9194 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9195 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9196 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9197 MonoInst *target_ins;
9200 invoke = mono_get_delegate_invoke (ctor_method->klass);
9201 if (!invoke || !mono_method_signature (invoke))
9205 if (cfg->verbose_level > 3)
9206 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9207 target_ins = sp [-1];
9209 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9218 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9220 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9222 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9226 inline_costs += 10 * num_calls++;
9229 case CEE_LDVIRTFTN: {
9234 n = read32 (ip + 2);
9235 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9238 mono_class_init (cmethod->klass);
9240 if (cfg->generic_sharing_context)
9241 context_used = mono_method_check_context_used (cmethod);
9243 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9244 if (check_linkdemand (cfg, method, cmethod))
9246 CHECK_CFG_EXCEPTION;
9247 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9248 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9255 args [1] = emit_get_rgctx_method (cfg, context_used,
9256 cmethod, MONO_RGCTX_INFO_METHOD);
9257 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9259 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9260 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9264 inline_costs += 10 * num_calls++;
9268 CHECK_STACK_OVF (1);
9270 n = read16 (ip + 2);
9272 EMIT_NEW_ARGLOAD (cfg, ins, n);
9277 CHECK_STACK_OVF (1);
9279 n = read16 (ip + 2);
9281 NEW_ARGLOADA (cfg, ins, n);
9282 MONO_ADD_INS (cfg->cbb, ins);
9290 n = read16 (ip + 2);
9292 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9294 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9298 CHECK_STACK_OVF (1);
9300 n = read16 (ip + 2);
9302 EMIT_NEW_LOCLOAD (cfg, ins, n);
9307 unsigned char *tmp_ip;
9308 CHECK_STACK_OVF (1);
9310 n = read16 (ip + 2);
9313 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9319 EMIT_NEW_LOCLOADA (cfg, ins, n);
9328 n = read16 (ip + 2);
9330 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9332 emit_stloc_ir (cfg, sp, header, n);
9339 if (sp != stack_start)
9341 if (cfg->method != method)
9343 * Inlining this into a loop in a parent could lead to
9344 * stack overflows which is different behavior than the
9345 * non-inlined case, thus disable inlining in this case.
9347 goto inline_failure;
9349 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9350 ins->dreg = alloc_preg (cfg);
9351 ins->sreg1 = sp [0]->dreg;
9352 ins->type = STACK_PTR;
9353 MONO_ADD_INS (cfg->cbb, ins);
9355 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9357 ins->flags |= MONO_INST_INIT;
9362 case CEE_ENDFILTER: {
9363 MonoExceptionClause *clause, *nearest;
9364 int cc, nearest_num;
9368 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9370 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9371 ins->sreg1 = (*sp)->dreg;
9372 MONO_ADD_INS (bblock, ins);
9373 start_new_bblock = 1;
9378 for (cc = 0; cc < header->num_clauses; ++cc) {
9379 clause = &header->clauses [cc];
9380 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9381 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9382 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9388 if ((ip - header->code) != nearest->handler_offset)
9393 case CEE_UNALIGNED_:
9394 ins_flag |= MONO_INST_UNALIGNED;
9395 /* FIXME: record alignment? we can assume 1 for now */
9400 ins_flag |= MONO_INST_VOLATILE;
9404 ins_flag |= MONO_INST_TAILCALL;
9405 cfg->flags |= MONO_CFG_HAS_TAIL;
9406 /* Can't inline tail calls at this time */
9407 inline_costs += 100000;
9414 token = read32 (ip + 2);
9415 klass = mini_get_class (method, token, generic_context);
9416 CHECK_TYPELOAD (klass);
9417 if (generic_class_is_reference_type (cfg, klass))
9418 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9420 mini_emit_initobj (cfg, *sp, NULL, klass);
9424 case CEE_CONSTRAINED_:
9426 token = read32 (ip + 2);
9427 constrained_call = mono_class_get_full (image, token, generic_context);
9428 CHECK_TYPELOAD (constrained_call);
9433 MonoInst *iargs [3];
9437 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9438 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9439 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9440 /* emit_memset only works when val == 0 */
9441 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9446 if (ip [1] == CEE_CPBLK) {
9447 MonoMethod *memcpy_method = get_memcpy_method ();
9448 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9450 MonoMethod *memset_method = get_memset_method ();
9451 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9461 ins_flag |= MONO_INST_NOTYPECHECK;
9463 ins_flag |= MONO_INST_NORANGECHECK;
9464 /* we ignore the no-nullcheck for now since we
9465 * really do it explicitly only when doing callvirt->call
9471 int handler_offset = -1;
9473 for (i = 0; i < header->num_clauses; ++i) {
9474 MonoExceptionClause *clause = &header->clauses [i];
9475 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9476 handler_offset = clause->handler_offset;
9481 bblock->flags |= BB_EXCEPTION_UNSAFE;
9483 g_assert (handler_offset != -1);
9485 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9486 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9487 ins->sreg1 = load->dreg;
9488 MONO_ADD_INS (bblock, ins);
9490 link_bblock (cfg, bblock, end_bblock);
9491 start_new_bblock = 1;
9499 CHECK_STACK_OVF (1);
9501 token = read32 (ip + 2);
9502 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9503 MonoType *type = mono_type_create_from_typespec (image, token);
9504 token = mono_type_size (type, &ialign);
9506 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9507 CHECK_TYPELOAD (klass);
9508 mono_class_init (klass);
9509 token = mono_class_value_size (klass, &align);
9511 EMIT_NEW_ICONST (cfg, ins, token);
9516 case CEE_REFANYTYPE: {
9517 MonoInst *src_var, *src;
9523 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9525 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9526 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9527 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9537 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9542 g_error ("opcode 0x%02x not handled", *ip);
9545 if (start_new_bblock != 1)
9548 bblock->cil_length = ip - bblock->cil_code;
9549 bblock->next_bb = end_bblock;
9551 if (cfg->method == method && cfg->domainvar) {
9553 MonoInst *get_domain;
9555 cfg->cbb = init_localsbb;
9557 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9558 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9561 get_domain->dreg = alloc_preg (cfg);
9562 MONO_ADD_INS (cfg->cbb, get_domain);
9564 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9565 MONO_ADD_INS (cfg->cbb, store);
9568 if (cfg->method == method && cfg->got_var)
9569 mono_emit_load_got_addr (cfg);
9574 cfg->cbb = init_localsbb;
9576 for (i = 0; i < header->num_locals; ++i) {
9577 MonoType *ptype = header->locals [i];
9578 int t = ptype->type;
9579 dreg = cfg->locals [i]->dreg;
9581 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9582 t = mono_class_enum_basetype (ptype->data.klass)->type;
9584 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9585 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9586 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9587 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9588 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9589 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9590 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9591 ins->type = STACK_R8;
9592 ins->inst_p0 = (void*)&r8_0;
9593 ins->dreg = alloc_dreg (cfg, STACK_R8);
9594 MONO_ADD_INS (init_localsbb, ins);
9595 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9596 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9597 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9598 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9600 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9607 if (cfg->method == method) {
9609 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9610 bb->region = mono_find_block_region (cfg, bb->real_offset);
9612 mono_create_spvar_for_region (cfg, bb->region);
9613 if (cfg->verbose_level > 2)
9614 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9618 g_slist_free (class_inits);
9619 dont_inline = g_list_remove (dont_inline, method);
9621 if (inline_costs < 0) {
9624 /* Method is too large */
9625 mname = mono_method_full_name (method, TRUE);
9626 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9627 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9632 if ((cfg->verbose_level > 2) && (cfg->method == method))
9633 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9635 return inline_costs;
9638 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9639 g_slist_free (class_inits);
9640 dont_inline = g_list_remove (dont_inline, method);
9644 g_slist_free (class_inits);
9645 dont_inline = g_list_remove (dont_inline, method);
9649 g_slist_free (class_inits);
9650 dont_inline = g_list_remove (dont_inline, method);
9651 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9655 g_slist_free (class_inits);
9656 dont_inline = g_list_remove (dont_inline, method);
9657 set_exception_type_from_invalid_il (cfg, method, ip);
9662 store_membase_reg_to_store_membase_imm (int opcode)
9665 case OP_STORE_MEMBASE_REG:
9666 return OP_STORE_MEMBASE_IMM;
9667 case OP_STOREI1_MEMBASE_REG:
9668 return OP_STOREI1_MEMBASE_IMM;
9669 case OP_STOREI2_MEMBASE_REG:
9670 return OP_STOREI2_MEMBASE_IMM;
9671 case OP_STOREI4_MEMBASE_REG:
9672 return OP_STOREI4_MEMBASE_IMM;
9673 case OP_STOREI8_MEMBASE_REG:
9674 return OP_STOREI8_MEMBASE_IMM;
9676 g_assert_not_reached ();
9682 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map OPCODE to its immediate-operand variant, i.e. the form whose second
 * operand is a constant (e.g. OP_COMPARE -> OP_COMPARE_IMM). Used by the
 * local optimizer to fold constants into instructions.
 */
9685 mono_op_to_op_imm (int opcode)
9695 return OP_IDIV_UN_IMM;
9699 return OP_IREM_UN_IMM;
9713 return OP_ISHR_UN_IMM;
9730 return OP_LSHR_UN_IMM;
9733 return OP_COMPARE_IMM;
9735 return OP_ICOMPARE_IMM;
9737 return OP_LCOMPARE_IMM;
9739 case OP_STORE_MEMBASE_REG:
9740 return OP_STORE_MEMBASE_IMM;
9741 case OP_STOREI1_MEMBASE_REG:
9742 return OP_STOREI1_MEMBASE_IMM;
9743 case OP_STOREI2_MEMBASE_REG:
9744 return OP_STOREI2_MEMBASE_IMM;
9745 case OP_STOREI4_MEMBASE_REG:
9746 return OP_STOREI4_MEMBASE_IMM;
/* Some immediate variants only exist on specific backends */
9748 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9750 return OP_X86_PUSH_IMM;
9751 case OP_X86_COMPARE_MEMBASE_REG:
9752 return OP_X86_COMPARE_MEMBASE_IMM;
9754 #if defined(TARGET_AMD64)
9755 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9756 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9758 case OP_VOIDCALL_REG:
9767 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* indirect-load opcode to the corresponding low level
 * OP_LOAD*_MEMBASE opcode.
 */
9774 ldind_to_load_membase (int opcode)
9778 return OP_LOADI1_MEMBASE;
9780 return OP_LOADU1_MEMBASE;
9782 return OP_LOADI2_MEMBASE;
9784 return OP_LOADU2_MEMBASE;
9786 return OP_LOADI4_MEMBASE;
9788 return OP_LOADU4_MEMBASE;
9790 return OP_LOAD_MEMBASE;
9792 return OP_LOAD_MEMBASE;
9794 return OP_LOADI8_MEMBASE;
9796 return OP_LOADR4_MEMBASE;
9798 return OP_LOADR8_MEMBASE;
9800 g_assert_not_reached (); /* not an LDIND opcode */
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* indirect-store opcode to the corresponding low level
 * OP_STORE*_MEMBASE_REG opcode.
 */
9807 stind_to_store_membase (int opcode)
9811 return OP_STOREI1_MEMBASE_REG;
9813 return OP_STOREI2_MEMBASE_REG;
9815 return OP_STOREI4_MEMBASE_REG;
9818 return OP_STORE_MEMBASE_REG;
9820 return OP_STOREI8_MEMBASE_REG;
9822 return OP_STORER4_MEMBASE_REG;
9824 return OP_STORER8_MEMBASE_REG;
9826 g_assert_not_reached (); /* not a STIND opcode */
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to its OP_LOAD*_MEM form (presumably a
 * load from a constant address -- confirm against the x86/amd64 backends).
 * Only implemented on x86/amd64.
 */
9833 mono_load_membase_to_load_mem (int opcode)
9835 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9836 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9838 case OP_LOAD_MEMBASE:
9840 case OP_LOADU1_MEMBASE:
9841 return OP_LOADU1_MEM;
9842 case OP_LOADU2_MEMBASE:
9843 return OP_LOADU2_MEM;
9844 case OP_LOADI4_MEMBASE:
9845 return OP_LOADI4_MEM;
9846 case OP_LOADU4_MEMBASE:
9847 return OP_LOADU4_MEM;
9848 #if SIZEOF_REGISTER == 8
9849 case OP_LOADI8_MEMBASE:
9850 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given STORE_OPCODE (the store used to spill the destination variable) and
 * an ALU opcode OPCODE, return the x86/amd64 read-modify-write _MEMBASE
 * variant which operates directly on the variable's stack slot, avoiding a
 * separate load+store pair. Callers check the result against -1 for
 * "no such variant" (see mono_spill_global_vars ()).
 */
9859 op_to_op_dest_membase (int store_opcode, int opcode)
9861 #if defined(TARGET_X86)
/* Only fuse when the destination slot is a full (4 byte) word */
9862 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9867 return OP_X86_ADD_MEMBASE_REG;
9869 return OP_X86_SUB_MEMBASE_REG;
9871 return OP_X86_AND_MEMBASE_REG;
9873 return OP_X86_OR_MEMBASE_REG;
9875 return OP_X86_XOR_MEMBASE_REG;
9878 return OP_X86_ADD_MEMBASE_IMM;
9881 return OP_X86_SUB_MEMBASE_IMM;
9884 return OP_X86_AND_MEMBASE_IMM;
9887 return OP_X86_OR_MEMBASE_IMM;
9890 return OP_X86_XOR_MEMBASE_IMM;
9896 #if defined(TARGET_AMD64)
/* On amd64 8 byte slots can be fused as well */
9897 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9902 return OP_X86_ADD_MEMBASE_REG;
9904 return OP_X86_SUB_MEMBASE_REG;
9906 return OP_X86_AND_MEMBASE_REG;
9908 return OP_X86_OR_MEMBASE_REG;
9910 return OP_X86_XOR_MEMBASE_REG;
9912 return OP_X86_ADD_MEMBASE_IMM;
9914 return OP_X86_SUB_MEMBASE_IMM;
9916 return OP_X86_AND_MEMBASE_IMM;
9918 return OP_X86_OR_MEMBASE_IMM;
9920 return OP_X86_XOR_MEMBASE_IMM;
9922 return OP_AMD64_ADD_MEMBASE_REG;
9924 return OP_AMD64_SUB_MEMBASE_REG;
9926 return OP_AMD64_AND_MEMBASE_REG;
9928 return OP_AMD64_OR_MEMBASE_REG;
9930 return OP_AMD64_XOR_MEMBASE_REG;
9933 return OP_AMD64_ADD_MEMBASE_IMM;
9936 return OP_AMD64_SUB_MEMBASE_IMM;
9939 return OP_AMD64_AND_MEMBASE_IMM;
9942 return OP_AMD64_OR_MEMBASE_IMM;
9945 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse OPCODE with the following spill store: return the x86/amd64 opcode
 * which computes OPCODE's result directly into memory (e.g. SETcc into a
 * byte-sized slot). Callers check the result against -1 for "no such variant".
 */
9955 op_to_op_store_membase (int store_opcode, int opcode)
9957 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc only writes one byte, so the slot must be byte-sized */
9960 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9961 return OP_X86_SETEQ_MEMBASE;
9963 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9964 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse the load LOAD_OPCODE feeding OPCODE's first source register into the
 * instruction itself, returning the x86/amd64 _MEMBASE variant which reads
 * sreg1 directly from memory. Callers check the result against -1 for
 * "no such variant".
 */
9972 op_to_op_src1_membase (int load_opcode, int opcode)
9975 /* FIXME: This has sign extension issues */
9977 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9978 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only fuse full-word loads */
9981 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9986 return OP_X86_PUSH_MEMBASE;
9987 case OP_COMPARE_IMM:
9988 case OP_ICOMPARE_IMM:
9989 return OP_X86_COMPARE_MEMBASE_IMM;
9992 return OP_X86_COMPARE_MEMBASE_REG;
9997 /* FIXME: This has sign extension issues */
9999 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10000 return OP_X86_COMPARE_MEMBASE8_IMM;
10005 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10006 return OP_X86_PUSH_MEMBASE;
10008 /* FIXME: This only works for 32 bit immediates
10009 case OP_COMPARE_IMM:
10010 case OP_LCOMPARE_IMM:
10011 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10012 return OP_AMD64_COMPARE_MEMBASE_IMM;
10014 case OP_ICOMPARE_IMM:
10015 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10016 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10020 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10021 return OP_AMD64_COMPARE_MEMBASE_REG;
10024 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10025 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse the load LOAD_OPCODE feeding OPCODE's second source register into
 * the instruction itself, returning the x86/amd64 REG_MEMBASE variant which
 * reads sreg2 directly from memory. Callers check the result against -1 for
 * "no such variant".
 */
10034 op_to_op_src2_membase (int load_opcode, int opcode)
/* On x86 only full-word (4 byte) loads can be fused */
10037 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10043 return OP_X86_COMPARE_REG_MEMBASE;
10045 return OP_X86_ADD_REG_MEMBASE;
10047 return OP_X86_SUB_REG_MEMBASE;
10049 return OP_X86_AND_REG_MEMBASE;
10051 return OP_X86_OR_REG_MEMBASE;
10053 return OP_X86_XOR_REG_MEMBASE;
10057 #ifdef TARGET_AMD64
/* On amd64, pick the 32 bit or 64 bit variant based on the load width */
10060 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10061 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10065 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10066 return OP_AMD64_COMPARE_REG_MEMBASE;
10069 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10070 return OP_X86_ADD_REG_MEMBASE;
10072 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10073 return OP_X86_SUB_REG_MEMBASE;
10075 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10076 return OP_X86_AND_REG_MEMBASE;
10078 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10079 return OP_X86_OR_REG_MEMBASE;
10081 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10082 return OP_X86_XOR_REG_MEMBASE;
10084 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10085 return OP_AMD64_ADD_REG_MEMBASE;
10087 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10088 return OP_AMD64_SUB_REG_MEMBASE;
10090 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10091 return OP_AMD64_AND_REG_MEMBASE;
10093 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10094 return OP_AMD64_OR_REG_MEMBASE;
10096 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10097 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes which are
 * emulated in software on this target (long shifts on 32 bit targets,
 * mul/div when MONO_ARCH_EMULATE_MUL_DIV/MONO_ARCH_EMULATE_DIV is defined) --
 * presumably because the emulation path cannot take immediate operands.
 */
10105 mono_op_to_op_imm_noemul (int opcode)
10108 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10113 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10121 return mono_op_to_op_imm (opcode);
10125 #ifndef DISABLE_JIT
10128 * mono_handle_global_vregs:
10130 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them, and conversely mark variables used in only one
 * bblock as dead so they can be treated as local vregs by the register
 * allocator. Finally compress cfg->varinfo/cfg->vars to drop dead entries.
 */
10134 mono_handle_global_vregs (MonoCompile *cfg)
10136 gint32 *vreg_to_bb;
10137 MonoBasicBlock *bb;
/* One gint32 slot per vreg; was sizeof (gint32*) * cfg->next_vreg + 1, which
 * used the pointer size and mis-parenthesized the +1 (it added one byte, not
 * one element). */
10140 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10142 #ifdef MONO_ARCH_SIMD_INTRINSICS
10143 if (cfg->uses_simd_intrinsics)
10144 mono_simd_simplify_indirection (cfg);
10147 /* Find local vregs used in more than one bb */
10148 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10149 MonoInst *ins = bb->code;
10150 int block_num = bb->block_num;
10152 if (cfg->verbose_level > 2)
10153 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10156 for (; ins; ins = ins->next) {
10157 const char *spec = INS_INFO (ins->opcode);
10158 int regtype, regindex;
10161 if (G_UNLIKELY (cfg->verbose_level > 2))
10162 mono_print_ins (ins);
10164 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn */
10166 for (regindex = 0; regindex < 4; regindex ++) {
10169 if (regindex == 0) {
10170 regtype = spec [MONO_INST_DEST];
10171 if (regtype == ' ')
10174 } else if (regindex == 1) {
10175 regtype = spec [MONO_INST_SRC1];
10176 if (regtype == ' ')
10179 } else if (regindex == 2) {
10180 regtype = spec [MONO_INST_SRC2];
10181 if (regtype == ' ')
10184 } else if (regindex == 3) {
10185 regtype = spec [MONO_INST_SRC3];
10186 if (regtype == ' ')
10191 #if SIZEOF_REGISTER == 4
10192 /* In the LLVM case, the long opcodes are not decomposed */
10193 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10195 * Since some instructions reference the original long vreg,
10196 * and some reference the two component vregs, it is quite hard
10197 * to determine when it needs to be global. So be conservative.
10199 if (!get_vreg_to_inst (cfg, vreg)) {
10200 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10202 if (cfg->verbose_level > 2)
10203 printf ("LONG VREG R%d made global.\n", vreg);
10207 * Make the component vregs volatile since the optimizations can
10208 * get confused otherwise.
10210 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10211 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10215 g_assert (vreg != -1);
10217 prev_bb = vreg_to_bb [vreg];
10218 if (prev_bb == 0) {
10219 /* 0 is a valid block num */
10220 vreg_to_bb [vreg] = block_num + 1;
10221 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware regs are never made global */
10222 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10225 if (!get_vreg_to_inst (cfg, vreg)) {
10226 if (G_UNLIKELY (cfg->verbose_level > 2))
10227 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num)
;
10231 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10234 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10237 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10240 g_assert_not_reached ();
10244 /* Flag as having been used in more than one bb */
10245 vreg_to_bb [vreg] = -1;
10251 /* If a variable is used in only one bblock, convert it into a local vreg */
10252 for (i = 0; i < cfg->num_varinfo; i++) {
10253 MonoInst *var = cfg->varinfo [i];
10254 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10256 switch (var->type) {
10262 #if SIZEOF_REGISTER == 8
10265 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10266 /* Enabling this screws up the fp stack on x86 */
10269 /* Arguments are implicitly global */
10270 /* Putting R4 vars into registers doesn't work currently */
10271 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10273 * Make sure that the variable's liveness interval doesn't contain a call, since
10274 * that would cause the lvreg to be spilled, making the whole optimization
10277 /* This is too slow for JIT compilation */
10279 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10281 int def_index, call_index, ins_index;
10282 gboolean spilled = FALSE;
10287 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10288 const char *spec = INS_INFO (ins->opcode);
10290 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10291 def_index = ins_index;
/* Fixed copy-paste bug: the second disjunct previously re-tested SRC1/sreg1
 * instead of SRC2/sreg2, so uses through sreg2 were never detected. */
10293 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10294 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10295 if (call_index > def_index) {
10301 if (MONO_IS_CALL (ins))
10302 call_index = ins_index;
10312 if (G_UNLIKELY (cfg->verbose_level > 2))
10313 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark dead so it becomes a plain local vreg */
10314 var->flags |= MONO_INST_IS_DEAD;
10315 cfg->vreg_to_inst [var->dreg] = NULL;
10322 * Compress the varinfo and vars tables so the liveness computation is faster and
10323 * takes up less space.
10326 for (i = 0; i < cfg->num_varinfo; ++i) {
10327 MonoInst *var = cfg->varinfo [i];
10328 if (pos < i && cfg->locals_start == i)
10329 cfg->locals_start = pos;
10330 if (!(var->flags & MONO_INST_IS_DEAD)) {
10332 cfg->varinfo [pos] = cfg->varinfo [i];
10333 cfg->varinfo [pos]->inst_c0 = pos;
10334 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10335 cfg->vars [pos].idx = pos;
10336 #if SIZEOF_REGISTER == 4
10337 if (cfg->varinfo [pos]->type == STACK_I8) {
10338 /* Modify the two component vars too */
10341 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10342 var1->inst_c0 = pos;
10343 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10344 var1->inst_c0 = pos;
10351 cfg->num_varinfo = pos;
10352 if (cfg->locals_start > cfg->num_varinfo)
10353 cfg->locals_start = cfg->num_varinfo;
10357 * mono_spill_global_vars:
10359 * Generate spill code for variables which are not allocated to registers,
10360 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10361 * code is generated which could be optimized by the local optimization passes.
10364 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10366 MonoBasicBlock *bb;
10368 int orig_next_vreg;
10369 guint32 *vreg_to_lvreg;
10371 guint32 i, lvregs_len;
10372 gboolean dest_has_lvreg = FALSE;
10373 guint32 stacktypes [128];
10374 MonoInst **live_range_start, **live_range_end;
10375 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10377 *need_local_opts = FALSE;
10379 memset (spec2, 0, sizeof (spec2));
/* Map ins-spec register type characters to stack types for alloc_dreg () */
10381 /* FIXME: Move this function to mini.c */
10382 stacktypes ['i'] = STACK_PTR;
10383 stacktypes ['l'] = STACK_I8;
10384 stacktypes ['f'] = STACK_R8;
10385 #ifdef MONO_ARCH_SIMD_INTRINSICS
10386 stacktypes ['x'] = STACK_VTYPE;
10389 #if SIZEOF_REGISTER == 4
10390 /* Create MonoInsts for longs */
10391 for (i = 0; i < cfg->num_varinfo; i++) {
10392 MonoInst *ins = cfg->varinfo [i];
10394 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10395 switch (ins->type) {
10396 #ifdef MONO_ARCH_SOFT_FLOAT
10402 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32 bit component vregs stack slots inside the 64 bit slot */
10404 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10406 tree->opcode = OP_REGOFFSET;
10407 tree->inst_basereg = ins->inst_basereg;
10408 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10410 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10412 tree->opcode = OP_REGOFFSET;
10413 tree->inst_basereg = ins->inst_basereg;
10414 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10424 /* FIXME: widening and truncation */
10427 * As an optimization, when a variable allocated to the stack is first loaded into
10428 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10429 * the variable again.
10431 orig_next_vreg = cfg->next_vreg;
10432 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10433 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10437 * These arrays contain the first and last instructions accessing a given
10439 * Since we emit bblocks in the same order we process them here, and we
10440 * don't split live ranges, these will precisely describe the live range of
10441 * the variable, i.e. the instruction range where a valid value can be found
10442 * in the variables location.
10444 /* FIXME: Only do this if debugging info is requested */
10445 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10446 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10447 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10448 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10450 /* Add spill loads/stores */
10451 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10454 if (cfg->verbose_level > 2)
10455 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock */
10457 /* Clear vreg_to_lvreg array */
10458 for (i = 0; i < lvregs_len; i++)
10459 vreg_to_lvreg [lvregs [i]] = 0;
10463 MONO_BB_FOR_EACH_INS (bb, ins) {
10464 const char *spec = INS_INFO (ins->opcode);
10465 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10466 gboolean store, no_lvreg;
10467 int sregs [MONO_MAX_SRC_REGS];
10469 if (G_UNLIKELY (cfg->verbose_level > 2))
10470 mono_print_ins (ins);
10472 if (ins->opcode == OP_NOP)
10476 * We handle LDADDR here as well, since it can only be decomposed
10477 * when variable addresses are known.
10479 if (ins->opcode == OP_LDADDR) {
10480 MonoInst *var = ins->inst_p0;
10482 if (var->opcode == OP_VTARG_ADDR) {
10483 /* Happens on SPARC/S390 where vtypes are passed by reference */
10484 MonoInst *vtaddr = var->inst_left;
10485 if (vtaddr->opcode == OP_REGVAR) {
10486 ins->opcode = OP_MOVE;
10487 ins->sreg1 = vtaddr->dreg;
10489 else if (var->inst_left->opcode == OP_REGOFFSET) {
10490 ins->opcode = OP_LOAD_MEMBASE;
10491 ins->inst_basereg = vtaddr->inst_basereg;
10492 ins->inst_offset = vtaddr->inst_offset;
10496 g_assert (var->opcode == OP_REGOFFSET);
/* Decompose LDADDR into basereg + offset */
10498 ins->opcode = OP_ADD_IMM;
10499 ins->sreg1 = var->inst_basereg;
10500 ins->inst_imm = var->inst_offset;
10503 *need_local_opts = TRUE;
10504 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes should have been lowered by now */
10507 if (ins->opcode < MONO_CEE_LAST) {
10508 mono_print_ins (ins);
10509 g_assert_not_reached ();
10513 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10517 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the code below treats the base reg as a source */
10518 tmp_reg = ins->dreg;
10519 ins->dreg = ins->sreg2;
10520 ins->sreg2 = tmp_reg;
10523 spec2 [MONO_INST_DEST] = ' ';
10524 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10525 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10526 spec2 [MONO_INST_SRC3] = ' ';
10528 } else if (MONO_IS_STORE_MEMINDEX (ins))
10529 g_assert_not_reached ();
10534 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10535 printf ("\t %.3s %d", spec, ins->dreg);
10536 num_sregs = mono_inst_get_src_registers (ins, sregs);
10537 for (srcindex = 0; srcindex < 3; ++srcindex)
10538 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10545 regtype = spec [MONO_INST_DEST];
10546 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10549 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10550 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10551 MonoInst *store_ins;
10553 MonoInst *def_ins = ins;
10554 int dreg = ins->dreg; /* The original vreg */
10556 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10558 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register, just rename */
10559 ins->dreg = var->dreg;
10560 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10562 * Instead of emitting a load+store, use a _membase opcode.
10564 g_assert (var->opcode == OP_REGOFFSET);
10565 if (ins->opcode == OP_MOVE) {
10569 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10570 ins->inst_basereg = var->inst_basereg;
10571 ins->inst_offset = var->inst_offset;
10574 spec = INS_INFO (ins->opcode);
10578 g_assert (var->opcode == OP_REGOFFSET);
10580 prev_dreg = ins->dreg;
10582 /* Invalidate any previous lvreg for this vreg */
10583 vreg_to_lvreg [ins->dreg] = 0;
10587 #ifdef MONO_ARCH_SOFT_FLOAT
10588 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10590 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg, then store it back to the variable */
10594 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10596 if (regtype == 'l') {
10597 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10598 mono_bblock_insert_after_ins (bb, ins, store_ins);
10599 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10600 mono_bblock_insert_after_ins (bb, ins, store_ins);
10601 def_ins = store_ins;
10604 g_assert (store_opcode != OP_STOREV_MEMBASE);
10606 /* Try to fuse the store into the instruction itself */
10607 /* FIXME: Add more instructions */
10608 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10609 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10610 ins->inst_imm = ins->inst_c0;
10611 ins->inst_destbasereg = var->inst_basereg;
10612 ins->inst_offset = var->inst_offset;
10613 spec = INS_INFO (ins->opcode);
10614 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move to a spilled variable becomes a plain store */
10615 ins->opcode = store_opcode;
10616 ins->inst_destbasereg = var->inst_basereg;
10617 ins->inst_offset = var->inst_offset;
10621 tmp_reg = ins->dreg;
10622 ins->dreg = ins->sreg2;
10623 ins->sreg2 = tmp_reg;
10626 spec2 [MONO_INST_DEST] = ' ';
10627 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10628 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10629 spec2 [MONO_INST_SRC3] = ' ';
10631 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10632 // FIXME: The backends expect the base reg to be in inst_basereg
10633 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10635 ins->inst_basereg = var->inst_basereg;
10636 ins->inst_offset = var->inst_offset;
10637 spec = INS_INFO (ins->opcode);
10639 /* printf ("INS: "); mono_print_ins (ins); */
10640 /* Create a store instruction */
10641 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10643 /* Insert it after the instruction */
10644 mono_bblock_insert_after_ins (bb, ins, store_ins);
10646 def_ins = store_ins;
10649 * We can't assign ins->dreg to var->dreg here, since the
10650 * sregs could use it. So set a flag, and do it after
10653 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10654 dest_has_lvreg = TRUE;
/* Track live range start for debugging info */
10659 if (def_ins && !live_range_start [dreg]) {
10660 live_range_start [dreg] = def_ins;
10661 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
10668 num_sregs = mono_inst_get_src_registers (ins, sregs);
10669 for (srcindex = 0; srcindex < 3; ++srcindex) {
10670 regtype = spec [MONO_INST_SRC1 + srcindex];
10671 sreg = sregs [srcindex];
10673 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10674 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10675 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10676 MonoInst *use_ins = ins;
10677 MonoInst *load_ins;
10678 guint32 load_opcode;
10680 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register, just rename */
10681 sregs [srcindex] = var->dreg;
10682 //mono_inst_set_src_registers (ins, sregs);
10683 live_range_end [sreg] = use_ins;
10684 live_range_end_bb [sreg] = bb;
10688 g_assert (var->opcode == OP_REGOFFSET);
10690 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10692 g_assert (load_opcode != OP_LOADV_MEMBASE);
10694 if (vreg_to_lvreg [sreg]) {
10695 g_assert (vreg_to_lvreg [sreg] != -1);
10697 /* The variable is already loaded to an lvreg */
10698 if (G_UNLIKELY (cfg->verbose_level > 2))
10699 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10700 sregs [srcindex] = vreg_to_lvreg [sreg];
10701 //mono_inst_set_src_registers (ins, sregs);
10705 /* Try to fuse the load into the instruction */
10706 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10707 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10708 sregs [0] = var->inst_basereg;
10709 //mono_inst_set_src_registers (ins, sregs);
10710 ins->inst_offset = var->inst_offset;
10711 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10712 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10713 sregs [1] = var->inst_basereg;
10714 //mono_inst_set_src_registers (ins, sregs);
10715 ins->inst_offset = var->inst_offset;
10717 if (MONO_IS_REAL_MOVE (ins)) {
10718 ins->opcode = OP_NOP;
10721 //printf ("%d ", srcindex); mono_print_ins (ins);
10723 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember this lvreg so later uses in the same bblock avoid a reload */
10725 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10726 if (var->dreg == prev_dreg) {
10728 * sreg refers to the value loaded by the load
10729 * emitted below, but we need to use ins->dreg
10730 * since it refers to the store emitted earlier.
10734 g_assert (sreg != -1);
10735 vreg_to_lvreg [var->dreg] = sreg;
10736 g_assert (lvregs_len < 1024);
10737 lvregs [lvregs_len ++] = var->dreg;
10741 sregs [srcindex] = sreg;
10742 //mono_inst_set_src_registers (ins, sregs);
10744 if (regtype == 'l') {
10745 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10746 mono_bblock_insert_before_ins (bb, ins, load_ins);
10747 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10748 mono_bblock_insert_before_ins (bb, ins, load_ins);
10749 use_ins = load_ins;
10752 #if SIZEOF_REGISTER == 4
10753 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10755 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10756 mono_bblock_insert_before_ins (bb, ins, load_ins);
10757 use_ins = load_ins;
10761 if (var->dreg < orig_next_vreg) {
10762 live_range_end [var->dreg] = use_ins;
10763 live_range_end_bb [var->dreg] = bb;
10767 mono_inst_set_src_registers (ins, sregs);
/* Now that the sregs have been processed, cache the dreg's lvreg (see above) */
10769 if (dest_has_lvreg) {
10770 g_assert (ins->dreg != -1);
10771 vreg_to_lvreg [prev_dreg] = ins->dreg;
10772 g_assert (lvregs_len < 1024);
10773 lvregs [lvregs_len ++] = prev_dreg;
10774 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
10778 tmp_reg = ins->dreg;
10779 ins->dreg = ins->sreg2;
10780 ins->sreg2 = tmp_reg;
/* Calls can clobber registers, so cached lvregs become invalid */
10783 if (MONO_IS_CALL (ins)) {
10784 /* Clear vreg_to_lvreg array */
10785 for (i = 0; i < lvregs_len; i++)
10786 vreg_to_lvreg [lvregs [i]] = 0;
10788 } else if (ins->opcode == OP_NOP) {
10790 MONO_INST_NULLIFY_SREGS (ins);
10793 if (cfg->verbose_level > 2)
10794 mono_print_ins_index (1, ins);
10798 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10800 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10801 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10803 for (i = 0; i < cfg->num_varinfo; ++i) {
10804 int vreg = MONO_VARINFO (cfg, i)->vreg;
10807 if (live_range_start [vreg]) {
10808 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10810 ins->inst_c1 = vreg;
10811 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10813 if (live_range_end [vreg]) {
10814 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10816 ins->inst_c1 = vreg;
10817 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10822 g_free (live_range_start);
10823 g_free (live_range_end);
10824 g_free (live_range_start_bb);
10825 g_free (live_range_end_bb);
10830 * - use 'iadd' instead of 'int_add'
10831 * - handling ovf opcodes: decompose in method_to_ir.
10832 * - unify iregs/fregs
10833 * -> partly done, the missing parts are:
10834 * - a more complete unification would involve unifying the hregs as well, so
10835 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10836 * would no longer map to the machine hregs, so the code generators would need to
10837 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10838 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10839 * fp/non-fp branches speeds it up by about 15%.
10840 * - use sext/zext opcodes instead of shifts
10842 * - get rid of TEMPLOADs if possible and use vregs instead
10843 * - clean up usage of OP_P/OP_ opcodes
10844 * - cleanup usage of DUMMY_USE
10845 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10847 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10848 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10849 * - make sure handle_stack_args () is called before the branch is emitted
10850 * - when the new IR is done, get rid of all unused stuff
10851 * - COMPARE/BEQ as separate instructions or unify them ?
10852 * - keeping them separate allows specialized compare instructions like
10853 * compare_imm, compare_membase
10854 * - most back ends unify fp compare+branch, fp compare+ceq
10855 * - integrate mono_save_args into inline_method
10856 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10857 * - handle long shift opts on 32 bit platforms somehow: they require
10858 * 3 sregs (2 for arg1 and 1 for arg2)
10859 * - make byref a 'normal' type.
10860 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10861 * variable if needed.
10862 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10863 * like inline_method.
10864 * - remove inlining restrictions
10865 * - fix LNEG and enable cfold of INEG
10866 * - generalize x86 optimizations like ldelema as a peephole optimization
10867 * - add store_mem_imm for amd64
10868 * - optimize the loading of the interruption flag in the managed->native wrappers
10869 * - avoid special handling of OP_NOP in passes
10870 * - move code inserting instructions into one function/macro.
10871 * - try a coalescing phase after liveness analysis
10872 * - add float -> vreg conversion + local optimizations on !x86
10873 * - figure out how to handle decomposed branches during optimizations, ie.
10874 * compare+branch, op_jump_table+op_br etc.
10875 * - promote RuntimeXHandles to vregs
10876 * - vtype cleanups:
10877 * - add a NEW_VARLOADA_VREG macro
10878 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10879 * accessing vtype fields.
10880 * - get rid of I8CONST on 64 bit platforms
10881 * - dealing with the increase in code size due to branches created during opcode
10883 * - use extended basic blocks
10884 * - all parts of the JIT
10885 * - handle_global_vregs () && local regalloc
10886 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10887 * - sources of increase in code size:
10890 * - isinst and castclass
10891 * - lvregs not allocated to global registers even if used multiple times
10892 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10894 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10895 * - add all micro optimizations from the old JIT
10896 * - put tree optimizations into the deadce pass
10897 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10898 * specific function.
10899 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10900 * fcompare + branchCC.
10901 * - create a helper function for allocating a stack slot, taking into account
10902 * MONO_CFG_HAS_SPILLUP.
10904 * - merge the ia64 switch changes.
10905 * - optimize mono_regstate2_alloc_int/float.
10906 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10907 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10908 * parts of the tree could be separated by other instructions, killing the tree
10909 * arguments, or stores killing loads etc. Also, should we fold loads into other
10910 * instructions if the result of the load is used multiple times ?
10911 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10912 * - LAST MERGE: 108395.
10913 * - when returning vtypes in registers, generate IR and append it to the end of the
10914 * last bb instead of doing it in the epilog.
10915 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10923 - When to decompose opcodes:
10924 - earlier: this makes some optimizations hard to implement, since the low level IR
10925 no longer contains the necessary information. But it is easier to do.
10926 - later: harder to implement, enables more optimizations.
10927 - Branches inside bblocks:
10928 - created when decomposing complex opcodes.
10929 - branches to another bblock: harmless, but not tracked by the branch
10930 optimizations, so need to branch to a label at the start of the bblock.
10931 - branches to inside the same bblock: very problematic, trips up the local
10932 reg allocator. Can be fixed by splitting the current bblock, but that is a
10933 complex operation, since some local vregs can become global vregs etc.
10934 - Local/global vregs:
10935 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10936 local register allocator.
10937 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10938 structure, created by mono_create_var (). Assigned to hregs or the stack by
10939 the global register allocator.
10940 - When to do optimizations like alu->alu_imm:
10941 - earlier -> saves work later on since the IR will be smaller/simpler
10942 - later -> can work on more instructions
10943 - Handling of valuetypes:
10944 - When a vtype is pushed on the stack, a new temporary is created, an
10945 instruction computing its address (LDADDR) is emitted and pushed on
10946 the stack. Need to optimize cases when the vtype is used immediately as in
10947 argument passing, stloc etc.
10948 - Instead of the to_end stuff in the old JIT, simply call the function handling
10949 the values on the stack before emitting the last instruction of the bb.
10952 #endif /* DISABLE_JIT */