2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
60 #include "debugger-agent.h"
/*
 * Tuning knobs and bail-out macros used throughout method_to_ir ().
 * The *_FAILURE macros record an exception type (and usually a message)
 * on the MonoCompile and jump to the local `exception_exit` label.
 * NOTE(review): several macro bodies have lines elided in this excerpt
 * (closing `} while (0)` lines are not visible) — consult the full file
 * before editing.
 */
62 #define BRANCH_COST 100
63 #define INLINE_LENGTH_LIMIT 20
/* INLINE_FAILURE: give up on inlining the current callee; only acts when
 * we are compiling an inlined, non-wrapper method (cfg->method != method). */
64 #define INLINE_FAILURE do {\
65 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
68 #define CHECK_CFG_EXCEPTION do {\
69 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
72 #define METHOD_ACCESS_FAILURE do { \
73 char *method_fname = mono_method_full_name (method, TRUE); \
74 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
75 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
76 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
77 g_free (method_fname); \
78 g_free (cil_method_fname); \
79 goto exception_exit; \
81 #define FIELD_ACCESS_FAILURE do { \
82 char *method_fname = mono_method_full_name (method, TRUE); \
83 char *field_fname = mono_field_full_name (field); \
84 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
85 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
86 g_free (method_fname); \
87 g_free (field_fname); \
88 goto exception_exit; \
90 #define GENERIC_SHARING_FAILURE(opcode) do { \
91 if (cfg->generic_sharing_context) { \
92 if (cfg->verbose_level > 2) \
93 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
94 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
95 goto exception_exit; \
99 /* Determine whether 'ins' represents a load of the 'this' argument */
100 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in
 * this file (ldind.*/stind.* CIL opcodes -> *_MEMBASE IR opcodes). */
102 static int ldind_to_load_membase (int opcode);
103 static int stind_to_store_membase (int opcode);
105 int mono_op_to_op_imm (int opcode);
106 int mono_op_to_op_imm_noemul (int opcode);
108 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
109 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
110 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
112 /* helper methods signature */
/* Signatures of the runtime trampolines called from generated code;
 * the definitions live elsewhere in the JIT. */
113 extern MonoMethodSignature *helper_sig_class_init_trampoline;
114 extern MonoMethodSignature *helper_sig_domain_get;
115 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
117 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 * Instruction metadata
/*
 * MINI_OP/MINI_OP3 are redefined before each inclusion of "mini-ops.h"
 * so the same opcode list expands to different per-opcode tables.
 * First expansion: the dest/src1/src2(/src3) register-class characters.
 */
128 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
129 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
135 #if SIZEOF_REGISTER == 8
140 /* keep in sync with the enum in mini.h */
143 #include "mini-ops.h"
/* Second expansion: number of source registers each opcode uses. */
148 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
149 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
151 * This should contain the index of the last sreg + 1. This is not the same
152 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
154 const gint8 ins_sreg_counts[] = {
155 #include "mini-ops.h"
/* Maps JIT icall names to their metadata; filled in elsewhere. */
160 extern GHashTable *jit_icall_name_hash;
/* Initialize a MonoMethodVar's liveness range to "not yet used". */
162 #define MONO_INIT_VARINFO(vi,id) do { \
163 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set all three source registers of an instruction from an array.
 * (Return type/braces elided in this excerpt.) */
169 mono_inst_set_src_registers (MonoInst *ins, int *regs)
171 ins->sreg1 = regs [0];
172 ins->sreg2 = regs [1];
173 ins->sreg3 = regs [2];
/* Public (non-inline) wrappers around the cfg-local virtual register
 * allocators: integer, float, pointer-sized, and stack-type-driven. */
177 mono_alloc_ireg (MonoCompile *cfg)
179 return alloc_ireg (cfg);
183 mono_alloc_freg (MonoCompile *cfg)
185 return alloc_freg (cfg);
189 mono_alloc_preg (MonoCompile *cfg)
191 return alloc_preg (cfg);
195 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
197 return alloc_dreg (cfg, stack_type);
/*
 * Map a MonoType to the register-move opcode class used when copying a
 * value of that type.  NOTE(review): many case labels and return
 * statements are elided in this excerpt — do not infer the complete
 * mapping from what is visible here.
 */
201 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
207 switch (type->type) {
210 case MONO_TYPE_BOOLEAN:
222 case MONO_TYPE_FNPTR:
224 case MONO_TYPE_CLASS:
225 case MONO_TYPE_STRING:
226 case MONO_TYPE_OBJECT:
227 case MONO_TYPE_SZARRAY:
228 case MONO_TYPE_ARRAY:
232 #if SIZEOF_REGISTER == 8
241 case MONO_TYPE_VALUETYPE:
/* Enums are moved as their underlying primitive type. */
242 if (type->data.klass->enumtype) {
243 type = mono_class_enum_basetype (type->data.klass);
246 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
249 case MONO_TYPE_TYPEDBYREF:
/* Inflated generic instances are handled via the container class. */
251 case MONO_TYPE_GENERICINST:
252 type = &type->data.generic_class->container_class->byval_arg;
/* Open generic parameters are only legal under generic sharing. */
256 g_assert (cfg->generic_sharing_context);
259 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: dump a basic block to stdout — its predecessor list
 * (block number and dfn), successor list, then every instruction.
 */
265 mono_print_bb (MonoBasicBlock *bb, const char *msg)
270 printf ("\n%s %d: [IN: ", msg, bb->block_num);
271 for (i = 0; i < bb->in_count; ++i)
272 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
274 for (i = 0; i < bb->out_count; ++i)
275 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
277 for (tree = bb->code; tree; tree = tree->next)
278 mono_print_ins_index (-1, tree);
282 * Can't put this at the beginning, since other files reference stuff from this
/* UNVERIFIED: bail out of the current method as unverifiable; optionally
 * break into the debugger first (--break-on-unverified). */
287 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* GET_BBLOCK: fetch (or lazily create and register) the basic block that
 * starts at CIL offset `ip`, via the cil_offset_to_bb cache. */
289 #define GET_BBLOCK(cfg,tblock,ip) do { \
290 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
293 NEW_BBLOCK (cfg, (tblock)); \
294 (tblock)->cil_code = (ip); \
295 ADD_BBLOCK (cfg, (tblock)); \
299 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86 LEA (sr1 + sr2<<shift + imm) into a
 * freshly allocated pointer register. */
300 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
301 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
302 (dest)->dreg = alloc_preg ((cfg)); \
303 (dest)->sreg1 = (sr1); \
304 (dest)->sreg2 = (sr2); \
305 (dest)->inst_imm = (imm); \
306 (dest)->backend.shift_amount = (shift); \
307 MONO_ADD_INS ((cfg)->cbb, (dest)); \
311 #if SIZEOF_REGISTER == 8
/* ADD_WIDEN_OP: on 64-bit, sign-extend an I4 operand paired with a
 * native-pointer operand before a binary op; no-op on 32-bit. */
312 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
313 /* FIXME: Need to add many more cases */ \
314 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 int dr = alloc_preg (cfg); \
317 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
318 (ins)->sreg2 = widen->dreg; \
322 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP / ADD_UNOP (below): pop operand(s) from the eval stack,
 * type-check via type_from_op (), allocate a dreg and push the
 * (possibly decomposed) result.  ADD_BINCOND builds the compare+branch
 * pair for two-way conditional branches, creating/linking both target
 * bblocks and flushing the eval stack at the block boundary. */
325 #define ADD_BINOP(op) do { \
326 MONO_INST_NEW (cfg, ins, (op)); \
328 ins->sreg1 = sp [0]->dreg; \
329 ins->sreg2 = sp [1]->dreg; \
330 type_from_op (ins, sp [0], sp [1]); \
332 /* Have to insert a widening op */ \
333 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
334 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
335 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
347 *sp++ = mono_decompose_opcode (cfg, ins); \
350 #define ADD_BINCOND(next_block) do { \
353 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
354 cmp->sreg1 = sp [0]->dreg; \
355 cmp->sreg2 = sp [1]->dreg; \
356 type_from_op (cmp, sp [0], sp [1]); \
358 type_from_op (ins, sp [0], sp [1]); \
359 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
360 GET_BBLOCK (cfg, tblock, target); \
361 link_bblock (cfg, bblock, tblock); \
362 ins->inst_true_bb = tblock; \
363 if ((next_block)) { \
364 link_bblock (cfg, bblock, (next_block)); \
365 ins->inst_false_bb = (next_block); \
366 start_new_bblock = 1; \
368 GET_BBLOCK (cfg, tblock, ip); \
369 link_bblock (cfg, bblock, tblock); \
370 ins->inst_false_bb = tblock; \
371 start_new_bblock = 2; \
373 if (sp != stack_start) { \
374 handle_stack_args (cfg, stack_start, sp - stack_start); \
375 CHECK_UNVERIFIABLE (cfg); \
377 MONO_ADD_INS (bblock, cmp); \
378 MONO_ADD_INS (bblock, ins); \
382 * link_bblock: Links two basic blocks
384 * links two basic blocks in the control flow graph, the 'from'
385 * argument is the starting block and the 'to' argument is the block
386 * the control flow ends to after 'from'.
389 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
391 MonoBasicBlock **newa;
/* Optional tracing of the CFG edge being added. */
395 if (from->cil_code) {
397 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
399 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
402 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
404 printf ("edge from entry to exit\n");
/* Don't add a duplicate outgoing edge. */
409 for (i = 0; i < from->out_count; ++i) {
410 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool-allocated, never freed). */
416 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
417 for (i = 0; i < from->out_count; ++i) {
418 newa [i] = from->out_bb [i];
/* Same dance for the incoming-edge list of 'to'. */
426 for (i = 0; i < to->in_count; ++i) {
427 if (from == to->in_bb [i]) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
434 for (i = 0; i < to->in_count; ++i) {
435 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
444 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
446 link_bblock (cfg, from, to);
450 * mono_find_block_region:
452 * We mark each basic block with a region ID. We use that to avoid BB
453 * optimizations when blocks are in different regions.
456 * A region token that encodes where this region is, and information
457 * about the clause owner for this block.
459 * The region encodes the try/catch/filter clause that owns this block
460 * as well as the type. -1 is a special value that represents a block
461 * that is in none of try/catch/filter.
464 mono_find_block_region (MonoCompile *cfg, int offset)
466 MonoMethod *method = cfg->method;
467 MonoMethodHeader *header = mono_method_get_header (method);
468 MonoExceptionClause *clause;
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* Filter code runs between filter_offset and handler_offset. */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Anything else in a handler is a catch region. */
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not in the handler. */
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the handler blocks of clauses of the given `type` that the
 * branch from `ip` to `target` leaves (i.e. `ip` is inside the clause
 * but `target` is not) — used e.g. to run finally handlers on leave.
 * Returns a GList of MonoBasicBlock* (caller owns the list).
 */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethod *method = cfg->method;
497 MonoMethodHeader *header = mono_method_get_header (method);
498 MonoExceptionClause *clause;
499 MonoBasicBlock *handler;
503 for (i = 0; i < header->num_clauses; ++i) {
504 clause = &header->clauses [i];
/* Edge exits this clause: source inside, target outside. */
505 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
506 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
507 if (clause->flags == type) {
508 handler = cfg->cil_offset_to_bb [clause->handler_offset];
510 res = g_list_append (res, handler);
/*
 * Get-or-create the stack-pointer spill variable for an EH region,
 * cached in cfg->spvars keyed by region token.
 */
518 mono_create_spvar_for_region (MonoCompile *cfg, int region)
522 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
526 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
527 /* prevent it from being register allocated */
528 var->flags |= MONO_INST_INDIRECT;
530 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a
 * handler at the given CIL offset. */
534 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
536 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for a handler offset,
 * cached in cfg->exvars. */
540 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
544 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
548 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
558 * Returns the type used in the eval stack when @type is loaded.
559 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
562 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
566 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
568 inst->type = STACK_MP;
573 switch (type->type) {
575 inst->type = STACK_INV;
/* All integer types narrower than native int evaluate as I4. */
579 case MONO_TYPE_BOOLEAN:
585 inst->type = STACK_I4;
590 case MONO_TYPE_FNPTR:
591 inst->type = STACK_PTR;
593 case MONO_TYPE_CLASS:
594 case MONO_TYPE_STRING:
595 case MONO_TYPE_OBJECT:
596 case MONO_TYPE_SZARRAY:
597 case MONO_TYPE_ARRAY:
598 inst->type = STACK_OBJ;
602 inst->type = STACK_I8;
606 inst->type = STACK_R8;
608 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying primitive type. */
609 if (type->data.klass->enumtype) {
610 type = mono_class_enum_basetype (type->data.klass);
614 inst->type = STACK_VTYPE;
617 case MONO_TYPE_TYPEDBYREF:
618 inst->klass = mono_defaults.typed_reference_class;
619 inst->type = STACK_VTYPE;
621 case MONO_TYPE_GENERICINST:
622 type = &type->data.generic_class->container_class->byval_arg;
625 case MONO_TYPE_MVAR :
626 /* FIXME: all the arguments must be references for now,
627 * later look inside cfg and see if the arg num is
630 g_assert (cfg->generic_sharing_context);
631 inst->type = STACK_OBJ;
634 g_error ("unknown type 0x%02x in eval stack type", type->type);
639 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [src1->type][src2->type];
 * STACK_INV marks an invalid operand pairing.  Row/column order follows
 * the MonoStackType enum (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE). */
642 bin_num_table [STACK_MAX] [STACK_MAX] = {
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
655 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
658 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...); floats and
 * references are invalid here. */
660 bin_int_table [STACK_MAX] [STACK_MAX] = {
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability classes: 0 = invalid, nonzero values distinguish the
 * degrees of verifiability of the comparison (see uses in type_from_op). */
672 bin_comp_table [STACK_MAX] [STACK_MAX] = {
673 /* Inv i L p F & O vt */
675 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
676 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
677 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
678 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
679 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
680 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
681 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
684 /* reduce the size of this table */
/* Result type of shifts: the left operand's type wins; shift amount
 * must be I4 or PTR. */
686 shift_table [STACK_MAX] [STACK_MAX] = {
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
698 * Tables to map from the non-specific opcode to the matching
699 * type-specific opcode.
/* Each *_op_map table stores (specific opcode - generic opcode) deltas
 * indexed by stack type, so `ins->opcode += table [type]` specializes
 * a generic CIL opcode to its I/L/P/F variant. */
701 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
703 binops_op_map [STACK_MAX] = {
704 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
707 /* handles from CEE_NEG to CEE_CONV_U8 */
709 unops_op_map [STACK_MAX] = {
710 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
713 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
715 ovfops_op_map [STACK_MAX] = {
716 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
719 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
721 ovf2ops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
725 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
727 ovf3ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
731 /* handles from CEE_BEQ to CEE_BLT_UN */
733 beqops_op_map [STACK_MAX] = {
734 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
737 /* handles from CEE_CEQ to CEE_CLT_UN */
739 ceqops_op_map [STACK_MAX] = {
740 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
744 * Sets ins->type (the type on the eval stack) according to the
745 * type of the opcode and the arguments to it.
746 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
748 * FIXME: this function sets ins->type unconditionally in some cases, but
749 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels / break statements are elided in this
 * excerpt; the visible fragments only sketch each opcode group. */
752 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
754 switch (ins->opcode) {
/* Arithmetic binops: validate via bin_num_table, then specialize the
 * generic opcode with the binops_op_map delta. */
761 /* FIXME: check unverifiable args for STACK_MP */
762 ins->type = bin_num_table [src1->type] [src2->type];
763 ins->opcode += binops_op_map [ins->type];
/* Bitwise/integer-only binops. */
770 ins->type = bin_int_table [src1->type] [src2->type];
771 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type comes from the left operand. */
776 ins->type = shift_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I compare based on operand width/kind. */
782 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
783 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
784 ins->opcode = OP_LCOMPARE;
785 else if (src1->type == STACK_R8)
786 ins->opcode = OP_FCOMPARE;
788 ins->opcode = OP_ICOMPARE;
790 case OP_ICOMPARE_IMM:
791 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
792 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
793 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches: specialize beq-family by operand type. */
805 ins->opcode += beqops_op_map [src1->type];
808 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
809 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt etc.: only comparability class 1 is verifiable here. */
815 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
816 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not): result type from neg_table. */
820 ins->type = neg_table [src1->type];
821 ins->opcode += unops_op_map [ins->type];
824 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
825 ins->type = src1->type;
827 ins->type = STACK_INV;
828 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to small ints produce I4 on the stack. */
834 ins->type = STACK_I4;
835 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
838 ins->type = STACK_R8;
839 switch (src1->type) {
842 ins->opcode = OP_ICONV_TO_R_UN;
845 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checking conversions to 32-bit. */
849 case CEE_CONV_OVF_I1:
850 case CEE_CONV_OVF_U1:
851 case CEE_CONV_OVF_I2:
852 case CEE_CONV_OVF_U2:
853 case CEE_CONV_OVF_I4:
854 case CEE_CONV_OVF_U4:
855 ins->type = STACK_I4;
856 ins->opcode += ovf3ops_op_map [src1->type];
858 case CEE_CONV_OVF_I_UN:
859 case CEE_CONV_OVF_U_UN:
860 ins->type = STACK_PTR;
861 ins->opcode += ovf2ops_op_map [src1->type];
863 case CEE_CONV_OVF_I1_UN:
864 case CEE_CONV_OVF_I2_UN:
865 case CEE_CONV_OVF_I4_UN:
866 case CEE_CONV_OVF_U1_UN:
867 case CEE_CONV_OVF_U2_UN:
868 case CEE_CONV_OVF_U4_UN:
869 ins->type = STACK_I4;
870 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width
 * and on whether registers are 64-bit. */
873 ins->type = STACK_PTR;
874 switch (src1->type) {
876 ins->opcode = OP_ICONV_TO_U;
880 #if SIZEOF_REGISTER == 8
881 ins->opcode = OP_LCONV_TO_U;
883 ins->opcode = OP_MOVE;
887 ins->opcode = OP_LCONV_TO_U;
890 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
896 ins->type = STACK_I8;
897 ins->opcode += unops_op_map [src1->type];
899 case CEE_CONV_OVF_I8:
900 case CEE_CONV_OVF_U8:
901 ins->type = STACK_I8;
902 ins->opcode += ovf3ops_op_map [src1->type];
904 case CEE_CONV_OVF_U8_UN:
905 case CEE_CONV_OVF_I8_UN:
906 ins->type = STACK_I8;
907 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
911 ins->type = STACK_R8;
912 ins->opcode += unops_op_map [src1->type];
915 ins->type = STACK_R8;
/* Overflow-checked arithmetic with native-int result families. */
919 ins->type = STACK_I4;
920 ins->opcode += ovfops_op_map [src1->type];
925 ins->type = STACK_PTR;
926 ins->opcode += ovfops_op_map [src1->type];
/* add/sub/mul.ovf: floats are invalid for overflow-checked math. */
934 ins->type = bin_num_table [src1->type] [src2->type];
935 ins->opcode += ovfops_op_map [src1->type];
936 if (ins->type == STACK_R8)
937 ins->type = STACK_INV;
/* Loads: stack type determined by the load width. */
939 case OP_LOAD_MEMBASE:
940 ins->type = STACK_PTR;
942 case OP_LOADI1_MEMBASE:
943 case OP_LOADU1_MEMBASE:
944 case OP_LOADI2_MEMBASE:
945 case OP_LOADU2_MEMBASE:
946 case OP_LOADI4_MEMBASE:
947 case OP_LOADU4_MEMBASE:
948 ins->type = STACK_PTR;
950 case OP_LOADI8_MEMBASE:
951 ins->type = STACK_I8;
953 case OP_LOADR4_MEMBASE:
954 case OP_LOADR8_MEMBASE:
955 ins->type = STACK_R8;
958 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object klass when nothing better known. */
962 if (ins->type == STACK_MP)
963 ins->klass = mono_defaults.object_class;
/* Maps ldind.* element kinds to the resulting eval stack type. */
968 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
974 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Verify that the values in `args` are compatible with the method
 * signature `sig` (used when inlining / checking call sites).
 * NOTE(review): most of the body is elided in this excerpt; return
 * statements and several checks are not visible.
 */
979 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
983 switch (args->type) {
993 for (i = 0; i < sig->param_count; ++i) {
994 switch (args [i].type) {
/* Managed pointers must match byref parameters and vice versa. */
998 if (!sig->params [i]->byref)
1002 if (sig->params [i]->byref)
1004 switch (sig->params [i]->type) {
1005 case MONO_TYPE_CLASS:
1006 case MONO_TYPE_STRING:
1007 case MONO_TYPE_OBJECT:
1008 case MONO_TYPE_SZARRAY:
1009 case MONO_TYPE_ARRAY:
1016 if (sig->params [i]->byref)
1018 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1027 /*if (!param_table [args [i].type] [sig->params [i]->type])
1035 * When we need a pointer to the current domain many times in a method, we
1036 * call mono_domain_get() once and we store the result in a local variable.
1037 * This function returns the variable that represents the MonoDomain*.
1039 inline static MonoInst *
1040 mono_get_domainvar (MonoCompile *cfg)
1042 if (!cfg->domainvar)
1043 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1044 return cfg->domainvar;
1048 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful when compiling AOT
 * on architectures that define MONO_ARCH_NEED_GOT_VAR. */
1052 mono_get_got_var (MonoCompile *cfg)
1054 #ifdef MONO_ARCH_NEED_GOT_VAR
1055 if (!cfg->compile_aot)
1057 if (!cfg->got_var) {
1058 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1060 return cfg->got_var;
/* Lazily create the runtime-generic-context (rgctx) variable; requires
 * a generic sharing context and forces the var onto the stack. */
1067 mono_get_vtable_var (MonoCompile *cfg)
1069 g_assert (cfg->generic_sharing_context);
1071 if (!cfg->rgctx_var) {
1072 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1073 /* force the var to be stack allocated */
1074 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1077 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a canonical MonoType. */
1081 type_from_stack_type (MonoInst *ins) {
1082 switch (ins->type) {
1083 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1084 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1085 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1086 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* STACK_MP: a managed pointer to the instruction's klass. */
1088 return &ins->klass->this_arg;
1089 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1090 case STACK_VTYPE: return &ins->klass->byval_arg;
1092 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: MonoType -> eval-stack type.  NOTE(review): the
 * return statements for most cases are elided in this excerpt. */
1097 static G_GNUC_UNUSED int
1098 type_to_stack_type (MonoType *t)
1100 switch (mono_type_get_underlying_type (t)->type) {
1103 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_CHAR:
1113 case MONO_TYPE_FNPTR:
1115 case MONO_TYPE_CLASS:
1116 case MONO_TYPE_STRING:
1117 case MONO_TYPE_OBJECT:
1118 case MONO_TYPE_SZARRAY:
1119 case MONO_TYPE_ARRAY:
1127 case MONO_TYPE_VALUETYPE:
1128 case MONO_TYPE_TYPEDBYREF:
1130 case MONO_TYPE_GENERICINST:
1131 if (mono_type_generic_inst_is_valuetype (t))
1137 g_assert_not_reached ();
/*
 * Map a ldelem/stelem CIL opcode to the element MonoClass it accesses.
 * (Case labels are partially elided in this excerpt.)
 */
1144 array_access_to_klass (int opcode)
1148 return mono_defaults.byte_class;
1150 return mono_defaults.uint16_class;
1153 return mono_defaults.int_class;
1156 return mono_defaults.sbyte_class;
1159 return mono_defaults.int16_class;
1162 return mono_defaults.int32_class;
1164 return mono_defaults.uint32_class;
1167 return mono_defaults.int64_class;
1170 return mono_defaults.single_class;
1173 return mono_defaults.double_class;
1174 case CEE_LDELEM_REF:
1175 case CEE_STELEM_REF:
1176 return mono_defaults.object_class;
1178 g_assert_not_reached ();
1184 * We try to share variables when possible
/* Return a local variable for eval-stack slot `slot` holding a value of
 * ins's stack type, reusing a cached one (cfg->intvars) when the
 * (slot, type) pair was seen before. */
1187 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1192 /* inlining can result in deeper stacks */
1193 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1194 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1196 pos = ins->type - 1 + slot * STACK_MAX;
1198 switch (ins->type) {
1205 if ((vnum = cfg->intvars [pos]))
1206 return cfg->varinfo [vnum];
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable types always get a fresh variable. */
1211 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record (image, token) for `key` so the AOT compiler can re-resolve it. */
1217 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1220 * Don't use this if a generic_context is set, since that means AOT can't
1221 * look up the method using just the image+token.
1222 * table == 0 means this is a reference made from a wrapper.
1224 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1225 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1226 jump_info_token->image = image;
1227 jump_info_token->token = token;
1228 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single joint point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the joint point).
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): several statements (breaks, closing braces, some
 * assignments) are elided in this excerpt of the body. */
1245 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1248 MonoBasicBlock *bb = cfg->cbb;
1249 MonoBasicBlock *outb;
1250 MonoInst *inst, **locals;
1255 if (cfg->verbose_level > 3)
1256 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables hold the out-stack. */
1257 if (!bb->out_scount) {
1258 bb->out_scount = count;
1259 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1261 for (i = 0; i < bb->out_count; ++i) {
1262 outb = bb->out_bb [i];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb->flags & BB_EXCEPTION_HANDLER)
1266 //printf (" %d", outb->block_num);
1267 if (outb->in_stack) {
1269 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out-stack variables. */
1275 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1276 for (i = 0; i < count; ++i) {
1278 * try to reuse temps already allocated for this purpouse, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1289 if (cfg->inlined_method)
1290 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1292 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out-stack as the in-stack of each successor; a depth
 * mismatch at a join point makes the method unverifiable. */
1297 for (i = 0; i < bb->out_count; ++i) {
1298 outb = bb->out_bb [i];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb->flags & BB_EXCEPTION_HANDLER)
1302 if (outb->in_scount) {
1303 if (outb->in_scount != bb->out_scount) {
1304 cfg->unverifiable = TRUE;
1307 continue; /* check they are the same locals */
1309 outb->in_scount = count;
1310 outb->in_stack = bb->out_stack;
/* Store each stack slot into its temp and replace it on the stack. */
1313 locals = bb->out_stack;
1315 for (i = 0; i < count; ++i) {
1316 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1317 inst->cil_code = sp [i]->cil_code;
1318 sp [i] = locals [i];
1319 if (cfg->verbose_level > 3)
1320 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1332 /* Find a bblock which has a different in_stack */
1334 while (bindex < bb->out_count) {
1335 outb = bb->out_bb [bindex];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb->flags & BB_EXCEPTION_HANDLER) {
1341 if (outb->in_stack != locals) {
1342 for (i = 0; i < count; ++i) {
1343 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1344 inst->cil_code = sp [i]->cil_code;
1345 sp [i] = locals [i];
1346 if (cfg->verbose_level > 3)
1347 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1349 locals = outb->in_stack;
1358 /* Emit code which loads interface_offsets [klass->interface_id]
1359  * The array is stored in memory before vtable.
1362 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1364 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time, so materialize the
 * (adjusted) id as a patchable constant and index off the vtable at runtime. */
1365 int ioffset_reg = alloc_preg (cfg);
1366 int iid_reg = alloc_preg (cfg);
1368 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1369 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a compile-time constant; the offsets array grows downward
 * from the vtable pointer, hence the negative, (id + 1)-scaled displacement. */
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1378  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1379  * stored in "klass_reg" implements the interface "klass".
1382 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1384 int ibitmap_reg = alloc_preg (cfg);
1385 int ibitmap_byte_reg = alloc_preg (cfg);
/* The class carries a bitmap with one bit per interface id. */
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1389 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at
 * runtime, since the interface id is only known via a patchable constant. */
1390 int iid_reg = alloc_preg (cfg);
1391 int shifted_iid_reg = alloc_preg (cfg);
1392 int ibitmap_byte_address_reg = alloc_preg (cfg);
1393 int masked_iid_reg = alloc_preg (cfg);
1394 int iid_one_bit_reg = alloc_preg (cfg);
1395 int iid_bit_reg = alloc_preg (cfg);
1396 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1398 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1399 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1401 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask fold to immediates.  The signed byte load is
 * harmless here since only the masked bit is tested for nonzero. */
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1411  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1412  * stored in "vtable_reg" implements the interface "klass".
1415 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1417 int ibitmap_reg = alloc_preg (cfg);
1418 int ibitmap_byte_reg = alloc_preg (cfg);
/* Same bit-test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * fetched from the vtable instead of the class. */
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1422 if (cfg->compile_aot) {
/* AOT: index/mask computed at runtime from a patchable interface id. */
1423 int iid_reg = alloc_preg (cfg);
1424 int shifted_iid_reg = alloc_preg (cfg);
1425 int ibitmap_byte_address_reg = alloc_preg (cfg);
1426 int masked_iid_reg = alloc_preg (cfg);
1427 int iid_one_bit_reg = alloc_preg (cfg);
1428 int iid_bit_reg = alloc_preg (cfg);
1429 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1432 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1434 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset (iid >> 3) and bit mask are compile-time immediates. */
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1444  * Emit code which checks whether the interface id of @klass is smaller
1445  * than the value given by max_iid_reg.
1448 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1449 MonoBasicBlock *false_target)
1451 if (cfg->compile_aot) {
/* AOT: compare against a patchable interface-id constant. */
1452 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target, branch there on failure (isinst style);
 * otherwise throw InvalidCastException (castclass style). */
1459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1461 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1464 /* Same as above, but obtains max_iid from a vtable */
1466 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1467 MonoBasicBlock *false_target)
1469 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16 bit field, hence the unsigned 2-byte load. */
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1472 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1475 /* Same as above, but obtains max_iid from a klass */
1477 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
/* Load the 16 bit max_interface_id from the MonoClass and delegate the check. */
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subclass test: branch to true_target if the class in
 * klass_reg derives from (or is) @klass, to false_target otherwise.
 * Uses the supertypes table: klass->supertypes [klass->idepth - 1] == klass
 * iff klass_reg's class has @klass among its ancestors.
 */
1487 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1489 int idepth_reg = alloc_preg (cfg);
1490 int stypes_reg = alloc_preg (cfg);
1491 int stype = alloc_preg (cfg);
/* Beyond the default supertable size the table may be shorter than idepth,
 * so guard the indexed load with an explicit depth check. */
1493 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1500 if (cfg->compile_aot) {
/* AOT cannot embed the MonoClass* directly; use a patchable constant. */
1501 int const_reg = alloc_preg (cfg);
1502 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: compare against the MonoClass pointer as an immediate. */
1505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emit an interface cast check against the vtable in vtable_reg:
 * range-check the interface id, then test the interface bitmap bit.
 * With true/false targets this is an isinst; without, a failing test
 * throws InvalidCastException (castclass).
 */
1511 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1513 int intf_reg = alloc_preg (cfg);
1515 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1516 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1521 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1525  * Variant of the above that takes a register to the class, not the vtable.
1528 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1530 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id against the class' max_interface_id,
 * then test the per-interface bit in the class' bitmap. */
1532 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1533 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1538 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class identity check: throw InvalidCastException unless the
 * class in klass_reg is exactly @klass (no subclass test).
 */
1542 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1544 if (cfg->compile_aot) {
/* AOT: the MonoClass* must come from a patchable constant. */
1545 int const_reg = alloc_preg (cfg);
1546 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1551 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Like mini_emit_class_check, but instead of throwing, branch to @target
 * using the caller-supplied conditional branch opcode @branch_op.
 */
1555 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1557 if (cfg->compile_aot) {
1558 int const_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emit a castclass check for an array cast (klass->rank > 0 path) or a plain
 * class cast (supertypes path).  obj_reg may be -1 for recursive calls on
 * element classes, in which case the vector check below is skipped.
 * NOTE(review): the enum/ValueType special-casing below mirrors CLI array
 * covariance rules for enums sharing an underlying type — confirm against the
 * full (unelided) source before modifying.
 */
1568 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1571 int rank_reg = alloc_preg (cfg);
1572 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly. */
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1576 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1577 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Dispatch on the target element class to pick the cheapest element check. */
1579 if (klass->cast_class == mono_defaults.object_class) {
1580 int parent_reg = alloc_preg (cfg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1582 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1583 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1584 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1585 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1586 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1587 } else if (klass->cast_class == mono_defaults.enum_class) {
1588 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1589 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1590 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1592 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1593 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1596 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1597 /* Check that the object is a vector too */
/* SZARRAY means bounds == NULL; a multi-dim rank-1 array has bounds set. */
1598 int bounds_reg = alloc_preg (cfg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1601 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on failure. */
1604 int idepth_reg = alloc_preg (cfg);
1605 int stypes_reg = alloc_preg (cfg);
1606 int stype = alloc_preg (cfg);
1608 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1609 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1611 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1615 mini_emit_class_check (cfg, stype, klass);
/*
 * Emit inline code to set @size bytes at destreg+offset to @val, respecting
 * @align.  Small aligned sizes use a single immediate store; larger sizes use
 * a register source and store it in decreasing-width chunks.
 */
1620 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zeroing is supported by this helper. */
1624 g_assert (val == 0);
1629 if ((size <= 4) && (size <= align)) {
/* Single properly-aligned immediate store, selected by size. */
1632 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1635 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1640 #if SIZEOF_REGISTER == 8
1642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register once, then store chunks. */
1648 val_reg = alloc_preg (cfg);
1650 if (SIZEOF_REGISTER == 8)
1651 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1653 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until aligned.
 * This could be optimized further if necessary. */
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1665 #if !NO_UNALIGNED_ACCESS
1666 if (SIZEOF_REGISTER == 8) {
/* 64 bit targets: align to 8, then use 8-byte stores for the bulk. */
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1697 #endif /* DISABLE_JIT */
1697 #endif /* DISABLE_JIT */
/*
 * Emit inline code to copy @size bytes from srcreg+soffset to destreg+doffset,
 * honouring @align: widest loads/stores permitted by alignment, then
 * narrowing for the tail.
 */
1700 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned prefix: copy byte by byte until aligned.
 * This could be optimized further if necessary. */
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1719 #if !NO_UNALIGNED_ACCESS
1720 if (SIZEOF_REGISTER == 8) {
/* 64 bit targets: bulk copy via 8-byte load/store pairs. */
1722 cur_reg = alloc_preg (cfg);
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for whatever remains. */
1733 cur_reg = alloc_preg (cfg);
1734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a return type to the matching call opcode family, picking the
 * _REG (calli) or VIRT variant as requested.  byref returns use the
 * pointer-sized family; enums and generic insts are unwrapped and retried.
 */
1761 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1764 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1767 type = mini_get_basic_type_from_generic (gsctx, type);
1768 switch (type->type) {
1769 case MONO_TYPE_VOID:
1770 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1773 case MONO_TYPE_BOOLEAN:
1776 case MONO_TYPE_CHAR:
/* All small integer types widen to the native int call family. */
1779 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1783 case MONO_TYPE_FNPTR:
1784 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1785 case MONO_TYPE_CLASS:
1786 case MONO_TYPE_STRING:
1787 case MONO_TYPE_OBJECT:
1788 case MONO_TYPE_SZARRAY:
1789 case MONO_TYPE_ARRAY:
/* Reference types are register sized. */
1790 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1793 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1796 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1797 case MONO_TYPE_VALUETYPE:
1798 if (type->data.klass->enumtype) {
/* Enums behave like their underlying integral type. */
1799 type = mono_class_enum_basetype (type->data.klass);
1802 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1803 case MONO_TYPE_TYPEDBYREF:
1804 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1805 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open class. */
1806 type = &type->data.generic_class->container_class->byval_arg;
1809 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1815  * target_type_is_incompatible:
1816  * @cfg: MonoCompile context
1818  * Check that the item @arg on the evaluation stack can be stored
1819  * in the target type (can be a local, or field, etc).
1820  * The cfg arg can be used to check if we need verification or just
1823  * Returns: non-0 value if arg can't be stored on a target.
1826 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1828 MonoType *simple_type;
1831 if (target->byref) {
1832 /* FIXME: check that the pointed to types match */
1833 if (arg->type == STACK_MP)
1834 return arg->klass != mono_class_from_mono_type (target);
1835 if (arg->type == STACK_PTR)
/* Non-byref: strip custom mods/enum wrappers, then match stack kinds. */
1840 simple_type = mono_type_get_underlying_type (target);
1841 switch (simple_type->type) {
1842 case MONO_TYPE_VOID:
1846 case MONO_TYPE_BOOLEAN:
1849 case MONO_TYPE_CHAR:
/* Small ints accept I4 or native-int stack entries. */
1852 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1856 /* STACK_MP is needed when setting pinned locals */
1857 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1862 case MONO_TYPE_FNPTR:
1863 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1866 case MONO_TYPE_CLASS:
1867 case MONO_TYPE_STRING:
1868 case MONO_TYPE_OBJECT:
1869 case MONO_TYPE_SZARRAY:
1870 case MONO_TYPE_ARRAY:
1871 if (arg->type != STACK_OBJ)
1873 /* FIXME: check type compatibility */
1877 if (arg->type != STACK_I8)
1882 if (arg->type != STACK_R8)
1885 case MONO_TYPE_VALUETYPE:
/* Value types must match the exact klass, not just the stack kind. */
1886 if (arg->type != STACK_VTYPE)
1888 klass = mono_class_from_mono_type (simple_type);
1889 if (klass != arg->klass)
1892 case MONO_TYPE_TYPEDBYREF:
1893 if (arg->type != STACK_VTYPE)
1895 klass = mono_class_from_mono_type (simple_type);
1896 if (klass != arg->klass)
1899 case MONO_TYPE_GENERICINST:
1900 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1901 if (arg->type != STACK_VTYPE)
1903 klass = mono_class_from_mono_type (simple_type);
1904 if (klass != arg->klass)
/* Reference generic insts are checked like plain object refs. */
1908 if (arg->type != STACK_OBJ)
1910 /* FIXME: check type compatibility */
1914 case MONO_TYPE_MVAR:
1915 /* FIXME: all the arguments must be references for now,
1916  * later look inside cfg and see if the arg num is
1917  * really a reference
1919 g_assert (cfg->generic_sharing_context);
1920 if (arg->type != STACK_OBJ)
1924 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1930  * Prepare arguments for passing to a function call.
1931  * Return a non-zero value if the arguments can't be passed to the given
1933  * The type checks are not yet complete and some conversions may need
1934  * casts on 32 or 64 bit architectures.
1936  * FIXME: implement this using target_type_is_incompatible ()
1939 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1941 MonoType *simple_type;
/* 'this' must be an object ref, managed pointer, or native pointer. */
1945 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1949 for (i = 0; i < sig->param_count; ++i) {
1950 if (sig->params [i]->byref) {
1951 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve shared-generic params to their basic type before matching. */
1955 simple_type = sig->params [i];
1956 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1958 switch (simple_type->type) {
1959 case MONO_TYPE_VOID:
1964 case MONO_TYPE_BOOLEAN:
1967 case MONO_TYPE_CHAR:
1970 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1976 case MONO_TYPE_FNPTR:
/* Pointer-sized params are lenient: MP and OBJ also pass here. */
1977 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1980 case MONO_TYPE_CLASS:
1981 case MONO_TYPE_STRING:
1982 case MONO_TYPE_OBJECT:
1983 case MONO_TYPE_SZARRAY:
1984 case MONO_TYPE_ARRAY:
1985 if (args [i]->type != STACK_OBJ)
1990 if (args [i]->type != STACK_I8)
1995 if (args [i]->type != STACK_R8)
1998 case MONO_TYPE_VALUETYPE:
1999 if (simple_type->data.klass->enumtype) {
/* Enums are re-checked as their underlying integral type. */
2000 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2003 if (args [i]->type != STACK_VTYPE)
2006 case MONO_TYPE_TYPEDBYREF:
2007 if (args [i]->type != STACK_VTYPE)
2010 case MONO_TYPE_GENERICINST:
2011 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2015 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Map a CALLVIRT-family opcode to its direct-call counterpart, used when a
 * virtual call can be statically devirtualized.
 */
2023 callvirt_to_call (int opcode)
2028 case OP_VOIDCALLVIRT:
2037 g_assert_not_reached ();
/*
 * Map a CALLVIRT-family opcode to its _MEMBASE counterpart, which calls
 * through an address loaded from [basereg + offset] (vtable/IMT slot).
 */
2044 callvirt_to_call_membase (int opcode)
2048 return OP_CALL_MEMBASE;
2049 case OP_VOIDCALLVIRT:
2050 return OP_VOIDCALL_MEMBASE;
2052 return OP_FCALL_MEMBASE;
2054 return OP_LCALL_MEMBASE;
2056 return OP_VCALL_MEMBASE;
2058 g_assert_not_reached ();
2064 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Emit the hidden IMT (interface method table) argument for an interface
 * call: either pass the supplied imt_arg, or materialize the target method
 * as a constant.  Delivered via a dedicated register when the architecture
 * defines one, otherwise through the arch-specific hook.
 */
2066 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2068 #ifdef MONO_ARCH_IMT_REG
2069 int method_reg = alloc_preg (cfg);
2072 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2073 } else if (cfg->compile_aot) {
/* AOT: the MonoMethod* must come from a patchable constant. */
2074 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2077 MONO_INST_NEW (cfg, ins, OP_PCONST);
2078 ins->inst_p0 = call->method;
2079 ins->dreg = method_reg;
2080 MONO_ADD_INS (cfg->cbb, ins);
2083 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2085 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * Allocate a MonoJumpInfo patch record from @mp describing a relocation of
 * @type at IL offset @ip targeting @target.  Mempool-owned: freed with the
 * pool, never individually.
 */
2090 static MonoJumpInfo *
2091 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2093 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2097 ji->data.target = target;
2102 inline static MonoInst*
2103 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Build a MonoCallInst for a call with the given signature and argument
 * instructions.  Selects the opcode from the return type (or OP_TAILCALL),
 * arranges valuetype-return plumbing, performs the SOFT_FLOAT r8->r4
 * argument conversion where needed, and lets the backend emit the out-args.
 * The instruction is NOT added to a bblock here — callers do that.
 */
2105 inline static MonoCallInst *
2106 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2107 MonoInst **args, int calli, int virtual, int tail)
2110 #ifdef MONO_ARCH_SOFT_FLOAT
2115 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2117 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2120 call->signature = sig;
2122 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse the caller-provided vret_addr ... */
2125 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2126 call->vret_var = cfg->vret_addr;
2127 //g_assert_not_reached ();
/* ... or allocate a temporary and pass its address via OP_OUTARG_VTRETADDR. */
2129 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2130 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2133 temp->backend.is_pinvoke = sig->pinvoke;
2136  * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2137  * address of return value to increase optimization opportunities.
2138  * Before vtype decomposition, the dreg of the call ins itself represents the
2139  * fact the call modifies the return value. After decomposition, the call will
2140  * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2141  * will be transformed into an LDADDR.
2143 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2144 loada->dreg = alloc_preg (cfg);
2145 loada->inst_p0 = temp;
2146 /* We reference the call too since call->dreg could change during optimization */
2147 loada->inst_p1 = call;
2148 MONO_ADD_INS (cfg->cbb, loada);
2150 call->inst.dreg = temp->dreg;
2152 call->vret_var = loada;
2153 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2154 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2156 #ifdef MONO_ARCH_SOFT_FLOAT
2158  * If the call has a float argument, we would need to do an r8->r4 conversion using
2159  * an icall, but that cannot be done during the call sequence since it would clobber
2160  * the call registers + the stack. So we do it before emitting the call.
2162 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2164 MonoInst *in = call->args [i];
2166 if (i >= sig->hasthis)
2167 t = sig->params [i - sig->hasthis];
2169 t = &mono_defaults.int_class->byval_arg;
2170 t = mono_type_get_underlying_type (t);
2172 if (!t->byref && t->type == MONO_TYPE_R4) {
2173 MonoInst *iargs [1];
2177 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2179 /* The result will be in an int vreg */
2180 call->args [i] = conv;
/* Backend lowers the call's out-args (LLVM path first when enabled). */
2186 if (COMPILE_LLVM (cfg))
2187 mono_llvm_emit_call (cfg, call);
2189 mono_arch_emit_call (cfg, call);
2191 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area so the prolog can reserve it. */
2194 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2195 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emit an indirect call (calli) through the address in @addr.
 * Returns the call instruction, already added to the current bblock.
 */
2200 inline static MonoInst*
2201 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2203 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2205 call->inst.sreg1 = addr->dreg;
2207 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2209 return (MonoInst*)call;
/*
 * Like mono_emit_calli, but additionally passes @rgctx_arg (the runtime
 * generic context) in the architecture's dedicated RGCTX register.
 * Only available when MONO_ARCH_RGCTX_REG is defined.
 */
2212 inline static MonoInst*
2213 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2215 #ifdef MONO_ARCH_RGCTX_REG
2220 rgctx_reg = mono_alloc_preg (cfg);
2221 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2223 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2225 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2226 cfg->uses_rgctx_reg = TRUE;
2227 call->rgctx_reg = TRUE;
2229 return (MonoInst*)call;
2231 g_assert_not_reached ();
2237 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2239 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emit a managed method call.  @this non-NULL selects the virtual path;
 * @imt_arg, when set, carries the interface-method hidden argument.
 * Handles: string ctor signature rewriting, transparent-proxy remoting
 * wrappers, delegate Invoke fast path, devirtualization of non-virtual
 * and sealed methods, and vtable/IMT-slot indirect dispatch.
 */
2242 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2243 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2245 gboolean might_be_remote;
2246 gboolean virtual = this != NULL;
2247 gboolean enable_for_aot = TRUE;
2251 if (method->string_ctor) {
2252 /* Create the real signature */
2253 /* FIXME: Cache these */
2254 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2255 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* 'this' may be a transparent proxy if the class is MarshalByRef (or object)
 * and the call is non-virtual and not statically known non-proxy. */
2260 might_be_remote = this && sig->hasthis &&
2261 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2262 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2264 context_used = mono_method_check_context_used (method);
2265 if (might_be_remote && context_used) {
/* Shared generic + remoting: fetch the checked-invoke wrapper address from
 * the runtime generic context and call indirectly. */
2268 g_assert (cfg->generic_sharing_context);
2270 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2272 return mono_emit_calli (cfg, sig, args, addr);
2275 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2277 if (might_be_remote)
2278 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2280 call->method = method;
2281 call->inst.flags |= MONO_INST_HAS_METHOD;
2282 call->inst.inst_left = this;
2285 int vtable_reg, slot_reg, this_reg;
2287 this_reg = this->dreg;
2289 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2291 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2292 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2293 /* Make a call to delegate->invoke_impl */
2294 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2295 call->inst.inst_basereg = this_reg;
2296 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2297 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2299 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final with no remoting wrapper in play. */
2303 if ((!cfg->compile_aot || enable_for_aot) &&
2304 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2305 (MONO_METHOD_IS_FINAL (method) &&
2306 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2308  * the method is not virtual, we just need to ensure this is not null
2309  * and then we can call the method directly.
2311 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2312 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2315 if (!method->string_ctor) {
2316 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2317 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2318 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2321 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2323 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2325 return (MonoInst*)call;
2328 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2330  * the method is virtual, but we can statically dispatch since either
2331  * it's class or the method itself are sealed.
2332  * But first we need to ensure it's not a null reference.
2334 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2335 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2336 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2338 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2339 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2341 return (MonoInst*)call;
/* True virtual dispatch: call through a vtable (or IMT) slot. */
2344 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2346 vtable_reg = alloc_preg (cfg);
2347 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2348 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2350 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2352 guint32 imt_slot = mono_method_get_imt_slot (method);
2353 emit_imt_argument (cfg, call, imt_arg);
2354 slot_reg = vtable_reg;
2355 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2358 if (slot_reg == -1) {
/* No IMT: resolve via the per-interface offsets table. */
2359 slot_reg = alloc_preg (cfg);
2360 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2361 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2364 slot_reg = vtable_reg;
2365 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2366 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2367 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also need the IMT argument. */
2369 g_assert (mono_method_signature (method)->generic_param_count);
2370 emit_imt_argument (cfg, call, imt_arg);
2375 call->inst.sreg1 = slot_reg;
2376 call->virtual = TRUE;
2379 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2381 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg
 * (runtime generic context) in the dedicated RGCTX register.  The move into
 * the register is emitted before the call so the backend sees it as a
 * fixed out-arg.
 */
2385 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2386 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2393 #ifdef MONO_ARCH_RGCTX_REG
2394 rgctx_reg = mono_alloc_preg (cfg);
2395 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2400 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2402 call = (MonoCallInst*)ins;
2404 #ifdef MONO_ARCH_RGCTX_REG
2405 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2406 cfg->uses_rgctx_reg = TRUE;
2407 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2416 static inline MonoInst*
2417 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2419 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emit a direct call to native code at @func with managed-style @sig.
 * Returns the call instruction, added to the current bblock.
 */
2423 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2430 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2433 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2435 return (MonoInst*)call;
/*
 * Emit a call to the JIT icall registered for @func: look up its info by
 * address and call through its wrapper with the registered signature.
 */
2438 inline static MonoInst*
2439 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2441 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2445 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2449  * mono_emit_abs_call:
2451  * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2453 inline static MonoInst*
2454 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2455 MonoMethodSignature *sig, MonoInst **args)
2457 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2461  * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in abs_patches so the resolver can find it by pointer. */
2464 if (cfg->abs_patches == NULL)
2465 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2466 g_hash_table_insert (cfg->abs_patches, ji, ji);
2467 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark that 'fptr' is a patch record, not a real code address. */
2468 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widen a call's small-integer result to full register width when the callee
 * (pinvoke, or LLVM-compiled code) may leave the upper bits uninitialized.
 * Returns either the original ins or a new widening instruction.
 */
2473 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2475 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2476 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2480  * Native code might return non register sized integers
2481  * without initializing the upper bits.
/* Pick the sign/zero-extension matching the return type's load width. */
2483 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2484 case OP_LOADI1_MEMBASE:
2485 widen_op = OP_ICONV_TO_I1;
2487 case OP_LOADU1_MEMBASE:
2488 widen_op = OP_ICONV_TO_U1;
2490 case OP_LOADI2_MEMBASE:
2491 widen_op = OP_ICONV_TO_I2;
2493 case OP_LOADU2_MEMBASE:
2494 widen_op = OP_ICONV_TO_U2;
2500 if (widen_op != -1) {
2501 int dreg = alloc_preg (cfg);
2504 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2505 widen->type = ins->type;
/*
 * Return the managed string-class "memcpy" helper, resolved once and cached
 * in a function-local static.  Aborts if running against an old corlib that
 * lacks it.
 */
2515 get_memcpy_method (void)
2517 static MonoMethod *memcpy_method = NULL;
2518 if (!memcpy_method) {
2519 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2521 g_error ("Old corlib found. Install a new one");
2523 return memcpy_method;
2527 * Emit code to copy a valuetype of type @klass whose address is stored in
2528 * @src->dreg to memory whose address is stored at @dest->dreg.
2531 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2533 MonoInst *iargs [3];
2536 MonoMethod *memcpy_method;
2540 * This check breaks with spilled vars... need to handle it during verification anyway.
2541 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Use the native (marshalled) size for native copies, managed size otherwise */
2545 n = mono_class_native_size (klass, &align);
2547 n = mono_class_value_size (klass, &align);
/* With write barriers, structs containing references must be copied through
 * the mono_value_copy icall so the GC observes the reference stores. */
2549 #if HAVE_WRITE_BARRIERS
2550 /* if native is true there should be no references in the struct */
2551 if (klass->has_references && !native) {
2552 /* Avoid barriers when storing to the stack */
2553 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2554 (dest->opcode == OP_LDADDR))) {
2555 int context_used = 0;
2560 if (cfg->generic_sharing_context)
2561 context_used = mono_class_check_context_used (klass);
/* Under generic sharing the klass argument comes from the rgctx */
2563 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2565 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2566 mono_class_compute_gc_descriptor (klass);
2569 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined; larger ones call the corlib memcpy helper */
2574 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2575 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2576 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2580 EMIT_NEW_ICONST (cfg, iargs [2], n);
2582 memcpy_method = get_memcpy_method ();
2583 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the System.String::memset (3 args) helper from corlib, looked up
 * lazily and cached in a static on first use.
 */
2588 get_memset_method (void)
2590 static MonoMethod *memset_method = NULL;
2591 if (!memset_method) {
2592 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
/* A missing helper means the runtime is paired with an outdated corlib */
2594 g_error ("Old corlib found. Install a new one");
2596 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg: small sizes are memset inline, larger ones call the corlib
 * memset helper.
 */
2600 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2602 MonoInst *iargs [3];
2605 MonoMethod *memset_method;
2607 /* FIXME: Optimize this for the case when dest is an LDADDR */
2609 mono_class_init (klass);
2610 n = mono_class_value_size (klass, &align);
2612 if (n <= sizeof (gpointer) * 5) {
2613 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2616 memset_method = get_memset_method ();
/* memset (dest, 0, n) */
2618 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2619 EMIT_NEW_ICONST (cfg, iargs [2], n);
2620 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on how
 * the method is shared, the context comes from the hidden mrgctx/vtable
 * variable or from the vtable of the 'this' argument.
 */
2625 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2627 MonoInst *this = NULL;
2629 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types without method inflation can reach
 * the context through 'this' */
2631 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2632 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2633 !method->klass->valuetype)
2634 EMIT_NEW_ARGLOAD (cfg, this, 0);
2636 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2637 MonoInst *mrgctx_loc, *mrgctx_var;
2640 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Load the method rgctx from the hidden variable */
2642 mrgctx_loc = mono_get_vtable_var (cfg);
2643 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2646 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2647 MonoInst *vtable_loc, *vtable_var;
2651 vtable_loc = mono_get_vtable_var (cfg);
2652 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2654 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2655 MonoInst *mrgctx_var = vtable_var;
/* The hidden variable holds an MRGCTX; load its class vtable field */
2658 vtable_reg = alloc_preg (cfg);
2659 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2660 vtable_var->type = STACK_PTR;
/* Otherwise: load the vtable out of the 'this' object */
2666 int vtable_reg, res_reg;
2668 vtable_reg = alloc_preg (cfg);
2669 res_reg = alloc_preg (cfg);
2670 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) an rgctx entry descriptor for a lazy fetch
 * of INFO_TYPE data, identified by the patch PATCH_TYPE/PATCH_DATA, on behalf
 * of METHOD. IN_MRGCTX selects the method rgctx over the class vtable rgctx.
 */
2675 static MonoJumpInfoRgctxEntry *
2676 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2678 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2679 res->method = method;
2680 res->in_mrgctx = in_mrgctx;
2681 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2682 res->data->type = patch_type;
2683 res->data->data.target = patch_data;
2684 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY using
 * the runtime generic context in RGCTX.
 */
2689 static inline MonoInst*
2690 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2692 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the property RGCTX_TYPE of KLASS via a lazy rgctx fetch.
 */
2696 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2697 MonoClass *klass, int rgctx_type)
2699 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2700 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2702 return emit_rgctx_fetch (cfg, rgctx, entry);
2706 * emit_get_rgctx_method:
2708 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2709 * normal constants, else emit a load from the rgctx.
2712 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2713 MonoMethod *cmethod, int rgctx_type)
2715 if (!context_used) {
/* No generic sharing: the requested info can be emitted as a constant */
2718 switch (rgctx_type) {
2719 case MONO_RGCTX_INFO_METHOD:
2720 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2722 case MONO_RGCTX_INFO_METHOD_RGCTX:
2723 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2726 g_assert_not_reached ();
/* Shared code: fetch the info lazily through the rgctx */
2729 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2730 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2732 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the property RGCTX_TYPE of FIELD via a lazy rgctx fetch.
 */
2737 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2738 MonoClassField *field, int rgctx_type)
2740 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2741 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2743 return emit_rgctx_fetch (cfg, rgctx, entry);
2747 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing the
 * vtable (from the rgctx when shared, else as a constant) as argument.
 */
2750 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2752 MonoInst *vtable_arg;
2754 int context_used = 0;
2756 if (cfg->generic_sharing_context)
2757 context_used = mono_class_check_context_used (klass);
2760 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2761 klass, MONO_RGCTX_INFO_VTABLE);
2763 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2767 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2770 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
/* Some architectures pass the vtable in a fixed register to the trampoline */
2771 #ifdef MONO_ARCH_VTABLE_REG
2772 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2773 cfg->uses_vtable_reg = TRUE;
2780 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * MONO_OPT_SHARED, generic sharing and AOT compilation.
 */
2783 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2785 int vtable_reg = alloc_preg (cfg);
2786 int context_used = 0;
2788 if (cfg->generic_sharing_context)
2789 context_used = mono_class_check_context_used (array_class);
2791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared domains: compare the MonoClass pointers instead of vtables */
2793 if (cfg->opt & MONO_OPT_SHARED) {
2794 int class_reg = alloc_preg (cfg);
2795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2796 if (cfg->compile_aot) {
2797 int klass_reg = alloc_preg (cfg);
2798 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2799 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: load the expected vtable from the rgctx */
2803 } else if (context_used) {
2804 MonoInst *vtable_ins;
2806 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2807 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2809 if (cfg->compile_aot) {
2813 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2815 vt_reg = alloc_preg (cfg);
2816 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2817 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2820 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2826 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (from OBJ_REG's
 * vtable) and the target KLASS in the thread's MonoJitTlsData so a failing
 * cast can produce a detailed error message.
 */
2830 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2832 if (mini_get_debug_options ()->better_cast_details) {
2833 int to_klass_reg = alloc_preg (cfg);
2834 int vtable_reg = alloc_preg (cfg);
2835 int klass_reg = alloc_preg (cfg);
2836 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is not available on every platform */
2839 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2843 MONO_ADD_INS (cfg->cbb, tls_get);
2844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2848 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2849 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast details recorded by save_cast_details () after
 * a cast has succeeded.
 */
2854 reset_cast_details (MonoCompile *cfg)
2856 /* Reset the variables holding the cast details */
2857 if (mini_get_debug_options ()->better_cast_details) {
2858 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2860 MONO_ADD_INS (cfg->cbb, tls_get);
2861 /* It is enough to reset the from field */
2862 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2867 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2868 * generic code is generated.
2871 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2873 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2876 MonoInst *rgctx, *addr;
2878 /* FIXME: What if the class is shared? We might not
2879 have to get the address of the method from the
/* Shared code: get the Unbox method's code address from the rgctx and do an
 * indirect call with the rgctx argument */
2881 addr = emit_get_rgctx_method (cfg, context_used, method,
2882 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2884 rgctx = emit_get_rgctx (cfg, method, context_used);
2886 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: call Nullable<T>.Unbox directly */
2888 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit code to unbox the object on top of the stack (SP [0]) to the
 * valuetype KLASS: null-check, verify the vtable rank is 0 and the element
 * class matches, then compute the address of the boxed payload
 * (obj + sizeof (MonoObject)).
 */
2893 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2897 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2898 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2899 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2900 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2902 obj_reg = sp [0]->dreg;
2903 MONO_EMIT_NULL_CHECK (cfg, obj_reg);
2904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2905 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2907 /* FIXME: generics */
2908 g_assert (klass->rank == 0);
/* The object must not be an array */
2911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2912 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx */
2918 MonoInst *element_class;
2920 /* This assertion is from the unboxcast insn */
2921 g_assert (klass->rank == 0);
2923 element_class = emit_get_rgctx_klass (cfg, context_used,
2924 klass->element_class, MONO_RGCTX_INFO_KLASS);
2926 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2927 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2929 save_cast_details (cfg, klass->element_class, obj_reg);
2930 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2931 reset_cast_details (cfg);
/* Address of the unboxed payload: obj + sizeof (MonoObject) */
2934 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2935 MONO_ADD_INS (cfg->cbb, add);
2936 add->type = STACK_MP;
2943 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code to allocate an object of class KLASS, choosing between the
 * shared-domain allocator, a specialized mscorlib helper (AOT out-of-line
 * code), a GC managed allocator, or the generic allocation function.
 * FOR_BOX selects the boxing variant of the allocator.
 */
2946 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2948 MonoInst *iargs [2];
2951 if (cfg->opt & MONO_OPT_SHARED) {
2952 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2953 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2955 alloc_ftn = mono_object_new;
2956 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2957 /* This happens often in argument checking code, eg. throw new FooException... */
2958 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2959 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2960 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2962 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2963 MonoMethod *managed_alloc = NULL;
/* A NULL vtable indicates the class failed to load */
2967 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2968 cfg->exception_ptr = klass;
2972 #ifndef MONO_CROSS_COMPILE
2973 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2976 if (managed_alloc) {
2977 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2978 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2980 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in gpointer units as first arg */
2982 guint32 lw = vtable->klass->instance_size;
2983 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2984 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2985 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2988 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2992 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * provided at runtime by DATA_INST instead of being known at compile time.
 */
2996 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2999 MonoInst *iargs [2];
3000 MonoMethod *managed_alloc = NULL;
3004 FIXME: we cannot get managed_alloc here because we can't get
3005 the class's vtable (because it's not a closed class)
3007 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3008 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3011 if (cfg->opt & MONO_OPT_SHARED) {
3012 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3013 iargs [1] = data_inst;
3014 alloc_ftn = mono_object_new;
3016 if (managed_alloc) {
3017 iargs [0] = data_inst;
3018 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to the vtable-specific allocation icall */
3021 iargs [0] = data_inst;
3022 alloc_ftn = mono_object_new_specific;
3025 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3029 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit code to box VAL into an object of class KLASS: Nullable<T> goes
 * through its Box method, otherwise allocate the object and store the value
 * after the MonoObject header.
 */
3032 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3034 MonoInst *alloc, *ins;
3036 if (mono_class_is_nullable (klass)) {
3037 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3038 return mono_emit_method_call (cfg, method, &val, NULL);
3041 alloc = handle_alloc (cfg, klass, TRUE);
3045 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the vtable comes from
 * DATA_INST and the Nullable<T>.Box call goes indirectly through an address
 * fetched from the rgctx.
 */
3051 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3053 MonoInst *alloc, *ins;
3055 if (mono_class_is_nullable (klass)) {
3056 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3057 /* FIXME: What if the class is shared? We might not
3058 have to get the method address from the RGCTX. */
3059 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3060 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3061 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3063 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3065 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value after the MonoObject header of the freshly boxed object */
3067 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3074 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit code for the 'castclass' operation: SRC is checked to be an instance
 * of KLASS, throwing InvalidCastException otherwise. A null reference passes
 * the check (branch to is_null_bb).
 */
3077 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3079 MonoBasicBlock *is_null_bb;
3080 int obj_reg = src->dreg;
3081 int vtable_reg = alloc_preg (cfg);
3083 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully */
3085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3088 save_cast_details (cfg, klass, obj_reg);
3090 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3092 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3094 int klass_reg = alloc_preg (cfg);
3096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes only need a single pointer comparison */
3098 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3099 /* the remoting code is broken, access the class for now */
3100 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3101 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3103 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3104 cfg->exception_ptr = klass;
3107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3109 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3110 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3112 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3115 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3119 MONO_START_BB (cfg, is_null_bb);
3121 reset_cast_details (cfg);
3127 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit code for the 'isinst' operation: the result register holds SRC if
 * it is an instance of KLASS (or null), and NULL otherwise. Dispatches on
 * interface / array / nullable / sealed / general class cases.
 */
3130 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3133 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3134 int obj_reg = src->dreg;
3135 int vtable_reg = alloc_preg (cfg);
3136 int res_reg = alloc_preg (cfg);
3138 NEW_BBLOCK (cfg, is_null_bb);
3139 NEW_BBLOCK (cfg, false_bb);
3140 NEW_BBLOCK (cfg, end_bb);
3142 /* Do the assignment at the beginning, so the other assignment can be if converted */
3143 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3144 ins->type = STACK_OBJ;
/* null is trivially "true": the result stays null */
3147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3150 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3152 /* the is_null_bb target simply copies the input register to the output */
3153 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3155 int klass_reg = alloc_preg (cfg);
3157 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: check rank, then the element (cast) class */
3160 int rank_reg = alloc_preg (cfg);
3161 int eclass_reg = alloc_preg (cfg);
3163 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3166 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case cast classes with nontrivial assignability (object, enum
 * basetype, enum, interfaces) before the generic isinst check */
3168 if (klass->cast_class == mono_defaults.object_class) {
3169 int parent_reg = alloc_preg (cfg);
3170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3171 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3172 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3174 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3175 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3176 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3177 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3178 } else if (klass->cast_class == mono_defaults.enum_class) {
3179 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3180 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3181 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3182 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3184 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3185 /* Check that the object is a vector too */
3186 int bounds_reg = alloc_preg (cfg);
3187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3188 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3192 /* the is_null_bb target simply copies the input register to the output */
3193 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3195 } else if (mono_class_is_nullable (klass)) {
3196 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3197 /* the is_null_bb target simply copies the input register to the output */
3198 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed non-array classes only need a single pointer comparison */
3200 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3201 /* the remoting code is broken, access the class for now */
3202 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3203 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3205 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3206 cfg->exception_ptr = klass;
3209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3211 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3212 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3214 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3215 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3218 /* the is_null_bb target simply copies the input register to the output */
3219 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: result is NULL */
3224 MONO_START_BB (cfg, false_bb);
3226 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3229 MONO_START_BB (cfg, is_null_bb);
3231 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit code for the remoting-aware isinst variant (see the comment below
 * for the 0/1/2 result encoding). Transparent proxies with custom type info
 * cannot be classified statically and yield 2.
 */
3237 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3239 /* This opcode takes as input an object reference and a class, and returns:
3240 0) if the object is an instance of the class,
3241 1) if the object is not instance of the class,
3242 2) if the object is a proxy whose type cannot be determined */
3245 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3246 int obj_reg = src->dreg;
3247 int dreg = alloc_ireg (cfg);
3249 int klass_reg = alloc_preg (cfg);
3251 NEW_BBLOCK (cfg, true_bb);
3252 NEW_BBLOCK (cfg, false_bb);
3253 NEW_BBLOCK (cfg, false2_bb);
3254 NEW_BBLOCK (cfg, end_bb);
3255 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is not an instance: result 1 */
3257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3260 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3261 NEW_BBLOCK (cfg, interface_fail_bb);
3263 tmp_reg = alloc_preg (cfg);
3264 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3265 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: a transparent proxy with custom type info
 * means "can't tell" (2), anything else is "no" (1) */
3266 MONO_START_BB (cfg, interface_fail_bb);
3267 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3269 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3271 tmp_reg = alloc_preg (cfg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3276 tmp_reg = alloc_preg (cfg);
3277 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3278 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: for proxies, check against the remote proxy class */
3280 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3281 tmp_reg = alloc_preg (cfg);
3282 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3283 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3285 tmp_reg = alloc_preg (cfg);
3286 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3288 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3290 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3291 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3293 MONO_START_BB (cfg, no_proxy_bb);
3295 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result materialization: 1 = not an instance, 2 = undeterminable proxy,
 * 0 = instance */
3298 MONO_START_BB (cfg, false_bb);
3300 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3303 MONO_START_BB (cfg, false2_bb);
3305 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3306 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3308 MONO_START_BB (cfg, true_bb);
3310 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3312 MONO_START_BB (cfg, end_bb);
3315 MONO_INST_NEW (cfg, ins, OP_ICONST);
3317 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit code for the remoting-aware castclass variant (see the comment below
 * for the 0/1 result encoding): throws InvalidCastException on a definite
 * mismatch, returns 1 for an undeterminable proxy.
 */
3323 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3325 /* This opcode takes as input an object reference and a class, and returns:
3326 0) if the object is an instance of the class,
3327 1) if the object is a proxy whose type cannot be determined
3328 an InvalidCastException exception is thrown otherwhise*/
3331 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3332 int obj_reg = src->dreg;
3333 int dreg = alloc_ireg (cfg);
3334 int tmp_reg = alloc_preg (cfg);
3335 int klass_reg = alloc_preg (cfg);
3337 NEW_BBLOCK (cfg, end_bb);
3338 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully */
3340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3343 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3344 NEW_BBLOCK (cfg, interface_fail_bb);
3346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3347 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type info
 * is acceptable (result 1); everything else throws */
3348 MONO_START_BB (cfg, interface_fail_bb);
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3351 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3353 tmp_reg = alloc_preg (cfg);
3354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3356 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3358 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3362 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface: for proxies, check against the remote proxy class */
3364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3366 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3368 tmp_reg = alloc_preg (cfg);
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3372 tmp_reg = alloc_preg (cfg);
3373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3377 NEW_BBLOCK (cfg, fail_1_bb);
3379 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3381 MONO_START_BB (cfg, fail_1_bb);
3383 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3386 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass here throws */
3388 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3391 MONO_START_BB (cfg, ok_result_bb);
3393 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3395 MONO_START_BB (cfg, end_bb);
3398 MONO_INST_NEW (cfg, ins, OP_ICONST);
3400 ins->type = STACK_I4;
3406 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inline code equivalent to mono_delegate_ctor (): allocate the
 * delegate object of class KLASS, set its target, method, (optionally)
 * method_code slot and invoke_impl trampoline fields.
 */
3408 static G_GNUC_UNUSED MonoInst*
3409 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3411 gpointer *trampoline;
3412 MonoInst *obj, *method_ins, *tramp_ins;
3416 obj = handle_alloc (cfg, klass, FALSE);
3420 /* Inline the contents of mono_delegate_ctor */
3422 /* Set target field */
3423 /* Optimize away setting of NULL target */
3424 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3425 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3427 /* Set method field */
3428 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3429 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3432 * To avoid looking up the compiled code belonging to the target method
3433 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3434 * store it, and we fill it after the method has been compiled.
3436 if (!cfg->compile_aot && !method->dynamic && !context_used) {
3437 MonoInst *code_slot_ins;
/* Allocate (or reuse) the per-domain code slot under the domain lock */
3439 domain = mono_domain_get ();
3440 mono_domain_lock (domain);
3441 if (!domain_jit_info (domain)->method_code_hash)
3442 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3443 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3445 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3446 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3448 mono_domain_unlock (domain);
3450 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3451 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3454 /* Set invoke_impl field */
3455 if (cfg->compile_aot) {
3456 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3458 trampoline = mono_create_delegate_trampoline (klass);
3459 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3461 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3463 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the variable-argument array allocation icall for a
 * multi-dimensional array of rank RANK, with the dimension arguments taken
 * from the stack slots in SP.  Returns the MonoInst holding the call result.
 */
3469 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3471 MonoJitICallInfo *info;
3473 /* Need to register the icall so it gets an icall wrapper */
3474 info = mono_get_array_new_va_icall (rank);
3476 cfg->flags |= MONO_CFG_HAS_VARARGS;
3478 /* mono_array_new_va () needs a vararg calling convention */
3479 cfg->disable_llvm = TRUE;
3481 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3482 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so that
 * cfg->got_var is initialized before any other code runs, then add a dummy
 * use of it in the exit block so its live range spans the whole method.
 * Does nothing when there is no got_var or it was already allocated.
 */
3486 mono_emit_load_got_addr (MonoCompile *cfg)
3488 MonoInst *getaddr, *dummy_use;
3490 if (!cfg->got_var || cfg->got_var_allocated)
3493 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3494 getaddr->dreg = cfg->got_var->dreg;
3496 /* Add it to the start of the first bblock */
/* If the entry block already has code, splice the load in front of it
 * by hand instead of appending with MONO_ADD_INS. */
3497 if (cfg->bb_entry->code) {
3498 getaddr->next = cfg->bb_entry->code;
3499 cfg->bb_entry->code = getaddr;
3502 MONO_ADD_INS (cfg->bb_entry, getaddr);
3504 cfg->got_var_allocated = TRUE;
3507 * Add a dummy use to keep the got_var alive, since real uses might
3508 * only be generated by the back ends.
3509 * Add it to end_bblock, so the variable's lifetime covers the whole
3511 * It would be better to make the usage of the got var explicit in all
3512 * cases when the backend needs it (i.e. calls, throw etc.), so this
3513 * wouldn't be needed.
3515 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3516 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size limit for inlining, read once from the MONO_INLINELIMIT
 * environment variable (see mono_method_check_inlining ()). */
3519 static int inline_limit;
3520 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Visible rejection criteria: generic sharing in effect,
 * inline depth over 10, runtime/icall/noinlining/synchronized/pinvoke
 * methods, MarshalByRef classes, missing headers or exception clauses,
 * bodies at or over the MONO_INLINELIMIT size, classes whose cctor still
 * needs to run, methods with declarative security (CAS), and — on
 * MONO_ARCH_SOFT_FLOAT targets — R4 parameters or return values.
 */
3523 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3525 MonoMethodHeader *header;
3527 #ifdef MONO_ARCH_SOFT_FLOAT
3528 MonoMethodSignature *sig = mono_method_signature (method);
3532 if (cfg->generic_sharing_context)
3535 if (cfg->inline_depth > 10)
3538 #ifdef MONO_ARCH_HAVE_LMF_OPS
3539 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3540 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3541 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3545 if (method->is_inflated)
3546 /* Avoid inflating the header */
3547 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3549 header = mono_method_get_header (method);
3551 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3552 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3553 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3554 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3555 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3556 (method->klass->marshalbyref) ||
3557 !header || header->num_clauses)
3560 /* also consider num_locals? */
3561 /* Do the size check early to avoid creating vtables */
/* Lazily cache the inline size limit; default is INLINE_LENGTH_LIMIT. */
3562 if (!inline_limit_inited) {
3563 if (getenv ("MONO_INLINELIMIT"))
3564 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3566 inline_limit = INLINE_LENGTH_LIMIT;
3567 inline_limit_inited = TRUE;
3569 if (header->code_size >= inline_limit)
3573 * if we can initialize the class of the method right away, we do,
3574 * otherwise we don't allow inlining if the class needs initialization,
3575 * since it would mean inserting a call to mono_runtime_class_init()
3576 * inside the inlined code
3578 if (!(cfg->opt & MONO_OPT_SHARED)) {
3579 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3580 if (cfg->run_cctors && method->klass->has_cctor) {
3581 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3582 if (!method->klass->runtime_info)
3583 /* No vtable created yet */
3585 vtable = mono_class_vtable (cfg->domain, method->klass);
3588 /* This makes so that inline cannot trigger */
3589 /* .cctors: too many apps depend on them */
3590 /* running with a specific order... */
3591 if (! vtable->initialized)
3593 mono_runtime_class_init (vtable);
3595 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3596 if (!method->klass->runtime_info)
3597 /* No vtable created yet */
3599 vtable = mono_class_vtable (cfg->domain, method->klass);
3602 if (!vtable->initialized)
3607 * If we're compiling for shared code
3608 * the cctor will need to be run at aot method load time, for example,
3609 * or at the end of the compilation of the inlining method.
3611 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3616 * CAS - do not inline methods with declarative security
3617 * Note: this has to be before any possible return TRUE;
3619 if (mono_method_has_declsec (method))
3622 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods dealing in R4 values. */
3624 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3626 for (i = 0; i < sig->param_count; ++i)
3627 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access performed by METHOD on the class of
 * VTABLE requires running the class constructor first.  Already-initialized
 * vtables (outside AOT), BeforeFieldInit classes, classes with no cctor to
 * run, and instance methods of the class itself (where initialization has
 * already happened before the call) do not need it.
 */
3635 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3637 if (vtable->initialized && !cfg->compile_aot)
3640 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3643 if (!mono_class_needs_cctor_run (vtable->klass, method))
3646 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3647 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR of element type KLASS, including the bounds check against
 * MonoArray.max_length.  On x86/amd64 a single LEA covers power-of-two
 * element sizes; otherwise a multiply/add sequence is emitted.
 */
3654 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3658 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3660 mono_class_init (klass);
3661 size = mono_class_array_element_size (klass);
3663 mult_reg = alloc_preg (cfg);
3664 array_reg = arr->dreg;
3665 index_reg = index->dreg;
3667 #if SIZEOF_REGISTER == 8
3668 /* The array reg is 64 bits but the index reg is only 32 */
3669 index2_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index, otherwise use it as-is. */
3672 if (index->type == STACK_I8) {
3673 index2_reg = alloc_preg (cfg);
3674 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3676 index2_reg = index_reg;
3680 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3682 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3683 if (size == 1 || size == 2 || size == 4 || size == 8) {
3684 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3686 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3687 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3693 add_reg = alloc_preg (cfg);
3695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3696 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3697 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3698 ins->type = STACK_PTR;
3699 MONO_ADD_INS (cfg->cbb, ins);
/* Only built when the target has a real multiply: the address computation
 * below relies on pointer-width multiplication ops. */
3704 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 array access on ARR,
 * adjusting both indexes by the per-dimension lower bound from
 * MonoArrayBounds and range-checking each against the dimension length
 * (throwing IndexOutOfRangeException on failure).
 */
3706 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3708 int bounds_reg = alloc_preg (cfg);
3709 int add_reg = alloc_preg (cfg);
3710 int mult_reg = alloc_preg (cfg);
3711 int mult2_reg = alloc_preg (cfg);
3712 int low1_reg = alloc_preg (cfg);
3713 int low2_reg = alloc_preg (cfg);
3714 int high1_reg = alloc_preg (cfg);
3715 int high2_reg = alloc_preg (cfg);
3716 int realidx1_reg = alloc_preg (cfg);
3717 int realidx2_reg = alloc_preg (cfg);
3718 int sum_reg = alloc_preg (cfg);
3723 mono_class_init (klass);
3724 size = mono_class_array_element_size (klass);
3726 index1 = index_ins1->dreg;
3727 index2 = index_ins2->dreg;
3729 /* range checking */
3730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3731 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3734 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3735 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3737 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3738 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3739 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension bounds live one MonoArrayBounds further into the array. */
3741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3742 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3743 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3745 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3746 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3747 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((idx1 * dim2_len + idx2) * size) + offsetof (MonoArray, vector) */
3749 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3750 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3752 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3753 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3755 ins->type = STACK_MP;
3757 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the array accessor CMETHOD
 * (Get/Set/Address); IS_SET discounts the trailing value argument when
 * computing the rank.  Fast paths exist for rank 1, and for rank 2 when
 * intrinsics are enabled and multiplication is not emulated; otherwise a
 * call to the marshalled array-address wrapper is emitted.
 */
3764 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3768 MonoMethod *addr_method;
3771 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3774 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3776 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3777 /* emit_ldelema_2 depends on OP_LMUL */
3778 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3779 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Slow path: call the generated Address () wrapper for this rank/size. */
3783 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3784 addr_method = mono_marshal_get_array_address (rank, element_size);
3785 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with
 * inline IR — an intrinsic.  Handles selected members of String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked (atomic ops, gated on
 * per-arch capability defines), Debugger.Break, Environment and Math, then
 * SIMD intrinsics, and finally defers to mono_arch_emit_inst_for_method ().
 * Returns the emitted instruction, or NULL when no intrinsic applies and a
 * normal call must be emitted instead.
 */
3791 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3793 MonoInst *ins = NULL;
3795 static MonoClass *runtime_helpers_class = NULL;
3796 if (! runtime_helpers_class)
3797 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3798 "System.Runtime.CompilerServices", "RuntimeHelpers");
3800 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: inline bounds check + 16-bit load from the chars array. */
3801 if (strcmp (cmethod->name, "get_Chars") == 0) {
3802 int dreg = alloc_ireg (cfg);
3803 int index_reg = alloc_preg (cfg);
3804 int mult_reg = alloc_preg (cfg);
3805 int add_reg = alloc_preg (cfg);
3807 #if SIZEOF_REGISTER == 8
3808 /* The array reg is 64 bits but the index reg is only 32 */
3809 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3811 index_reg = args [1]->dreg;
3813 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3815 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3816 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3817 add_reg = ins->dreg;
3818 /* Avoid a warning */
3820 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3824 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3825 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3826 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3828 type_from_op (ins, NULL, NULL);
3830 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3831 int dreg = alloc_ireg (cfg);
3832 /* Decompose later to allow more optimizations */
3833 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3834 ins->type = STACK_I4;
3835 cfg->cbb->has_array_access = TRUE;
3836 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3839 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3840 int mult_reg = alloc_preg (cfg);
3841 int add_reg = alloc_preg (cfg);
3843 /* The corlib functions check for oob already. */
3844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3845 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3846 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3849 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: null-check, then load vtable->type directly. */
3851 if (strcmp (cmethod->name, "GetType") == 0) {
3852 int dreg = alloc_preg (cfg);
3853 int vt_reg = alloc_preg (cfg);
3854 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3856 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3857 type_from_op (ins, NULL, NULL);
/* Address-based hash is only valid with a non-moving collector. */
3860 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3861 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3862 int dreg = alloc_ireg (cfg);
3863 int t1 = alloc_ireg (cfg);
/* Knuth multiplicative hash of the object address (constant 2654435761). */
3865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3866 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3867 ins->type = STACK_I4;
3871 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty; replace the call with a nop. */
3872 MONO_INST_NEW (cfg, ins, OP_NOP);
3873 MONO_ADD_INS (cfg->cbb, ins);
3877 } else if (cmethod->klass == mono_defaults.array_class) {
3878 if (cmethod->name [0] != 'g')
3881 if (strcmp (cmethod->name, "get_Rank") == 0) {
3882 int dreg = alloc_ireg (cfg);
3883 int vtable_reg = alloc_preg (cfg);
3884 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3885 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3886 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3887 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3888 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3889 type_from_op (ins, NULL, NULL);
3892 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3893 int dreg = alloc_ireg (cfg);
3895 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3896 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3897 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3898 type_from_op (ins, NULL, NULL);
3903 } else if (cmethod->klass == runtime_helpers_class) {
3905 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3906 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3910 } else if (cmethod->klass == mono_defaults.thread_class) {
3911 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3912 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3913 MONO_ADD_INS (cfg->cbb, ins);
3915 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3916 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3917 MONO_ADD_INS (cfg->cbb, ins);
3920 } else if (cmethod->klass == mono_defaults.monitor_class) {
/* Monitor.Enter/Exit: either a trampoline taking the object in a fixed
 * register, or a managed fast-path wrapper, depending on the arch. */
3921 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3922 if (strcmp (cmethod->name, "Enter") == 0) {
3925 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3926 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3927 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3928 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3930 return (MonoInst*)call;
3931 } else if (strcmp (cmethod->name, "Exit") == 0) {
3934 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3935 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3936 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3937 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3939 return (MonoInst*)call;
3941 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3942 MonoMethod *fast_method = NULL;
3944 /* Avoid infinite recursion */
3945 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3946 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3947 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3950 if (strcmp (cmethod->name, "Enter") == 0 ||
3951 strcmp (cmethod->name, "Exit") == 0)
3952 fast_method = mono_monitor_get_fast_path (cmethod);
3956 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3958 } else if (mini_class_is_system_array (cmethod->klass) &&
3959 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3960 MonoInst *addr, *store, *load;
3961 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3963 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3965 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3967 } else if (cmethod->klass->image == mono_defaults.corlib &&
3968 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3969 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3972 #if SIZEOF_REGISTER == 8
3973 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3974 /* 64 bit reads are already atomic */
3975 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3976 ins->dreg = mono_alloc_preg (cfg);
3977 ins->inst_basereg = args [0]->dreg;
3978 ins->inst_offset = 0;
3979 MONO_ADD_INS (cfg->cbb, ins);
3983 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement map onto atomic-add with an immediate of +1/-1. */
3984 if (strcmp (cmethod->name, "Increment") == 0) {
3985 MonoInst *ins_iconst;
3988 if (fsig->params [0]->type == MONO_TYPE_I4)
3989 opcode = OP_ATOMIC_ADD_NEW_I4;
3990 #if SIZEOF_REGISTER == 8
3991 else if (fsig->params [0]->type == MONO_TYPE_I8)
3992 opcode = OP_ATOMIC_ADD_NEW_I8;
3995 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3996 ins_iconst->inst_c0 = 1;
3997 ins_iconst->dreg = mono_alloc_ireg (cfg);
3998 MONO_ADD_INS (cfg->cbb, ins_iconst);
4000 MONO_INST_NEW (cfg, ins, opcode);
4001 ins->dreg = mono_alloc_ireg (cfg);
4002 ins->inst_basereg = args [0]->dreg;
4003 ins->inst_offset = 0;
4004 ins->sreg2 = ins_iconst->dreg;
4005 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4006 MONO_ADD_INS (cfg->cbb, ins);
4008 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4009 MonoInst *ins_iconst;
4012 if (fsig->params [0]->type == MONO_TYPE_I4)
4013 opcode = OP_ATOMIC_ADD_NEW_I4;
4014 #if SIZEOF_REGISTER == 8
4015 else if (fsig->params [0]->type == MONO_TYPE_I8)
4016 opcode = OP_ATOMIC_ADD_NEW_I8;
4019 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4020 ins_iconst->inst_c0 = -1;
4021 ins_iconst->dreg = mono_alloc_ireg (cfg);
4022 MONO_ADD_INS (cfg->cbb, ins_iconst);
4024 MONO_INST_NEW (cfg, ins, opcode);
4025 ins->dreg = mono_alloc_ireg (cfg);
4026 ins->inst_basereg = args [0]->dreg;
4027 ins->inst_offset = 0;
4028 ins->sreg2 = ins_iconst->dreg;
4029 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4030 MONO_ADD_INS (cfg->cbb, ins);
4032 } else if (strcmp (cmethod->name, "Add") == 0) {
4035 if (fsig->params [0]->type == MONO_TYPE_I4)
4036 opcode = OP_ATOMIC_ADD_NEW_I4;
4037 #if SIZEOF_REGISTER == 8
4038 else if (fsig->params [0]->type == MONO_TYPE_I8)
4039 opcode = OP_ATOMIC_ADD_NEW_I8;
4043 MONO_INST_NEW (cfg, ins, opcode);
4044 ins->dreg = mono_alloc_ireg (cfg);
4045 ins->inst_basereg = args [0]->dreg;
4046 ins->inst_offset = 0;
4047 ins->sreg2 = args [1]->dreg;
4048 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4049 MONO_ADD_INS (cfg->cbb, ins);
4052 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4054 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4055 if (strcmp (cmethod->name, "Exchange") == 0) {
4057 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4059 if (fsig->params [0]->type == MONO_TYPE_I4)
4060 opcode = OP_ATOMIC_EXCHANGE_I4;
4061 #if SIZEOF_REGISTER == 8
4062 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4063 (fsig->params [0]->type == MONO_TYPE_I))
4064 opcode = OP_ATOMIC_EXCHANGE_I8;
4066 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4067 opcode = OP_ATOMIC_EXCHANGE_I4;
4072 MONO_INST_NEW (cfg, ins, opcode);
4073 ins->dreg = mono_alloc_ireg (cfg);
4074 ins->inst_basereg = args [0]->dreg;
4075 ins->inst_offset = 0;
4076 ins->sreg2 = args [1]->dreg;
4077 MONO_ADD_INS (cfg->cbb, ins);
4079 switch (fsig->params [0]->type) {
4081 ins->type = STACK_I4;
4085 ins->type = STACK_I8;
4087 case MONO_TYPE_OBJECT:
4088 ins->type = STACK_OBJ;
4091 g_assert_not_reached ();
/* Storing a reference needs the GC write barrier when barriers are enabled. */
4094 #if HAVE_WRITE_BARRIERS
4096 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4097 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4101 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4103 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4104 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4106 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4107 if (fsig->params [1]->type == MONO_TYPE_I4)
4109 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4110 size = sizeof (gpointer);
4111 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4114 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4115 ins->dreg = alloc_ireg (cfg);
4116 ins->sreg1 = args [0]->dreg;
4117 ins->sreg2 = args [1]->dreg;
4118 ins->sreg3 = args [2]->dreg;
4119 ins->type = STACK_I4;
4120 MONO_ADD_INS (cfg->cbb, ins);
4121 } else if (size == 8) {
4122 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4123 ins->dreg = alloc_ireg (cfg);
4124 ins->sreg1 = args [0]->dreg;
4125 ins->sreg2 = args [1]->dreg;
4126 ins->sreg3 = args [2]->dreg;
4127 ins->type = STACK_I8;
4128 MONO_ADD_INS (cfg->cbb, ins);
4130 /* g_assert_not_reached (); */
4132 #if HAVE_WRITE_BARRIERS
4134 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4135 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4139 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4143 } else if (cmethod->klass->image == mono_defaults.corlib) {
4144 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4145 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4146 MONO_INST_NEW (cfg, ins, OP_BREAK);
4147 MONO_ADD_INS (cfg->cbb, ins);
4150 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4151 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Constant-folded per target platform at compile time. */
4153 EMIT_NEW_ICONST (cfg, ins, 1);
4155 EMIT_NEW_ICONST (cfg, ins, 0);
4159 } else if (cmethod->klass == mono_defaults.math_class) {
4161 * There is general branches code for Min/Max, but it does not work for
4163 * http://everything2.com/?node_id=1051618
4167 #ifdef MONO_ARCH_SIMD_INTRINSICS
4168 if (cfg->opt & MONO_OPT_SIMD) {
4169 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally give the backend a chance to supply an arch-specific intrinsic. */
4175 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4179 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to an alternative implementation where one
 * exists; currently routes String.InternalAllocateStr to the managed GC
 * allocator (when available and not cross-compiling).  Returns the emitted
 * call, or NULL when no redirection applies.
 */
4182 inline static MonoInst*
4183 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4184 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4186 if (method->klass == mono_defaults.string_class) {
4187 /* managed string allocation support */
4188 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4189 MonoInst *iargs [2];
4190 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4191 MonoMethod *managed_alloc = NULL;
4193 g_assert (vtable); /*Should not fail since it System.String*/
4194 #ifndef MONO_CROSS_COMPILE
4195 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4199 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4200 iargs [1] = args [0];
4201 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Inlining support: create an OP_LOCAL variable for each argument of SIG
 * (including the implicit this pointer) and emit stores of the stack values
 * in SP into them.  Populates cfg->args as a side effect.
 */
4208 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4210 MonoInst *store, *temp;
4213 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4214 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4217 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4218 * would be different than the MonoInst's used to represent arguments, and
4219 * the ldelema implementation can't deal with that.
4220 * Solution: When ldelema is used on an inline argument, create a var for
4221 * it, emit ldelema on that var, and emit the saving code below in
4222 * inline_method () if needed.
4224 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4225 cfg->args [i] = temp;
4226 /* This uses cfg->args [i] which is set by the preceding line */
4227 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4228 store->cil_code = sp [0]->cil_code;
/* Compile-time switches for the name-prefix inlining filters below (debug aid). */
4233 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4234 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4236 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable (read once and cached; an empty cached limit disables filtering).
 */
4238 check_inline_called_method_name_limit (MonoMethod *called_method)
4241 static char *limit = NULL;
4243 if (limit == NULL) {
4244 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4246 if (limit_string != NULL)
4247 limit = limit_string;
4249 limit = (char *) "";
4252 if (limit [0] != '\0') {
4253 char *called_method_name = mono_method_full_name (called_method, TRUE);
4255 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4256 g_free (called_method_name);
4258 //return (strncmp_result <= 0);
4259 return (strncmp_result == 0);
4266 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Companion of check_inline_called_method_name_limit (): restrict inlining
 * to callers whose full name starts with the prefix given by the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (read once and
 * cached; an empty cached limit disables filtering).
 */
4268 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4271 static char *limit = NULL;
4273 if (limit == NULL) {
4274 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4275 if (limit_string != NULL) {
4276 limit = limit_string;
4278 limit = (char *) "";
4282 if (limit [0] != '\0') {
4283 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4285 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4286 g_free (caller_method_name);
4288 //return (strncmp_result <= 0);
4289 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Inline the body of CMETHOD at the current IL location: allocate locals
 * and a return variable, create fresh start/end bblocks, save the caller's
 * per-method compilation state in CFG, recursively run mono_method_to_ir ()
 * on CMETHOD, then restore the saved state.  When the recursion succeeds
 * (cost below threshold, or INLINE_ALLWAYS), the new bblocks are linked and
 * merged into the caller's CFG; on failure the exception state is cleared,
 * cfg->cbb is restored and the newly added bblocks are discarded so the
 * caller can emit a normal call instead.
 */
4297 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4298 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4300 MonoInst *ins, *rvar = NULL;
4301 MonoMethodHeader *cheader;
4302 MonoBasicBlock *ebblock, *sbblock;
4304 MonoMethod *prev_inlined_method;
4305 MonoInst **prev_locals, **prev_args;
4306 MonoType **prev_arg_types;
4307 guint prev_real_offset;
4308 GHashTable *prev_cbb_hash;
4309 MonoBasicBlock **prev_cil_offset_to_bb;
4310 MonoBasicBlock *prev_cbb;
4311 unsigned char* prev_cil_start;
4312 guint32 prev_cil_offset_to_bb_len;
4313 MonoMethod *prev_current_method;
4314 MonoGenericContext *prev_generic_context;
4315 gboolean ret_var_set, prev_ret_var_set;
4317 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4319 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4320 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4323 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4324 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4328 if (cfg->verbose_level > 2)
4329 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4331 if (!cmethod->inline_info) {
4332 mono_jit_stats.inlineable_methods++;
4333 cmethod->inline_info = 1;
4335 /* allocate space to store the return value */
4336 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4337 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4340 /* allocate local variables */
4341 cheader = mono_method_get_header (cmethod);
4342 prev_locals = cfg->locals;
4343 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4344 for (i = 0; i < cheader->num_locals; ++i)
4345 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4347 /* allocate start and end blocks */
4348 /* This is needed so if the inline is aborted, we can clean up */
4349 NEW_BBLOCK (cfg, sbblock);
4350 sbblock->real_offset = real_offset;
4352 NEW_BBLOCK (cfg, ebblock);
4353 ebblock->block_num = cfg->num_bblocks++;
4354 ebblock->real_offset = real_offset;
/* Save all per-method compilation state that the recursive
 * mono_method_to_ir () call below will clobber. */
4356 prev_args = cfg->args;
4357 prev_arg_types = cfg->arg_types;
4358 prev_inlined_method = cfg->inlined_method;
4359 cfg->inlined_method = cmethod;
4360 cfg->ret_var_set = FALSE;
4361 cfg->inline_depth ++;
4362 prev_real_offset = cfg->real_offset;
4363 prev_cbb_hash = cfg->cbb_hash;
4364 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4365 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4366 prev_cil_start = cfg->cil_start;
4367 prev_cbb = cfg->cbb;
4368 prev_current_method = cfg->current_method;
4369 prev_generic_context = cfg->generic_context;
4370 prev_ret_var_set = cfg->ret_var_set;
4372 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4374 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of whether the inline succeeded. */
4376 cfg->inlined_method = prev_inlined_method;
4377 cfg->real_offset = prev_real_offset;
4378 cfg->cbb_hash = prev_cbb_hash;
4379 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4380 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4381 cfg->cil_start = prev_cil_start;
4382 cfg->locals = prev_locals;
4383 cfg->args = prev_args;
4384 cfg->arg_types = prev_arg_types;
4385 cfg->current_method = prev_current_method;
4386 cfg->generic_context = prev_generic_context;
4387 cfg->ret_var_set = prev_ret_var_set;
4388 cfg->inline_depth --;
4390 if ((costs >= 0 && costs < 60) || inline_allways) {
4391 if (cfg->verbose_level > 2)
4392 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4394 mono_jit_stats.inlined_methods++;
4396 /* always add some code to avoid block split failures */
4397 MONO_INST_NEW (cfg, ins, OP_NOP);
4398 MONO_ADD_INS (prev_cbb, ins);
4400 prev_cbb->next_bb = sbblock;
4401 link_bblock (cfg, prev_cbb, sbblock);
4404 * Get rid of the begin and end bblocks if possible to aid local
4407 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4409 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4410 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4412 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4413 MonoBasicBlock *prev = ebblock->in_bb [0];
4414 mono_merge_basic_blocks (cfg, prev, ebblock);
4416 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4417 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4418 cfg->cbb = prev_cbb;
4426 * If the inlined method contains only a throw, then the ret var is not
4427 * set, so set it to a dummy value.
4430 static double r8_0 = 0.0;
4432 switch (rvar->type) {
4434 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4437 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4442 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4445 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4446 ins->type = STACK_R8;
4447 ins->inst_p0 = (void*)&r8_0;
4448 ins->dreg = rvar->dreg;
4449 MONO_ADD_INS (cfg->cbb, ins);
4452 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4455 g_assert_not_reached ();
4459 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Inline failed: clear the error state and roll back the new bblocks. */
4464 if (cfg->verbose_level > 2)
4465 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4466 cfg->exception_type = MONO_EXCEPTION_NONE;
4467 mono_loader_clear_error ();
4469 /* This gets rid of the newly added bblocks */
4470 cfg->cbb = prev_cbb;
4476 * Some of these comments may well be out-of-date.
4477 * Design decisions: we do a single pass over the IL code (and we do bblock
4478 * splitting/merging in the few cases when it's required: a back jump to an IL
4479 * address that was not already seen as bblock starting point).
4480 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4481 * Complex operations are decomposed in simpler ones right away. We need to let the
4482 * arch-specific code peek and poke inside this process somehow (except when the
4483 * optimizations can take advantage of the full semantic info of coarse opcodes).
4484 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4485 * MonoInst->opcode initially is the IL opcode or some simplification of that
4486 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4487 * opcode with value bigger than OP_LAST.
4488 * At this point the IR can be handed over to an interpreter, a dumb code generator
4489 * or to the optimizing code generator that will translate it to SSA form.
4491 * Profiling directed optimizations.
4492 * We may compile by default with few or no optimizations and instrument the code
4493 * or the user may indicate what methods to optimize the most either in a config file
4494 * or through repeated runs where the compiler applies offline the optimizations to
4495 * each method and then decides if it was worth it.
/*
 * Verification helper macros used by mono_method_to_ir (): each one jumps to
 * the UNVERIFIED label (or load_error) when its condition fails, aborting
 * the IL->IR translation of the current method.
 */
4498 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* Ensure at least 'num' values are on the evaluation stack before popping */
4499 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* Ensure pushing 'num' values does not exceed the method's declared max_stack */
4500 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* The unsigned compare also rejects negative indexes */
4501 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4502 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* Ensure the current opcode's operand bytes do not run past the IL buffer */
4503 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4504 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* A NULL class or a class with a pending load error aborts compilation */
4505 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4507 /* offset from br.s -> br like opcodes */
4508 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to the basic block BB,
 * i.e. no other bblock starts at that IL offset (a NULL table entry means
 * no bblock boundary there).
 */
4511 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4513 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4515 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END once, creating a MonoBasicBlock (via
 * GET_BBLOCK) for every branch target and for every instruction following a
 * branch, so the main translation loop knows where bblocks begin.  Also
 * marks the bblock containing a CEE_THROW as out_of_line so it can later be
 * emitted as cold code.
 */
4519 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4521 unsigned char *ip = start;
4522 unsigned char *target;
4525 MonoBasicBlock *bblock;
4526 const MonoOpcode *opcode;
/* Decode one opcode per iteration; mono_opcode_value () advances ip */
4529 cli_addr = ip - start;
4530 i = mono_opcode_value ((const guint8 **)&ip, end);
4533 opcode = &mono_opcodes [i];
/* Advance past the operand according to its encoding kind */
4534 switch (opcode->argument) {
4535 case MonoInlineNone:
4538 case MonoInlineString:
4539 case MonoInlineType:
4540 case MonoInlineField:
4541 case MonoInlineMethod:
4544 case MonoShortInlineR:
4551 case MonoShortInlineVar:
4552 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction */
4555 case MonoShortInlineBrTarget:
4556 target = start + cli_addr + 2 + (signed char)ip [1];
4557 GET_BBLOCK (cfg, bblock, target);
/* The fall-through instruction also starts a bblock */
4560 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement */
4562 case MonoInlineBrTarget:
4563 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4564 GET_BBLOCK (cfg, bblock, target);
4567 GET_BBLOCK (cfg, bblock, ip);
4569 case MonoInlineSwitch: {
4570 guint32 n = read32 (ip + 1);
/* The instruction after the jump table starts a bblock (fall-through target) */
4573 cli_addr += 5 + 4 * n;
4574 target = start + cli_addr;
4575 GET_BBLOCK (cfg, bblock, target);
/* Each switch entry is a signed offset from the end of the instruction */
4577 for (j = 0; j < n; ++j) {
4578 target = start + cli_addr + (gint32)read32 (ip);
4579 GET_BBLOCK (cfg, bblock, target);
4589 g_assert_not_reached ();
/* Mark bblocks containing a throw as out-of-line (cold) code */
4592 if (i == CEE_THROW) {
4593 unsigned char *bb_start = ip - 1;
4595 /* Find the start of the bblock containing the throw */
4597 while ((bb_start >= start) && !bblock) {
4598 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4602 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the "token" is really
 * an index into the wrapper's data, otherwise a normal metadata lookup is
 * performed.  The result may still be an open constructed method.
 */
4611 static inline MonoMethod *
4612 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4616 if (m->wrapper_type != MONO_WRAPPER_NONE)
4617 return mono_method_get_wrapper_data (m, token);
4619 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally checks for open
 * constructed declaring types when compiling without a generic sharing
 * context.
 */
4624 static inline MonoMethod *
4625 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4627 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
/* NOTE(review): the branch body is elided in this view; presumably open
 * constructed types are rejected when no sharing context exists -- confirm. */
4629 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass, going through the wrapper data for wrapper
 * methods, and make sure the class is initialized before returning it.
 */
4635 static inline MonoClass*
4636 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4640 if (method->wrapper_type != MONO_WRAPPER_NONE)
4641 klass = mono_method_get_wrapper_data (method, token);
4643 klass = mono_class_get_full (method->klass->image, token, context);
4645 mono_class_init (klass);
4650 * Returns TRUE if the JIT should abort inlining because "callee"
4651 * is influenced by security attributes.
4654 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only check when inlining (cfg->method != caller) and the callee carries
 * declarative security */
4658 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4662 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4663 if (result == MONO_JIT_SECURITY_OK)
/* ECMA link demand: emit a runtime throw instead of failing the compile */
4666 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4667 /* Generate code to throw a SecurityException before the actual call/link */
4668 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4671 NEW_ICONST (cfg, args [0], 4);
4672 NEW_METHODCONST (cfg, args [1], caller);
4673 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4674 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4675 /* don't hide previous results */
4676 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4677 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the managed
 * SecurityManager.ThrowException method used by generated code to raise
 * security exceptions at runtime.
 */
4685 throw_exception (void)
4687 static MonoMethod *method = NULL;
/* One-time lookup; cached in the static above */
4690 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4691 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at runtime,
 * passing the pre-built exception object as a pointer constant.
 */
4698 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4700 MonoMethod *thrower = throw_exception ();
4703 EMIT_NEW_PCONST (cfg, args [0], ex);
4704 mono_emit_method_call (cfg, thrower, args, NULL);
4708 * Return the original method if a wrapper is specified. We can only access
4709 * the custom attributes from the original method.
4712 get_original_method (MonoMethod *method)
4714 if (method->wrapper_type == MONO_WRAPPER_NONE)
4717 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4718 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4721 /* in other cases we need to find the original method */
4722 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: emit a FieldAccessException throw when a
 * Transparent caller accesses a Critical field.  No code is emitted when
 * access is allowed.
 */
4726 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4727 MonoBasicBlock *bblock, unsigned char *ip)
4729 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4730 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4733 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4734 caller = get_original_method (caller);
4738 /* the field is Critical! only SafeCritical and Critical callers can access it, so we throw if caller is Transparent */
4739 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4740 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: emit a MethodAccessException throw when a
 * Transparent caller invokes a Critical method.  No code is emitted when
 * the call is allowed.
 */
4744 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4745 MonoBasicBlock *bblock, unsigned char *ip)
4747 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4748 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4751 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4752 caller = get_original_method (caller);
4756 /* the callee is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4757 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4758 emit_throw_exception (cfg, mono_get_exception_method_access ());
4762 * Check that the IL instructions at ip are the array initialization
4763 * sequence and return the pointer to the data and the size.
4766 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Pattern being matched (the code after a newarr): */
4769 * newarr[System.Int32]
4771 * ldtoken field valuetype ...
4772 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match: dup; ldtoken <field>; call <method>.  0x4 is the metadata table byte
 * of the ldtoken operand (a Field token). */
4774 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4775 guint32 token = read32 (ip + 7);
4776 guint32 field_token = read32 (ip + 2);
4777 guint32 field_index = field_token & 0xffffff;
4779 const char *data_ptr;
4781 MonoMethod *cmethod;
4782 MonoClass *dummy_class;
4783 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4789 *out_field_token = field_token;
4791 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must be corlib's RuntimeHelpers.InitializeArray */
4794 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose on-disk layout matches the runtime layout qualify */
4796 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4797 case MONO_TYPE_BOOLEAN:
4801 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4802 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4803 case MONO_TYPE_CHAR:
4813 return NULL; /* stupid ARM FP swapped format */
/* The array data must fit inside the initializer field */
4823 if (size > mono_type_size (field->type, &dummy_align))
4826 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4827 if (!method->klass->image->dynamic) {
4828 field_index = read32 (ip + 2) & 0xffffff;
4829 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4830 data_ptr = mono_image_rva_map (method->klass->image, rva);
4831 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4832 /* for aot code we do the lookup on load */
4833 if (aot && data_ptr)
4834 return GUINT_TO_POINTER (rva);
4836 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images: read the blob straight from the field */
4838 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, embedding the disassembly of
 * the offending IL instruction at IP (or a note that the body is empty)
 * in the exception message.
 */
4846 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4848 char *method_fname = mono_method_full_name (method, TRUE);
4851 if (mono_method_get_header (method)->code_size == 0)
4852 method_code = g_strdup ("method body is empty.");
4854 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4855 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4856 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4857 g_free (method_fname);
4858 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Store a pre-built managed exception object on CFG.  The pointer slot is
 * registered as a GC root first so the object survives collections while
 * compilation is aborted.
 */
4862 set_exception_object (MonoCompile *cfg, MonoException *exception)
4864 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4865 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4866 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Like MONO_TYPE_IS_REFERENCE, but when compiling shared generic code the
 * type is first mapped to its basic (sharing) representation so type
 * variables are classified correctly.
 */
4870 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4874 if (cfg->generic_sharing_context)
4875 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4877 type = &klass->byval_arg;
4878 return MONO_TYPE_IS_REFERENCE (type);
4882 * mono_decompose_array_access_opts:
4884 * Decompose array access opcodes.
4885 * This should be in decompose.c, but it emits calls so it has to stay here until
4886 * the old JIT is gone.
4889 mono_decompose_array_access_opts (MonoCompile *cfg)
4891 MonoBasicBlock *bb, *first_bb;
4894 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4895 * can be executed anytime. It should be run before decompose_long
4899 * Create a dummy bblock and emit code into it so we can use the normal
4900 * code generation macros.
4902 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4903 first_bb = cfg->cbb;
4905 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4907 MonoInst *prev = NULL;
4909 MonoInst *iargs [3];
/* Skip bblocks that contain no array access opcodes */
4912 if (!bb->has_array_access)
4915 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4917 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4923 for (ins = bb->code; ins; ins = ins->next) {
4924 switch (ins->opcode) {
/* Array length: null check + load of MonoArray::max_length */
4926 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4927 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4928 G_STRUCT_OFFSET (MonoArray, max_length));
4929 MONO_ADD_INS (cfg->cbb, dest);
4931 case OP_BOUNDS_CHECK:
/* FIX: removed stray '\' line-continuation left over from macro-ified code */
4932 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4933 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr: in shared code the domain must be passed explicitly */
4936 if (cfg->opt & MONO_OPT_SHARED) {
4937 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4938 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4939 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4940 iargs [2]->dreg = ins->sreg1;
4942 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4943 dest->dreg = ins->dreg;
4945 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4946 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (vtable, 1);
4948 g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
4949 NEW_VTABLECONST (cfg, iargs [0], vtable);
4950 MONO_ADD_INS (cfg->cbb, iargs [0]);
4951 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4952 iargs [1]->dreg = ins->sreg1;
/* Prefer the GC's managed allocator when one is available */
4955 dest = mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4957 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4958 dest->dreg = ins->dreg;
/* String length: null check + load of MonoString::length */
4962 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4963 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4964 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4965 MONO_ADD_INS (cfg->cbb, dest);
4971 g_assert (cfg->cbb == first_bb);
4973 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4974 /* Replace the original instruction with the new code sequence */
4976 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4977 first_bb->code = first_bb->last_ins = NULL;
4978 first_bb->in_count = first_bb->out_count = 0;
4979 cfg->cbb = first_bb;
4986 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4996 #ifdef MONO_ARCH_SOFT_FLOAT
4999 * mono_decompose_soft_float:
5001 * Soft float support on ARM. We store each double value in a pair of integer vregs,
5002 * similar to long support on 32 bit platforms. 32 bit float values require special
5003 * handling when used as locals, arguments, and in calls.
5004 * One big problem with soft-float is that there are few r4 test cases in our test suite.
5007 mono_decompose_soft_float (MonoCompile *cfg)
5009 MonoBasicBlock *bb, *first_bb;
5012 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
5016 * Create a dummy bblock and emit code into it so we can use the normal
5017 * code generation macros.
5019 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
5020 first_bb = cfg->cbb;
5022 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5024 MonoInst *prev = NULL;
5027 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
5029 cfg->cbb->code = cfg->cbb->last_ins = NULL;
5035 for (ins = bb->code; ins; ins = ins->next) {
5036 const char *spec = INS_INFO (ins->opcode);
5038 /* Most fp operations are handled automatically by opcode emulation */
5040 switch (ins->opcode) {
/* r8 constant: reinterpret the double bits as an i8 constant */
5043 d.vald = *(double*)ins->inst_p0;
5044 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
5049 /* We load the r8 value */
5050 d.vald = *(float*)ins->inst_p0;
5051 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long (vreg-pair) moves */
5055 ins->opcode = OP_LMOVE;
5058 ins->opcode = OP_MOVE;
5059 ins->sreg1 = ins->sreg1 + 1;
5062 ins->opcode = OP_MOVE;
5063 ins->sreg1 = ins->sreg1 + 2;
/* returning an r8 returns the two halves of the vreg pair */
5066 int reg = ins->sreg1;
5068 ins->opcode = OP_SETLRET;
5070 ins->sreg1 = reg + 1;
5071 ins->sreg2 = reg + 2;
/* r8 loads/stores become plain i8 loads/stores */
5074 case OP_LOADR8_MEMBASE:
5075 ins->opcode = OP_LOADI8_MEMBASE;
5077 case OP_STORER8_MEMBASE_REG:
5078 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* storing an r4 requires an r8->r4 conversion: done via the mono_fstore_r4 icall */
5080 case OP_STORER4_MEMBASE_REG: {
5081 MonoInst *iargs [2];
5084 /* Arg 1 is the double value */
5085 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5086 iargs [0]->dreg = ins->sreg1;
5088 /* Arg 2 is the address to store to */
5089 addr_reg = mono_alloc_preg (cfg);
5090 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5091 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* loading an r4 goes through the mono_fload_r4 icall (r4->r8 widening) */
5095 case OP_LOADR4_MEMBASE: {
5096 MonoInst *iargs [1];
5100 addr_reg = mono_alloc_preg (cfg);
5101 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5102 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5103 conv->dreg = ins->dreg;
5108 case OP_FCALL_MEMBASE: {
5109 MonoCallInst *call = (MonoCallInst*)ins;
5110 if (call->signature->ret->type == MONO_TYPE_R4) {
5111 MonoCallInst *call2;
5112 MonoInst *iargs [1];
5115 /* Convert the call into a call returning an int */
5116 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5117 memcpy (call2, call, sizeof (MonoCallInst));
5118 switch (ins->opcode) {
5120 call2->inst.opcode = OP_CALL;
5123 call2->inst.opcode = OP_CALL_REG;
5125 case OP_FCALL_MEMBASE:
5126 call2->inst.opcode = OP_CALL_MEMBASE;
5129 g_assert_not_reached ();
5131 call2->inst.dreg = mono_alloc_ireg (cfg);
5132 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5134 /* FIXME: Optimize this */
5136 /* Emit an r4->r8 conversion */
5137 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5138 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5139 conv->dreg = ins->dreg;
5141 /* The call sequence might include fp ins */
/* r8-returning calls become long calls (result in a vreg pair) */
5144 switch (ins->opcode) {
5146 ins->opcode = OP_LCALL;
5149 ins->opcode = OP_LCALL_REG;
5151 case OP_FCALL_MEMBASE:
5152 ins->opcode = OP_LCALL_MEMBASE;
5155 g_assert_not_reached ();
5161 MonoJitICallInfo *info;
5162 MonoInst *iargs [2];
5163 MonoInst *call, *cmp, *br;
5165 /* Convert fcompare+fbcc to icall+icompare+beq */
5167 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5170 /* Create dummy MonoInst's for the arguments */
5171 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5172 iargs [0]->dreg = ins->sreg1;
5173 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5174 iargs [1]->dreg = ins->sreg2;
5176 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5178 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5179 cmp->sreg1 = call->dreg;
5181 MONO_ADD_INS (cfg->cbb, cmp);
5183 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5184 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5185 br->inst_true_bb = ins->next->inst_true_bb;
5186 br->inst_false_bb = ins->next->inst_false_bb;
5187 MONO_ADD_INS (cfg->cbb, br);
5189 /* The call sequence might include fp ins */
5192 /* Skip fbcc or fccc */
5193 NULLIFY_INS (ins->next);
5201 MonoJitICallInfo *info;
5202 MonoInst *iargs [2];
5205 /* Convert fccc to icall+icompare+iceq */
5207 info = mono_find_jit_opcode_emulation (ins->opcode);
5210 /* Create dummy MonoInst's for the arguments */
5211 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5212 iargs [0]->dreg = ins->sreg1;
5213 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5214 iargs [1]->dreg = ins->sreg2;
5216 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5219 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5221 /* The call sequence might include fp ins */
/* ckfinite: throw ArithmeticException on NaN/Inf, else pass the value through */
5226 MonoInst *iargs [2];
5227 MonoInst *call, *cmp;
5229 /* Convert to icall+icompare+cond_exc+move */
5231 /* Create dummy MonoInst's for the arguments */
5232 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5233 iargs [0]->dreg = ins->sreg1;
5235 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5237 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5238 cmp->sreg1 = call->dreg;
5240 MONO_ADD_INS (cfg->cbb, cmp);
5242 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5244 /* Do the assignment if the value is finite */
5245 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* Sanity check: after decomposition no instruction may still touch an fp vreg */
5251 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5252 mono_print_ins (ins);
5253 g_assert_not_reached ();
5258 g_assert (cfg->cbb == first_bb);
5260 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5261 /* Replace the original instruction with the new code sequence */
5263 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5264 first_bb->code = first_bb->last_ins = NULL;
5265 first_bb->in_count = first_bb->out_count = 0;
5266 cfg->cbb = first_bb;
5273 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so decompose them now */
5276 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N.  When the store would
 * be a plain reg-reg move of a freshly-emitted constant, fold it away by
 * retargeting the constant's destination register to the local's vreg.
 */
5282 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5285 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5286 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5287 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5288 /* Optimize reg-reg moves away */
5290 * Can't optimize other opcodes, since sp[0] might point to
5291 * the last ins of a decomposed opcode.
5293 sp [0]->dreg = (cfg)->locals [n]->dreg;
5295 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5300 * ldloca inhibits many optimizations so try to get rid of it in common
5303 static inline unsigned char *
5304 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5313 local = read16 (ip + 2);
/* Recognize "ldloca <n>; initobj <type>" within the same bblock and turn it
 * into a direct initialization of the local, avoiding the address-taken flag */
5317 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5318 gboolean skip = FALSE;
5320 /* From the INITOBJ case */
5321 token = read32 (ip + 2);
5322 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5323 CHECK_TYPELOAD (klass);
/* Reference types (including shared generic type vars that map to references)
 * are initialized to NULL; value types are zeroed */
5324 if (generic_class_is_reference_type (cfg, klass)) {
5325 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5326 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5327 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5328 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5329 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS derives from System.Exception, walking up the
 * parent chain.
 */
5342 is_exception_class (MonoClass *class)
5345 if (class == mono_defaults.exception_class)
5347 class = class->parent;
5353 * mono_method_to_ir:
5355 * Translate the .net IL into linear IR.
5358 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5359 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5360 guint inline_offset, gboolean is_virtual_call)
5362 MonoInst *ins, **sp, **stack_start;
5363 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5364 MonoMethod *cmethod, *method_definition;
5365 MonoInst **arg_array;
5366 MonoMethodHeader *header;
5368 guint32 token, ins_flag;
5370 MonoClass *constrained_call = NULL;
5371 unsigned char *ip, *end, *target, *err_pos;
5372 static double r8_0 = 0.0;
5373 MonoMethodSignature *sig;
5374 MonoGenericContext *generic_context = NULL;
5375 MonoGenericContainer *generic_container = NULL;
5376 MonoType **param_types;
5377 int i, n, start_new_bblock, dreg;
5378 int num_calls = 0, inline_costs = 0;
5379 int breakpoint_id = 0;
5381 MonoBoolean security, pinvoke;
5382 MonoSecurityManager* secman = NULL;
5383 MonoDeclSecurityActions actions;
5384 GSList *class_inits = NULL;
5385 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5387 gboolean init_locals, seq_points;
5389 /* serialization and xdomain stuff may need access to private fields and methods */
5390 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5391 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5392 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5393 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5394 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5395 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5397 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5399 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5400 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5401 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5402 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5404 image = method->klass->image;
5405 header = mono_method_get_header (method);
5406 generic_container = mono_method_get_generic_container (method);
5407 sig = mono_method_signature (method);
5408 num_args = sig->hasthis + sig->param_count;
5409 ip = (unsigned char*)header->code;
5410 cfg->cil_start = ip;
5411 end = ip + header->code_size;
5412 mono_jit_stats.cil_code_size += header->code_size;
5413 init_locals = header->init_locals;
5415 seq_points = cfg->gen_seq_points && cfg->method == method;
5418 * Methods without init_locals set could cause asserts in various passes
5423 method_definition = method;
5424 while (method_definition->is_inflated) {
5425 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5426 method_definition = imethod->declaring;
5429 /* SkipVerification is not allowed if core-clr is enabled */
5430 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5432 dont_verify_stloc = TRUE;
5435 if (!dont_verify && mini_method_verify (cfg, method_definition))
5436 goto exception_exit;
5438 if (mono_debug_using_mono_debugger ())
5439 cfg->keep_cil_nops = TRUE;
5441 if (sig->is_inflated)
5442 generic_context = mono_method_get_context (method);
5443 else if (generic_container)
5444 generic_context = &generic_container->context;
5445 cfg->generic_context = generic_context;
5447 if (!cfg->generic_sharing_context)
5448 g_assert (!sig->has_type_parameters);
5450 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5451 g_assert (method->is_inflated);
5452 g_assert (mono_method_get_context (method)->method_inst);
5454 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5455 g_assert (sig->generic_param_count);
5457 if (cfg->method == method) {
5458 cfg->real_offset = 0;
5460 cfg->real_offset = inline_offset;
5463 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5464 cfg->cil_offset_to_bb_len = header->code_size;
5466 cfg->current_method = method;
5468 if (cfg->verbose_level > 2)
5469 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5471 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5473 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5474 for (n = 0; n < sig->param_count; ++n)
5475 param_types [n + sig->hasthis] = sig->params [n];
5476 cfg->arg_types = param_types;
5478 dont_inline = g_list_prepend (dont_inline, method);
5479 if (cfg->method == method) {
5481 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5482 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5485 NEW_BBLOCK (cfg, start_bblock);
5486 cfg->bb_entry = start_bblock;
5487 start_bblock->cil_code = NULL;
5488 start_bblock->cil_length = 0;
5491 NEW_BBLOCK (cfg, end_bblock);
5492 cfg->bb_exit = end_bblock;
5493 end_bblock->cil_code = NULL;
5494 end_bblock->cil_length = 0;
5495 g_assert (cfg->num_bblocks == 2);
5497 arg_array = cfg->args;
5499 if (header->num_clauses) {
5500 cfg->spvars = g_hash_table_new (NULL, NULL);
5501 cfg->exvars = g_hash_table_new (NULL, NULL);
5503 /* handle exception clauses */
5504 for (i = 0; i < header->num_clauses; ++i) {
5505 MonoBasicBlock *try_bb;
5506 MonoExceptionClause *clause = &header->clauses [i];
5507 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5508 try_bb->real_offset = clause->try_offset;
5509 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5510 tblock->real_offset = clause->handler_offset;
5511 tblock->flags |= BB_EXCEPTION_HANDLER;
5513 link_bblock (cfg, try_bb, tblock);
5515 if (*(ip + clause->handler_offset) == CEE_POP)
5516 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5518 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5519 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5520 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5521 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5522 MONO_ADD_INS (tblock, ins);
5524 /* todo: is a fault block unsafe to optimize? */
5525 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5526 tblock->flags |= BB_EXCEPTION_UNSAFE;
5530 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5532 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5534 /* catch and filter blocks get the exception object on the stack */
5535 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5536 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5537 MonoInst *dummy_use;
5539 /* mostly like handle_stack_args (), but just sets the input args */
5540 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5541 tblock->in_scount = 1;
5542 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5543 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5546 * Add a dummy use for the exvar so its liveness info will be
5550 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5552 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5553 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5554 tblock->flags |= BB_EXCEPTION_HANDLER;
5555 tblock->real_offset = clause->data.filter_offset;
5556 tblock->in_scount = 1;
5557 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5558 /* The filter block shares the exvar with the handler block */
5559 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5560 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5561 MONO_ADD_INS (tblock, ins);
5565 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5566 clause->data.catch_class &&
5567 cfg->generic_sharing_context &&
5568 mono_class_check_context_used (clause->data.catch_class)) {
5570 * In shared generic code with catch
5571 * clauses containing type variables
5572 * the exception handling code has to
5573 * be able to get to the rgctx.
5574 * Therefore we have to make sure that
5575 * the vtable/mrgctx argument (for
5576 * static or generic methods) or the
5577 * "this" argument (for non-static
5578 * methods) are live.
5580 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5581 mini_method_get_context (method)->method_inst ||
5582 method->klass->valuetype) {
5583 mono_get_vtable_var (cfg);
5585 MonoInst *dummy_use;
5587 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5592 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5593 cfg->cbb = start_bblock;
5594 cfg->args = arg_array;
5595 mono_save_args (cfg, sig, inline_args);
5598 /* FIRST CODE BLOCK */
5599 NEW_BBLOCK (cfg, bblock);
5600 bblock->cil_code = ip;
5604 ADD_BBLOCK (cfg, bblock);
5606 if (cfg->method == method) {
5607 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5608 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5609 MONO_INST_NEW (cfg, ins, OP_BREAK);
5610 MONO_ADD_INS (bblock, ins);
5614 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5615 secman = mono_security_manager_get_methods ();
5617 security = (secman && mono_method_has_declsec (method));
5618 /* at this point having security doesn't mean we have any code to generate */
5619 if (security && (cfg->method == method)) {
5620 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5621 * And we do not want to enter the next section (with allocation) if we
5622 * have nothing to generate */
5623 security = mono_declsec_get_demands (method, &actions);
5626 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5627 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5629 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5630 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5631 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5633 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5634 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5638 mono_custom_attrs_free (custom);
5641 custom = mono_custom_attrs_from_class (wrapped->klass);
5642 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5646 mono_custom_attrs_free (custom);
5649 /* not a P/Invoke after all */
5654 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5655 /* we use a separate basic block for the initialization code */
5656 NEW_BBLOCK (cfg, init_localsbb);
5657 cfg->bb_init = init_localsbb;
5658 init_localsbb->real_offset = cfg->real_offset;
5659 start_bblock->next_bb = init_localsbb;
5660 init_localsbb->next_bb = bblock;
5661 link_bblock (cfg, start_bblock, init_localsbb);
5662 link_bblock (cfg, init_localsbb, bblock);
5664 cfg->cbb = init_localsbb;
5666 start_bblock->next_bb = bblock;
5667 link_bblock (cfg, start_bblock, bblock);
5670 /* at this point we know, if security is TRUE, that some code needs to be generated */
5671 if (security && (cfg->method == method)) {
5674 mono_jit_stats.cas_demand_generation++;
5676 if (actions.demand.blob) {
5677 /* Add code for SecurityAction.Demand */
5678 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5679 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5680 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5681 mono_emit_method_call (cfg, secman->demand, args, NULL);
5683 if (actions.noncasdemand.blob) {
5684 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5685 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5686 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5687 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5688 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5689 mono_emit_method_call (cfg, secman->demand, args, NULL);
5691 if (actions.demandchoice.blob) {
5692 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5693 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5694 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5695 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5696 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5700 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5702 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5705 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5706 /* check if this is native code, e.g. an icall or a p/invoke */
5707 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5708 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5710 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5711 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5713 /* if this ia a native call then it can only be JITted from platform code */
5714 if ((icall || pinvk) && method->klass && method->klass->image) {
5715 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5716 MonoException *ex = icall ? mono_get_exception_security () :
5717 mono_get_exception_method_access ();
5718 emit_throw_exception (cfg, ex);
5725 if (header->code_size == 0)
5728 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5733 if (cfg->method == method)
5734 mono_debug_init_method (cfg, bblock, breakpoint_id);
5736 for (n = 0; n < header->num_locals; ++n) {
5737 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5742 /* We force the vtable variable here for all shared methods
5743 for the possibility that they might show up in a stack
5744 trace where their exact instantiation is needed. */
5745 if (cfg->generic_sharing_context && method == cfg->method) {
5746 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5747 mini_method_get_context (method)->method_inst ||
5748 method->klass->valuetype) {
5749 mono_get_vtable_var (cfg);
5751 /* FIXME: Is there a better way to do this?
5752 We need the variable live for the duration
5753 of the whole method. */
5754 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5758 /* add a check for this != NULL to inlined methods */
5759 if (is_virtual_call) {
5762 NEW_ARGLOAD (cfg, arg_ins, 0);
5763 MONO_ADD_INS (cfg->cbb, arg_ins);
5764 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5765 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5766 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5769 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5770 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5773 start_new_bblock = 0;
5777 if (cfg->method == method)
5778 cfg->real_offset = ip - header->code;
5780 cfg->real_offset = inline_offset;
5785 if (start_new_bblock) {
5786 bblock->cil_length = ip - bblock->cil_code;
5787 if (start_new_bblock == 2) {
5788 g_assert (ip == tblock->cil_code);
5790 GET_BBLOCK (cfg, tblock, ip);
5792 bblock->next_bb = tblock;
5795 start_new_bblock = 0;
5796 for (i = 0; i < bblock->in_scount; ++i) {
5797 if (cfg->verbose_level > 3)
5798 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5799 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5803 g_slist_free (class_inits);
5806 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5807 link_bblock (cfg, bblock, tblock);
5808 if (sp != stack_start) {
5809 handle_stack_args (cfg, stack_start, sp - stack_start);
5811 CHECK_UNVERIFIABLE (cfg);
5813 bblock->next_bb = tblock;
5816 for (i = 0; i < bblock->in_scount; ++i) {
5817 if (cfg->verbose_level > 3)
5818 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5819 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5822 g_slist_free (class_inits);
5828 * Sequence points are points where the debugger can place a breakpoint.
5829 * Currently, we generate these automatically at points where the IL
5832 if (seq_points && sp == stack_start) {
5833 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5834 MONO_ADD_INS (cfg->cbb, ins);
5837 bblock->real_offset = cfg->real_offset;
5839 if ((cfg->method == method) && cfg->coverage_info) {
5840 guint32 cil_offset = ip - header->code;
5841 cfg->coverage_info->data [cil_offset].cil_code = ip;
5843 /* TODO: Use an increment here */
5844 #if defined(TARGET_X86)
5845 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5846 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5848 MONO_ADD_INS (cfg->cbb, ins);
5850 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5851 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5855 if (cfg->verbose_level > 3)
5856 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5860 if (cfg->keep_cil_nops)
5861 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5863 MONO_INST_NEW (cfg, ins, OP_NOP);
5865 MONO_ADD_INS (bblock, ins);
5868 MONO_INST_NEW (cfg, ins, OP_BREAK);
5870 MONO_ADD_INS (bblock, ins);
5876 CHECK_STACK_OVF (1);
5877 n = (*ip)-CEE_LDARG_0;
5879 EMIT_NEW_ARGLOAD (cfg, ins, n);
5887 CHECK_STACK_OVF (1);
5888 n = (*ip)-CEE_LDLOC_0;
5890 EMIT_NEW_LOCLOAD (cfg, ins, n);
5899 n = (*ip)-CEE_STLOC_0;
5902 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5904 emit_stloc_ir (cfg, sp, header, n);
5911 CHECK_STACK_OVF (1);
5914 EMIT_NEW_ARGLOAD (cfg, ins, n);
5920 CHECK_STACK_OVF (1);
5923 NEW_ARGLOADA (cfg, ins, n);
5924 MONO_ADD_INS (cfg->cbb, ins);
5934 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5936 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5941 CHECK_STACK_OVF (1);
5944 EMIT_NEW_LOCLOAD (cfg, ins, n);
5948 case CEE_LDLOCA_S: {
5949 unsigned char *tmp_ip;
5951 CHECK_STACK_OVF (1);
5952 CHECK_LOCAL (ip [1]);
5954 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5960 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5969 CHECK_LOCAL (ip [1]);
5970 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5972 emit_stloc_ir (cfg, sp, header, ip [1]);
5977 CHECK_STACK_OVF (1);
5978 EMIT_NEW_PCONST (cfg, ins, NULL);
5979 ins->type = STACK_OBJ;
5984 CHECK_STACK_OVF (1);
5985 EMIT_NEW_ICONST (cfg, ins, -1);
5998 CHECK_STACK_OVF (1);
5999 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6005 CHECK_STACK_OVF (1);
6007 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6013 CHECK_STACK_OVF (1);
6014 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6020 CHECK_STACK_OVF (1);
6021 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6022 ins->type = STACK_I8;
6023 ins->dreg = alloc_dreg (cfg, STACK_I8);
6025 ins->inst_l = (gint64)read64 (ip);
6026 MONO_ADD_INS (bblock, ins);
6032 gboolean use_aotconst = FALSE;
6034 #ifdef TARGET_POWERPC
6035 /* FIXME: Clean this up */
6036 if (cfg->compile_aot)
6037 use_aotconst = TRUE;
6040 /* FIXME: we should really allocate this only late in the compilation process */
6041 f = mono_domain_alloc (cfg->domain, sizeof (float));
6043 CHECK_STACK_OVF (1);
6049 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6051 dreg = alloc_freg (cfg);
6052 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6053 ins->type = STACK_R8;
6055 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6056 ins->type = STACK_R8;
6057 ins->dreg = alloc_dreg (cfg, STACK_R8);
6059 MONO_ADD_INS (bblock, ins);
6069 gboolean use_aotconst = FALSE;
6071 #ifdef TARGET_POWERPC
6072 /* FIXME: Clean this up */
6073 if (cfg->compile_aot)
6074 use_aotconst = TRUE;
6077 /* FIXME: we should really allocate this only late in the compilation process */
6078 d = mono_domain_alloc (cfg->domain, sizeof (double));
6080 CHECK_STACK_OVF (1);
6086 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6088 dreg = alloc_freg (cfg);
6089 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6090 ins->type = STACK_R8;
6092 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6093 ins->type = STACK_R8;
6094 ins->dreg = alloc_dreg (cfg, STACK_R8);
6096 MONO_ADD_INS (bblock, ins);
6105 MonoInst *temp, *store;
6107 CHECK_STACK_OVF (1);
6111 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6112 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6114 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6117 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6130 if (sp [0]->type == STACK_R8)
6131 /* we need to pop the value from the x86 FP stack */
6132 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6141 if (stack_start != sp)
6143 token = read32 (ip + 1);
6144 /* FIXME: check the signature matches */
6145 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6150 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6151 GENERIC_SHARING_FAILURE (CEE_JMP);
6153 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6154 CHECK_CFG_EXCEPTION;
6156 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6158 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6161 /* Handle tail calls similarly to calls */
6162 n = fsig->param_count + fsig->hasthis;
6164 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6165 call->method = cmethod;
6166 call->tail_call = TRUE;
6167 call->signature = mono_method_signature (cmethod);
6168 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6169 call->inst.inst_p0 = cmethod;
6170 for (i = 0; i < n; ++i)
6171 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6173 mono_arch_emit_call (cfg, call);
6174 MONO_ADD_INS (bblock, (MonoInst*)call);
6177 for (i = 0; i < num_args; ++i)
6178 /* Prevent arguments from being optimized away */
6179 arg_array [i]->flags |= MONO_INST_VOLATILE;
6181 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6182 ins = (MonoInst*)call;
6183 ins->inst_p0 = cmethod;
6184 MONO_ADD_INS (bblock, ins);
6188 start_new_bblock = 1;
6193 case CEE_CALLVIRT: {
6194 MonoInst *addr = NULL;
6195 MonoMethodSignature *fsig = NULL;
6197 int virtual = *ip == CEE_CALLVIRT;
6198 int calli = *ip == CEE_CALLI;
6199 gboolean pass_imt_from_rgctx = FALSE;
6200 MonoInst *imt_arg = NULL;
6201 gboolean pass_vtable = FALSE;
6202 gboolean pass_mrgctx = FALSE;
6203 MonoInst *vtable_arg = NULL;
6204 gboolean check_this = FALSE;
6205 gboolean supported_tail_call = FALSE;
6208 token = read32 (ip + 1);
6215 if (method->wrapper_type != MONO_WRAPPER_NONE)
6216 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6218 fsig = mono_metadata_parse_signature (image, token);
6220 n = fsig->param_count + fsig->hasthis;
6222 MonoMethod *cil_method;
6224 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6225 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6226 cil_method = cmethod;
6227 } else if (constrained_call) {
6228 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6230 * This is needed since get_method_constrained can't find
6231 * the method in klass representing a type var.
6232 * The type var is guaranteed to be a reference type in this
6235 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6236 cil_method = cmethod;
6237 g_assert (!cmethod->klass->valuetype);
6239 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6242 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6243 cil_method = cmethod;
6248 if (!dont_verify && !cfg->skip_visibility) {
6249 MonoMethod *target_method = cil_method;
6250 if (method->is_inflated) {
6251 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6253 if (!mono_method_can_access_method (method_definition, target_method) &&
6254 !mono_method_can_access_method (method, cil_method))
6255 METHOD_ACCESS_FAILURE;
6258 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6259 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6261 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6262 /* MS.NET seems to silently convert this to a callvirt */
6265 if (!cmethod->klass->inited)
6266 if (!mono_class_init (cmethod->klass))
6269 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6270 mini_class_is_system_array (cmethod->klass)) {
6271 array_rank = cmethod->klass->rank;
6272 fsig = mono_method_signature (cmethod);
6274 if (mono_method_signature (cmethod)->pinvoke) {
6275 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6276 check_for_pending_exc, FALSE);
6277 fsig = mono_method_signature (wrapper);
6278 } else if (constrained_call) {
6279 fsig = mono_method_signature (cmethod);
6281 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6285 mono_save_token_info (cfg, image, token, cil_method);
6287 n = fsig->param_count + fsig->hasthis;
6289 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6290 if (check_linkdemand (cfg, method, cmethod))
6292 CHECK_CFG_EXCEPTION;
6295 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6296 g_assert_not_reached ();
6299 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6302 if (!cfg->generic_sharing_context && cmethod)
6303 g_assert (!mono_method_check_context_used (cmethod));
6307 //g_assert (!virtual || fsig->hasthis);
6311 if (constrained_call) {
6313 * We have the `constrained.' prefix opcode.
6315 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6317 * The type parameter is instantiated as a valuetype,
6318 * but that type doesn't override the method we're
6319 * calling, so we need to box `this'.
6321 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6322 ins->klass = constrained_call;
6323 sp [0] = handle_box (cfg, ins, constrained_call);
6324 CHECK_CFG_EXCEPTION;
6325 } else if (!constrained_call->valuetype) {
6326 int dreg = alloc_preg (cfg);
6329 * The type parameter is instantiated as a reference
6330 * type. We have a managed pointer on the stack, so
6331 * we need to dereference it here.
6333 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6334 ins->type = STACK_OBJ;
6336 } else if (cmethod->klass->valuetype)
6338 constrained_call = NULL;
6341 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6345 * If the callee is a shared method, then its static cctor
6346 * might not get called after the call was patched.
6348 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6349 emit_generic_class_init (cfg, cmethod->klass);
6350 CHECK_TYPELOAD (cmethod->klass);
6353 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6354 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6355 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6356 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6357 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6360 * Pass vtable iff target method might
6361 * be shared, which means that sharing
6362 * is enabled for its class and its
6363 * context is sharable (and it's not a
6366 if (sharing_enabled && context_sharable &&
6367 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6371 if (cmethod && mini_method_get_context (cmethod) &&
6372 mini_method_get_context (cmethod)->method_inst) {
6373 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6374 MonoGenericContext *context = mini_method_get_context (cmethod);
6375 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6377 g_assert (!pass_vtable);
6379 if (sharing_enabled && context_sharable)
6383 if (cfg->generic_sharing_context && cmethod) {
6384 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6386 context_used = mono_method_check_context_used (cmethod);
6388 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6389 /* Generic method interface
6390 calls are resolved via a
6391 helper function and don't
6393 if (!cmethod_context || !cmethod_context->method_inst)
6394 pass_imt_from_rgctx = TRUE;
6398 * If a shared method calls another
6399 * shared method then the caller must
6400 * have a generic sharing context
6401 * because the magic trampoline
6402 * requires it. FIXME: We shouldn't
6403 * have to force the vtable/mrgctx
6404 * variable here. Instead there
6405 * should be a flag in the cfg to
6406 * request a generic sharing context.
6409 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6410 mono_get_vtable_var (cfg);
6415 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6417 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6419 CHECK_TYPELOAD (cmethod->klass);
6420 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6425 g_assert (!vtable_arg);
6427 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6429 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6430 MONO_METHOD_IS_FINAL (cmethod)) {
6437 if (pass_imt_from_rgctx) {
6438 g_assert (!pass_vtable);
6441 imt_arg = emit_get_rgctx_method (cfg, context_used,
6442 cmethod, MONO_RGCTX_INFO_METHOD);
6448 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6449 check->sreg1 = sp [0]->dreg;
6450 MONO_ADD_INS (cfg->cbb, check);
6453 /* Calling virtual generic methods */
6454 if (cmethod && virtual &&
6455 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6456 !(MONO_METHOD_IS_FINAL (cmethod) &&
6457 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6458 mono_method_signature (cmethod)->generic_param_count) {
6459 MonoInst *this_temp, *this_arg_temp, *store;
6460 MonoInst *iargs [4];
6462 g_assert (mono_method_signature (cmethod)->is_inflated);
6464 /* Prevent inlining of methods that contain indirect calls */
6467 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6468 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6469 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6470 g_assert (!imt_arg);
6472 g_assert (cmethod->is_inflated);
6473 imt_arg = emit_get_rgctx_method (cfg, context_used,
6474 cmethod, MONO_RGCTX_INFO_METHOD);
6475 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6479 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6480 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6481 MONO_ADD_INS (bblock, store);
6483 /* FIXME: This should be a managed pointer */
6484 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6486 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6487 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6488 cmethod, MONO_RGCTX_INFO_METHOD);
6489 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6490 addr = mono_emit_jit_icall (cfg,
6491 mono_helper_compile_generic_method, iargs);
6493 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6495 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6498 if (!MONO_TYPE_IS_VOID (fsig->ret))
6499 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6506 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6507 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6509 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6513 /* FIXME: runtime generic context pointer for jumps? */
6514 /* FIXME: handle this for generic sharing eventually */
6515 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6518 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6521 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6522 /* Handle tail calls similarly to calls */
6523 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6525 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6526 call->tail_call = TRUE;
6527 call->method = cmethod;
6528 call->signature = mono_method_signature (cmethod);
6531 * We implement tail calls by storing the actual arguments into the
6532 * argument variables, then emitting a CEE_JMP.
6534 for (i = 0; i < n; ++i) {
6535 /* Prevent argument from being register allocated */
6536 arg_array [i]->flags |= MONO_INST_VOLATILE;
6537 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6541 ins = (MonoInst*)call;
6542 ins->inst_p0 = cmethod;
6543 ins->inst_p1 = arg_array [0];
6544 MONO_ADD_INS (bblock, ins);
6545 link_bblock (cfg, bblock, end_bblock);
6546 start_new_bblock = 1;
6547 /* skip CEE_RET as well */
6553 /* Conversion to a JIT intrinsic */
6554 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6555 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6556 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6567 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6568 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6569 mono_method_check_inlining (cfg, cmethod) &&
6570 !g_list_find (dont_inline, cmethod)) {
6572 gboolean allways = FALSE;
6574 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6575 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6576 /* Prevent inlining of methods that call wrappers */
6578 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6582 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6584 cfg->real_offset += 5;
6587 if (!MONO_TYPE_IS_VOID (fsig->ret))
6588 /* *sp is already set by inline_method */
6591 inline_costs += costs;
6597 inline_costs += 10 * num_calls++;
6599 /* Tail recursion elimination */
6600 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6601 gboolean has_vtargs = FALSE;
6604 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6607 /* keep it simple */
6608 for (i = fsig->param_count - 1; i >= 0; i--) {
6609 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6614 for (i = 0; i < n; ++i)
6615 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6616 MONO_INST_NEW (cfg, ins, OP_BR);
6617 MONO_ADD_INS (bblock, ins);
6618 tblock = start_bblock->out_bb [0];
6619 link_bblock (cfg, bblock, tblock);
6620 ins->inst_target_bb = tblock;
6621 start_new_bblock = 1;
6623 /* skip the CEE_RET, too */
6624 if (ip_in_bb (cfg, bblock, ip + 5))
6634 /* Generic sharing */
6635 /* FIXME: only do this for generic methods if
6636 they are not shared! */
6637 if (context_used && !imt_arg && !array_rank &&
6638 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6639 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6640 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6641 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6644 g_assert (cfg->generic_sharing_context && cmethod);
6648 * We are compiling a call to a
6649 * generic method from shared code,
6650 * which means that we have to look up
6651 * the method in the rgctx and do an
6654 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6657 /* Indirect calls */
6659 g_assert (!imt_arg);
6661 if (*ip == CEE_CALL)
6662 g_assert (context_used);
6663 else if (*ip == CEE_CALLI)
6664 g_assert (!vtable_arg);
6666 /* FIXME: what the hell is this??? */
6667 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6668 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6670 /* Prevent inlining of methods with indirect calls */
6674 #ifdef MONO_ARCH_RGCTX_REG
6676 int rgctx_reg = mono_alloc_preg (cfg);
6678 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6679 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6680 call = (MonoCallInst*)ins;
6681 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6682 cfg->uses_rgctx_reg = TRUE;
6683 call->rgctx_reg = TRUE;
6688 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6690 * Instead of emitting an indirect call, emit a direct call
6691 * with the contents of the aotconst as the patch info.
6693 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6695 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6696 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6699 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6702 if (!MONO_TYPE_IS_VOID (fsig->ret))
6703 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6714 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6715 if (sp [fsig->param_count]->type == STACK_OBJ) {
6716 MonoInst *iargs [2];
6719 iargs [1] = sp [fsig->param_count];
6721 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6724 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6725 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6726 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6727 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6732 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6733 if (!cmethod->klass->element_class->valuetype && !readonly)
6734 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6735 CHECK_TYPELOAD (cmethod->klass);
6738 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6741 g_assert_not_reached ();
6749 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6751 if (!MONO_TYPE_IS_VOID (fsig->ret))
6752 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6762 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6764 } else if (imt_arg) {
6765 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6767 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6770 if (!MONO_TYPE_IS_VOID (fsig->ret))
6771 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6778 if (cfg->method != method) {
6779 /* return from inlined method */
6781 * If in_count == 0, that means the ret is unreachable due to
6782 * being preceeded by a throw. In that case, inline_method () will
6783 * handle setting the return value
6784 * (test case: test_0_inline_throw ()).
6786 if (return_var && cfg->cbb->in_count) {
6790 //g_assert (returnvar != -1);
6791 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6792 cfg->ret_var_set = TRUE;
6796 MonoType *ret_type = mono_method_signature (method)->ret;
6798 g_assert (!return_var);
6801 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6804 if (!cfg->vret_addr) {
6807 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6809 EMIT_NEW_RETLOADA (cfg, ret_addr);
6811 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6812 ins->klass = mono_class_from_mono_type (ret_type);
6815 #ifdef MONO_ARCH_SOFT_FLOAT
6816 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6817 MonoInst *iargs [1];
6821 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6822 mono_arch_emit_setret (cfg, method, conv);
6824 mono_arch_emit_setret (cfg, method, *sp);
6827 mono_arch_emit_setret (cfg, method, *sp);
6832 if (sp != stack_start)
6834 MONO_INST_NEW (cfg, ins, OP_BR);
6836 ins->inst_target_bb = end_bblock;
6837 MONO_ADD_INS (bblock, ins);
6838 link_bblock (cfg, bblock, end_bblock);
6839 start_new_bblock = 1;
6843 MONO_INST_NEW (cfg, ins, OP_BR);
6845 target = ip + 1 + (signed char)(*ip);
6847 GET_BBLOCK (cfg, tblock, target);
6848 link_bblock (cfg, bblock, tblock);
6849 ins->inst_target_bb = tblock;
6850 if (sp != stack_start) {
6851 handle_stack_args (cfg, stack_start, sp - stack_start);
6853 CHECK_UNVERIFIABLE (cfg);
6855 MONO_ADD_INS (bblock, ins);
6856 start_new_bblock = 1;
6857 inline_costs += BRANCH_COST;
6871 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6873 target = ip + 1 + *(signed char*)ip;
6879 inline_costs += BRANCH_COST;
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6886 target = ip + 4 + (gint32)read32(ip);
6888 GET_BBLOCK (cfg, tblock, target);
6889 link_bblock (cfg, bblock, tblock);
6890 ins->inst_target_bb = tblock;
6891 if (sp != stack_start) {
6892 handle_stack_args (cfg, stack_start, sp - stack_start);
6894 CHECK_UNVERIFIABLE (cfg);
6897 MONO_ADD_INS (bblock, ins);
6899 start_new_bblock = 1;
6900 inline_costs += BRANCH_COST;
6907 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6908 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6909 guint32 opsize = is_short ? 1 : 4;
6911 CHECK_OPSIZE (opsize);
6913 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6916 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6921 GET_BBLOCK (cfg, tblock, target);
6922 link_bblock (cfg, bblock, tblock);
6923 GET_BBLOCK (cfg, tblock, ip);
6924 link_bblock (cfg, bblock, tblock);
6926 if (sp != stack_start) {
6927 handle_stack_args (cfg, stack_start, sp - stack_start);
6928 CHECK_UNVERIFIABLE (cfg);
6931 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6932 cmp->sreg1 = sp [0]->dreg;
6933 type_from_op (cmp, sp [0], NULL);
6936 #if SIZEOF_REGISTER == 4
6937 if (cmp->opcode == OP_LCOMPARE_IMM) {
6938 /* Convert it to OP_LCOMPARE */
6939 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6940 ins->type = STACK_I8;
6941 ins->dreg = alloc_dreg (cfg, STACK_I8);
6943 MONO_ADD_INS (bblock, ins);
6944 cmp->opcode = OP_LCOMPARE;
6945 cmp->sreg2 = ins->dreg;
6948 MONO_ADD_INS (bblock, cmp);
6950 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6951 type_from_op (ins, sp [0], NULL);
6952 MONO_ADD_INS (bblock, ins);
6953 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6954 GET_BBLOCK (cfg, tblock, target);
6955 ins->inst_true_bb = tblock;
6956 GET_BBLOCK (cfg, tblock, ip);
6957 ins->inst_false_bb = tblock;
6958 start_new_bblock = 2;
6961 inline_costs += BRANCH_COST;
6976 MONO_INST_NEW (cfg, ins, *ip);
6978 target = ip + 4 + (gint32)read32(ip);
6984 inline_costs += BRANCH_COST;
6988 MonoBasicBlock **targets;
6989 MonoBasicBlock *default_bblock;
6990 MonoJumpInfoBBTable *table;
6991 int offset_reg = alloc_preg (cfg);
6992 int target_reg = alloc_preg (cfg);
6993 int table_reg = alloc_preg (cfg);
6994 int sum_reg = alloc_preg (cfg);
6995 gboolean use_op_switch;
6999 n = read32 (ip + 1);
7002 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7006 CHECK_OPSIZE (n * sizeof (guint32));
7007 target = ip + n * sizeof (guint32);
7009 GET_BBLOCK (cfg, default_bblock, target);
7011 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7012 for (i = 0; i < n; ++i) {
7013 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7014 targets [i] = tblock;
7018 if (sp != stack_start) {
7020 * Link the current bb with the targets as well, so handle_stack_args
7021 * will set their in_stack correctly.
7023 link_bblock (cfg, bblock, default_bblock);
7024 for (i = 0; i < n; ++i)
7025 link_bblock (cfg, bblock, targets [i]);
7027 handle_stack_args (cfg, stack_start, sp - stack_start);
7029 CHECK_UNVERIFIABLE (cfg);
7032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7036 for (i = 0; i < n; ++i)
7037 link_bblock (cfg, bblock, targets [i]);
7039 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7040 table->table = targets;
7041 table->table_size = n;
7043 use_op_switch = FALSE;
7045 /* ARM implements SWITCH statements differently */
7046 /* FIXME: Make it use the generic implementation */
7047 if (!cfg->compile_aot)
7048 use_op_switch = TRUE;
7051 if (COMPILE_LLVM (cfg))
7052 use_op_switch = TRUE;
7054 cfg->cbb->has_jump_table = 1;
7056 if (use_op_switch) {
7057 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7058 ins->sreg1 = src1->dreg;
7059 ins->inst_p0 = table;
7060 ins->inst_many_bb = targets;
7061 ins->klass = GUINT_TO_POINTER (n);
7062 MONO_ADD_INS (cfg->cbb, ins);
7064 if (sizeof (gpointer) == 8)
7065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7069 #if SIZEOF_REGISTER == 8
7070 /* The upper word might not be zero, and we add it to a 64 bit address later */
7071 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7074 if (cfg->compile_aot) {
7075 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7077 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7078 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7079 ins->inst_p0 = table;
7080 ins->dreg = table_reg;
7081 MONO_ADD_INS (cfg->cbb, ins);
7084 /* FIXME: Use load_memindex */
7085 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7087 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7089 start_new_bblock = 1;
7090 inline_costs += (BRANCH_COST * 2);
7110 dreg = alloc_freg (cfg);
7113 dreg = alloc_lreg (cfg);
7116 dreg = alloc_preg (cfg);
7119 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7120 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7121 ins->flags |= ins_flag;
7123 MONO_ADD_INS (bblock, ins);
7138 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7139 ins->flags |= ins_flag;
7141 MONO_ADD_INS (bblock, ins);
7143 #if HAVE_WRITE_BARRIERS
7144 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7145 /* insert call to write barrier */
7146 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7147 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7158 MONO_INST_NEW (cfg, ins, (*ip));
7160 ins->sreg1 = sp [0]->dreg;
7161 ins->sreg2 = sp [1]->dreg;
7162 type_from_op (ins, sp [0], sp [1]);
7164 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7166 /* Use the immediate opcodes if possible */
7167 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7168 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7169 if (imm_opcode != -1) {
7170 ins->opcode = imm_opcode;
7171 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7174 sp [1]->opcode = OP_NOP;
7178 MONO_ADD_INS ((cfg)->cbb, (ins));
7180 *sp++ = mono_decompose_opcode (cfg, ins);
7197 MONO_INST_NEW (cfg, ins, (*ip));
7199 ins->sreg1 = sp [0]->dreg;
7200 ins->sreg2 = sp [1]->dreg;
7201 type_from_op (ins, sp [0], sp [1]);
7203 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7204 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7206 /* FIXME: Pass opcode to is_inst_imm */
7208 /* Use the immediate opcodes if possible */
7209 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7212 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7213 if (imm_opcode != -1) {
7214 ins->opcode = imm_opcode;
7215 if (sp [1]->opcode == OP_I8CONST) {
7216 #if SIZEOF_REGISTER == 8
7217 ins->inst_imm = sp [1]->inst_l;
7219 ins->inst_ls_word = sp [1]->inst_ls_word;
7220 ins->inst_ms_word = sp [1]->inst_ms_word;
7224 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7227 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7228 if (sp [1]->next == NULL)
7229 sp [1]->opcode = OP_NOP;
7232 MONO_ADD_INS ((cfg)->cbb, (ins));
7234 *sp++ = mono_decompose_opcode (cfg, ins);
7247 case CEE_CONV_OVF_I8:
7248 case CEE_CONV_OVF_U8:
7252 /* Special case this earlier so we have long constants in the IR */
7253 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7254 int data = sp [-1]->inst_c0;
7255 sp [-1]->opcode = OP_I8CONST;
7256 sp [-1]->type = STACK_I8;
7257 #if SIZEOF_REGISTER == 8
7258 if ((*ip) == CEE_CONV_U8)
7259 sp [-1]->inst_c0 = (guint32)data;
7261 sp [-1]->inst_c0 = data;
7263 sp [-1]->inst_ls_word = data;
7264 if ((*ip) == CEE_CONV_U8)
7265 sp [-1]->inst_ms_word = 0;
7267 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7269 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7276 case CEE_CONV_OVF_I4:
7277 case CEE_CONV_OVF_I1:
7278 case CEE_CONV_OVF_I2:
7279 case CEE_CONV_OVF_I:
7280 case CEE_CONV_OVF_U:
7283 if (sp [-1]->type == STACK_R8) {
7284 ADD_UNOP (CEE_CONV_OVF_I8);
7291 case CEE_CONV_OVF_U1:
7292 case CEE_CONV_OVF_U2:
7293 case CEE_CONV_OVF_U4:
7296 if (sp [-1]->type == STACK_R8) {
7297 ADD_UNOP (CEE_CONV_OVF_U8);
7304 case CEE_CONV_OVF_I1_UN:
7305 case CEE_CONV_OVF_I2_UN:
7306 case CEE_CONV_OVF_I4_UN:
7307 case CEE_CONV_OVF_I8_UN:
7308 case CEE_CONV_OVF_U1_UN:
7309 case CEE_CONV_OVF_U2_UN:
7310 case CEE_CONV_OVF_U4_UN:
7311 case CEE_CONV_OVF_U8_UN:
7312 case CEE_CONV_OVF_I_UN:
7313 case CEE_CONV_OVF_U_UN:
7323 case CEE_ADD_OVF_UN:
7325 case CEE_MUL_OVF_UN:
7327 case CEE_SUB_OVF_UN:
7335 token = read32 (ip + 1);
7336 klass = mini_get_class (method, token, generic_context);
7337 CHECK_TYPELOAD (klass);
7339 if (generic_class_is_reference_type (cfg, klass)) {
7340 MonoInst *store, *load;
7341 int dreg = alloc_preg (cfg);
7343 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7344 load->flags |= ins_flag;
7345 MONO_ADD_INS (cfg->cbb, load);
7347 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7348 store->flags |= ins_flag;
7349 MONO_ADD_INS (cfg->cbb, store);
7351 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7363 token = read32 (ip + 1);
7364 klass = mini_get_class (method, token, generic_context);
7365 CHECK_TYPELOAD (klass);
7367 /* Optimize the common ldobj+stloc combination */
7377 loc_index = ip [5] - CEE_STLOC_0;
7384 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7385 CHECK_LOCAL (loc_index);
7387 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7388 ins->dreg = cfg->locals [loc_index]->dreg;
7394 /* Optimize the ldobj+stobj combination */
7395 /* The reference case ends up being a load+store anyway */
7396 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7401 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7408 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7417 CHECK_STACK_OVF (1);
7419 n = read32 (ip + 1);
7421 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7422 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7423 ins->type = STACK_OBJ;
7426 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7427 MonoInst *iargs [1];
7429 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7430 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7432 if (cfg->opt & MONO_OPT_SHARED) {
7433 MonoInst *iargs [3];
7435 if (cfg->compile_aot) {
7436 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7438 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7439 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7440 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7441 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7442 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7444 if (bblock->out_of_line) {
7445 MonoInst *iargs [2];
7447 if (image == mono_defaults.corlib) {
7449 * Avoid relocations in AOT and save some space by using a
7450 * version of helper_ldstr specialized to mscorlib.
7452 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7453 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7455 /* Avoid creating the string object */
7456 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7457 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7458 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7462 if (cfg->compile_aot) {
7463 NEW_LDSTRCONST (cfg, ins, image, n);
7465 MONO_ADD_INS (bblock, ins);
7468 NEW_PCONST (cfg, ins, NULL);
7469 ins->type = STACK_OBJ;
7470 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7472 MONO_ADD_INS (bblock, ins);
7481 MonoInst *iargs [2];
7482 MonoMethodSignature *fsig;
7485 MonoInst *vtable_arg = NULL;
7488 token = read32 (ip + 1);
7489 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7492 fsig = mono_method_get_signature (cmethod, image, token);
7494 mono_save_token_info (cfg, image, token, cmethod);
7496 if (!mono_class_init (cmethod->klass))
7499 if (cfg->generic_sharing_context)
7500 context_used = mono_method_check_context_used (cmethod);
7502 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7503 if (check_linkdemand (cfg, method, cmethod))
7505 CHECK_CFG_EXCEPTION;
7506 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7507 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7510 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7511 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7512 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7513 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7514 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7517 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7518 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7520 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7522 CHECK_TYPELOAD (cmethod->klass);
7523 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7528 n = fsig->param_count;
7532 * Generate smaller code for the common newobj <exception> instruction in
7533 * argument checking code.
7535 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7536 is_exception_class (cmethod->klass) && n <= 2 &&
7537 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7538 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7539 MonoInst *iargs [3];
7541 g_assert (!vtable_arg);
7545 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7548 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7552 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7557 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7560 g_assert_not_reached ();
7568 /* move the args to allow room for 'this' in the first position */
7574 /* check_call_signature () requires sp[0] to be set */
7575 this_ins.type = STACK_OBJ;
7577 if (check_call_signature (cfg, fsig, sp))
7582 if (mini_class_is_system_array (cmethod->klass)) {
7583 g_assert (!vtable_arg);
7585 *sp = emit_get_rgctx_method (cfg, context_used,
7586 cmethod, MONO_RGCTX_INFO_METHOD);
7588 /* Avoid varargs in the common case */
7589 if (fsig->param_count == 1)
7590 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7591 else if (fsig->param_count == 2)
7592 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7594 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7595 } else if (cmethod->string_ctor) {
7596 g_assert (!context_used);
7597 g_assert (!vtable_arg);
7598 /* we simply pass a null pointer */
7599 EMIT_NEW_PCONST (cfg, *sp, NULL);
7600 /* now call the string ctor */
7601 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7603 MonoInst* callvirt_this_arg = NULL;
7605 if (cmethod->klass->valuetype) {
7606 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7607 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7608 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7613 * The code generated by mini_emit_virtual_call () expects
7614 * iargs [0] to be a boxed instance, but luckily the vcall
7615 * will be transformed into a normal call there.
7617 } else if (context_used) {
7621 if (cfg->opt & MONO_OPT_SHARED)
7622 rgctx_info = MONO_RGCTX_INFO_KLASS;
7624 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7625 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7627 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7630 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7632 CHECK_TYPELOAD (cmethod->klass);
7635 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7636 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7637 * As a workaround, we call class cctors before allocating objects.
7639 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7640 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7641 if (cfg->verbose_level > 2)
7642 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7643 class_inits = g_slist_prepend (class_inits, vtable);
7646 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7649 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7652 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7654 /* Now call the actual ctor */
7655 /* Avoid virtual calls to ctors if possible */
7656 if (cmethod->klass->marshalbyref)
7657 callvirt_this_arg = sp [0];
7659 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7660 mono_method_check_inlining (cfg, cmethod) &&
7661 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7662 !g_list_find (dont_inline, cmethod)) {
7665 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7666 cfg->real_offset += 5;
7669 inline_costs += costs - 5;
7672 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7674 } else if (context_used &&
7675 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7676 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7677 MonoInst *cmethod_addr;
7679 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7680 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7682 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7685 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7686 callvirt_this_arg, NULL, vtable_arg);
7690 if (alloc == NULL) {
7692 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7693 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7707 token = read32 (ip + 1);
7708 klass = mini_get_class (method, token, generic_context);
7709 CHECK_TYPELOAD (klass);
7710 if (sp [0]->type != STACK_OBJ)
7713 if (cfg->generic_sharing_context)
7714 context_used = mono_class_check_context_used (klass);
7723 args [1] = emit_get_rgctx_klass (cfg, context_used,
7724 klass, MONO_RGCTX_INFO_KLASS);
7726 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7730 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7731 MonoMethod *mono_castclass;
7732 MonoInst *iargs [1];
7735 mono_castclass = mono_marshal_get_castclass (klass);
7738 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7739 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7740 g_assert (costs > 0);
7743 cfg->real_offset += 5;
7748 inline_costs += costs;
7751 ins = handle_castclass (cfg, klass, *sp);
7752 CHECK_CFG_EXCEPTION;
7762 token = read32 (ip + 1);
7763 klass = mini_get_class (method, token, generic_context);
7764 CHECK_TYPELOAD (klass);
7765 if (sp [0]->type != STACK_OBJ)
7768 if (cfg->generic_sharing_context)
7769 context_used = mono_class_check_context_used (klass);
7778 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7780 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7784 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7785 MonoMethod *mono_isinst;
7786 MonoInst *iargs [1];
7789 mono_isinst = mono_marshal_get_isinst (klass);
7792 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7793 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7794 g_assert (costs > 0);
7797 cfg->real_offset += 5;
7802 inline_costs += costs;
7805 ins = handle_isinst (cfg, klass, *sp);
7806 CHECK_CFG_EXCEPTION;
7813 case CEE_UNBOX_ANY: {
7817 token = read32 (ip + 1);
7818 klass = mini_get_class (method, token, generic_context);
7819 CHECK_TYPELOAD (klass);
7821 mono_save_token_info (cfg, image, token, klass);
7823 if (cfg->generic_sharing_context)
7824 context_used = mono_class_check_context_used (klass);
7826 if (generic_class_is_reference_type (cfg, klass)) {
7827 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7829 MonoInst *iargs [2];
7834 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7835 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7839 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7840 MonoMethod *mono_castclass;
7841 MonoInst *iargs [1];
7844 mono_castclass = mono_marshal_get_castclass (klass);
7847 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7848 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7850 g_assert (costs > 0);
7853 cfg->real_offset += 5;
7857 inline_costs += costs;
7859 ins = handle_castclass (cfg, klass, *sp);
7860 CHECK_CFG_EXCEPTION;
7868 if (mono_class_is_nullable (klass)) {
7869 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7876 ins = handle_unbox (cfg, klass, sp, context_used);
7882 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7895 token = read32 (ip + 1);
7896 klass = mini_get_class (method, token, generic_context);
7897 CHECK_TYPELOAD (klass);
7899 mono_save_token_info (cfg, image, token, klass);
7901 if (cfg->generic_sharing_context)
7902 context_used = mono_class_check_context_used (klass);
7904 if (generic_class_is_reference_type (cfg, klass)) {
7910 if (klass == mono_defaults.void_class)
7912 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7914 /* frequent check in generic code: box (struct), brtrue */
7915 if (!mono_class_is_nullable (klass) &&
7916 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7917 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7919 MONO_INST_NEW (cfg, ins, OP_BR);
7920 if (*ip == CEE_BRTRUE_S) {
7923 target = ip + 1 + (signed char)(*ip);
7928 target = ip + 4 + (gint)(read32 (ip));
7931 GET_BBLOCK (cfg, tblock, target);
7932 link_bblock (cfg, bblock, tblock);
7933 ins->inst_target_bb = tblock;
7934 GET_BBLOCK (cfg, tblock, ip);
7936 * This leads to some inconsistency, since the two bblocks are
7937 * not really connected, but it is needed for handling stack
7938 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7939 * FIXME: This should only be needed if sp != stack_start, but that
7940 * doesn't work for some reason (test failure in mcs/tests on x86).
7942 link_bblock (cfg, bblock, tblock);
7943 if (sp != stack_start) {
7944 handle_stack_args (cfg, stack_start, sp - stack_start);
7946 CHECK_UNVERIFIABLE (cfg);
7948 MONO_ADD_INS (bblock, ins);
7949 start_new_bblock = 1;
7957 if (cfg->opt & MONO_OPT_SHARED)
7958 rgctx_info = MONO_RGCTX_INFO_KLASS;
7960 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7961 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7962 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7964 *sp++ = handle_box (cfg, val, klass);
7967 CHECK_CFG_EXCEPTION;
7976 token = read32 (ip + 1);
7977 klass = mini_get_class (method, token, generic_context);
7978 CHECK_TYPELOAD (klass);
7980 mono_save_token_info (cfg, image, token, klass);
7982 if (cfg->generic_sharing_context)
7983 context_used = mono_class_check_context_used (klass);
7985 if (mono_class_is_nullable (klass)) {
7988 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7989 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7993 ins = handle_unbox (cfg, klass, sp, context_used);
8003 MonoClassField *field;
8007 if (*ip == CEE_STFLD) {
8014 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8016 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8019 token = read32 (ip + 1);
8020 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8021 field = mono_method_get_wrapper_data (method, token);
8022 klass = field->parent;
8025 field = mono_field_from_token (image, token, &klass, generic_context);
8029 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8030 FIELD_ACCESS_FAILURE;
8031 mono_class_init (klass);
8033 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8034 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8035 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8036 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8039 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8040 if (*ip == CEE_STFLD) {
8041 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8043 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8044 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8045 MonoInst *iargs [5];
8048 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8049 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8050 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8054 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8055 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8056 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8057 g_assert (costs > 0);
8059 cfg->real_offset += 5;
8062 inline_costs += costs;
8064 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8069 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8071 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8073 #if HAVE_WRITE_BARRIERS
8074 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8075 /* insert call to write barrier */
8076 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8077 MonoInst *iargs [2];
8080 dreg = alloc_preg (cfg);
8081 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8083 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8087 store->flags |= ins_flag;
8094 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8095 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8096 MonoInst *iargs [4];
8099 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8100 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8101 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8102 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8103 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8104 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8106 g_assert (costs > 0);
8108 cfg->real_offset += 5;
8112 inline_costs += costs;
8114 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8118 if (sp [0]->type == STACK_VTYPE) {
8121 /* Have to compute the address of the variable */
8123 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8125 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8127 g_assert (var->klass == klass);
8129 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8133 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8135 if (*ip == CEE_LDFLDA) {
8136 dreg = alloc_preg (cfg);
8138 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8139 ins->klass = mono_class_from_mono_type (field->type);
8140 ins->type = STACK_MP;
8145 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8146 load->flags |= ins_flag;
8157 MonoClassField *field;
8158 gpointer addr = NULL;
8159 gboolean is_special_static;
8162 token = read32 (ip + 1);
8164 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8165 field = mono_method_get_wrapper_data (method, token);
8166 klass = field->parent;
8169 field = mono_field_from_token (image, token, &klass, generic_context);
8172 mono_class_init (klass);
8173 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8174 FIELD_ACCESS_FAILURE;
8176 /* if the class is Critical then transparent code cannot access it's fields */
8177 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8178 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8181 * We can only support shared generic static
8182 * field access on architectures where the
8183 * trampoline code has been extended to handle
8184 * the generic class init.
8186 #ifndef MONO_ARCH_VTABLE_REG
8187 GENERIC_SHARING_FAILURE (*ip);
8190 if (cfg->generic_sharing_context)
8191 context_used = mono_class_check_context_used (klass);
8193 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8195 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8196 * to be called here.
8198 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8199 mono_class_vtable (cfg->domain, klass);
8200 CHECK_TYPELOAD (klass);
8202 mono_domain_lock (cfg->domain);
8203 if (cfg->domain->special_static_fields)
8204 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8205 mono_domain_unlock (cfg->domain);
8207 is_special_static = mono_class_field_is_special_static (field);
8209 /* Generate IR to compute the field address */
8211 if ((cfg->opt & MONO_OPT_SHARED) ||
8212 (cfg->compile_aot && is_special_static) ||
8213 (context_used && is_special_static)) {
8214 MonoInst *iargs [2];
8216 g_assert (field->parent);
8217 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8219 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8220 field, MONO_RGCTX_INFO_CLASS_FIELD);
8222 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8224 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8225 } else if (context_used) {
8226 MonoInst *static_data;
8229 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8230 method->klass->name_space, method->klass->name, method->name,
8231 depth, field->offset);
8234 if (mono_class_needs_cctor_run (klass, method)) {
8238 vtable = emit_get_rgctx_klass (cfg, context_used,
8239 klass, MONO_RGCTX_INFO_VTABLE);
8241 // FIXME: This doesn't work since it tries to pass the argument
8242 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8244 * The vtable pointer is always passed in a register regardless of
8245 * the calling convention, so assign it manually, and make a call
8246 * using a signature without parameters.
8248 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8249 #ifdef MONO_ARCH_VTABLE_REG
8250 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8251 cfg->uses_vtable_reg = TRUE;
8258 * The pointer we're computing here is
8260 * super_info.static_data + field->offset
8262 static_data = emit_get_rgctx_klass (cfg, context_used,
8263 klass, MONO_RGCTX_INFO_STATIC_DATA);
8265 if (field->offset == 0) {
8268 int addr_reg = mono_alloc_preg (cfg);
8269 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8271 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8272 MonoInst *iargs [2];
8274 g_assert (field->parent);
8275 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8276 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8277 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8279 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8281 CHECK_TYPELOAD (klass);
8283 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8284 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8285 if (cfg->verbose_level > 2)
8286 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8287 class_inits = g_slist_prepend (class_inits, vtable);
8289 if (cfg->run_cctors) {
8291 /* This makes so that inline cannot trigger */
8292 /* .cctors: too many apps depend on them */
8293 /* running with a specific order... */
8294 if (! vtable->initialized)
8296 ex = mono_runtime_class_init_full (vtable, FALSE);
8298 set_exception_object (cfg, ex);
8299 goto exception_exit;
8303 addr = (char*)vtable->data + field->offset;
8305 if (cfg->compile_aot)
8306 EMIT_NEW_SFLDACONST (cfg, ins, field);
8308 EMIT_NEW_PCONST (cfg, ins, addr);
8311 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8312 * This could be later optimized to do just a couple of
8313 * memory dereferences with constant offsets.
8315 MonoInst *iargs [1];
8316 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8317 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8321 /* Generate IR to do the actual load/store operation */
8323 if (*ip == CEE_LDSFLDA) {
8324 ins->klass = mono_class_from_mono_type (field->type);
8325 ins->type = STACK_PTR;
8327 } else if (*ip == CEE_STSFLD) {
8332 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8333 store->flags |= ins_flag;
8335 gboolean is_const = FALSE;
8336 MonoVTable *vtable = NULL;
8338 if (!context_used) {
8339 vtable = mono_class_vtable (cfg->domain, klass);
8340 CHECK_TYPELOAD (klass);
8342 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8343 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8344 gpointer addr = (char*)vtable->data + field->offset;
8345 int ro_type = field->type->type;
8346 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8347 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8349 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8352 case MONO_TYPE_BOOLEAN:
8354 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8358 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8361 case MONO_TYPE_CHAR:
8363 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8367 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8372 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8376 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8379 #ifndef HAVE_MOVING_COLLECTOR
8382 case MONO_TYPE_STRING:
8383 case MONO_TYPE_OBJECT:
8384 case MONO_TYPE_CLASS:
8385 case MONO_TYPE_SZARRAY:
8387 case MONO_TYPE_FNPTR:
8388 case MONO_TYPE_ARRAY:
8389 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8390 type_to_eval_stack_type ((cfg), field->type, *sp);
8396 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8401 case MONO_TYPE_VALUETYPE:
8411 CHECK_STACK_OVF (1);
8413 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8414 load->flags |= ins_flag;
8427 token = read32 (ip + 1);
8428 klass = mini_get_class (method, token, generic_context);
8429 CHECK_TYPELOAD (klass);
8430 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8431 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8442 const char *data_ptr;
8444 guint32 field_token;
8450 token = read32 (ip + 1);
8452 klass = mini_get_class (method, token, generic_context);
8453 CHECK_TYPELOAD (klass);
8455 if (cfg->generic_sharing_context)
8456 context_used = mono_class_check_context_used (klass);
8458 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8459 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8460 ins->sreg1 = sp [0]->dreg;
8461 ins->type = STACK_I4;
8462 ins->dreg = alloc_ireg (cfg);
8463 MONO_ADD_INS (cfg->cbb, ins);
8464 *sp = mono_decompose_opcode (cfg, ins);
8469 MonoClass *array_class = mono_array_class_get (klass, 1);
8470 /* FIXME: we cannot get a managed
8471 allocator because we can't get the
8472 open generic class's vtable. We
8473 have the same problem in
8474 handle_alloc_from_inst(). This
8475 needs to be solved so that we can
8476 have managed allocs of shared
8479 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8480 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8482 MonoMethod *managed_alloc = NULL;
8484 /* FIXME: Decompose later to help abcrem */
8487 args [0] = emit_get_rgctx_klass (cfg, context_used,
8488 array_class, MONO_RGCTX_INFO_VTABLE);
8493 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8495 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8497 if (cfg->opt & MONO_OPT_SHARED) {
8498 /* Decompose now to avoid problems with references to the domainvar */
8499 MonoInst *iargs [3];
8501 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8502 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8505 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8507 /* Decompose later since it is needed by abcrem */
8508 MonoClass *array_type = mono_array_class_get (klass, 1);
8509 mono_class_vtable (cfg->domain, array_type);
8510 CHECK_TYPELOAD (array_type);
8512 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8513 ins->dreg = alloc_preg (cfg);
8514 ins->sreg1 = sp [0]->dreg;
8515 ins->inst_newa_class = klass;
8516 ins->type = STACK_OBJ;
8518 MONO_ADD_INS (cfg->cbb, ins);
8519 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8520 cfg->cbb->has_array_access = TRUE;
8522 /* Needed so mono_emit_load_get_addr () gets called */
8523 mono_get_got_var (cfg);
8533 * we inline/optimize the initialization sequence if possible.
8534 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8535 * for small sizes open code the memcpy
8536 * ensure the rva field is big enough
8538 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8539 MonoMethod *memcpy_method = get_memcpy_method ();
8540 MonoInst *iargs [3];
8541 int add_reg = alloc_preg (cfg);
8543 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8544 if (cfg->compile_aot) {
8545 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8547 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8549 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8550 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8559 if (sp [0]->type != STACK_OBJ)
8562 dreg = alloc_preg (cfg);
8563 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8564 ins->dreg = alloc_preg (cfg);
8565 ins->sreg1 = sp [0]->dreg;
8566 ins->type = STACK_I4;
8567 MONO_ADD_INS (cfg->cbb, ins);
8568 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8569 cfg->cbb->has_array_access = TRUE;
8577 if (sp [0]->type != STACK_OBJ)
8580 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8582 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8583 CHECK_TYPELOAD (klass);
8584 /* we need to make sure that this array is exactly the type it needs
8585 * to be for correctness. the wrappers are lax with their usage
8586 * so we need to ignore them here
8588 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8589 MonoClass *array_class = mono_array_class_get (klass, 1);
8590 mini_emit_check_array_type (cfg, sp [0], array_class);
8591 CHECK_TYPELOAD (array_class);
8595 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8610 case CEE_LDELEM_REF: {
8616 if (*ip == CEE_LDELEM) {
8618 token = read32 (ip + 1);
8619 klass = mini_get_class (method, token, generic_context);
8620 CHECK_TYPELOAD (klass);
8621 mono_class_init (klass);
8624 klass = array_access_to_klass (*ip);
8626 if (sp [0]->type != STACK_OBJ)
8629 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8631 if (sp [1]->opcode == OP_ICONST) {
8632 int array_reg = sp [0]->dreg;
8633 int index_reg = sp [1]->dreg;
8634 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8636 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8637 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8639 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8640 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8643 if (*ip == CEE_LDELEM)
8656 case CEE_STELEM_REF:
8663 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8665 if (*ip == CEE_STELEM) {
8667 token = read32 (ip + 1);
8668 klass = mini_get_class (method, token, generic_context);
8669 CHECK_TYPELOAD (klass);
8670 mono_class_init (klass);
8673 klass = array_access_to_klass (*ip);
8675 if (sp [0]->type != STACK_OBJ)
8678 /* storing a NULL doesn't need any of the complex checks in stelemref */
8679 if (generic_class_is_reference_type (cfg, klass) &&
8680 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8681 MonoMethod* helper = mono_marshal_get_stelemref ();
8682 MonoInst *iargs [3];
8684 if (sp [0]->type != STACK_OBJ)
8686 if (sp [2]->type != STACK_OBJ)
8693 mono_emit_method_call (cfg, helper, iargs, NULL);
8695 if (sp [1]->opcode == OP_ICONST) {
8696 int array_reg = sp [0]->dreg;
8697 int index_reg = sp [1]->dreg;
8698 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8700 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8701 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8703 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8704 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8708 if (*ip == CEE_STELEM)
8715 case CEE_CKFINITE: {
8719 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8720 ins->sreg1 = sp [0]->dreg;
8721 ins->dreg = alloc_freg (cfg);
8722 ins->type = STACK_R8;
8723 MONO_ADD_INS (bblock, ins);
8725 *sp++ = mono_decompose_opcode (cfg, ins);
8730 case CEE_REFANYVAL: {
8731 MonoInst *src_var, *src;
8733 int klass_reg = alloc_preg (cfg);
8734 int dreg = alloc_preg (cfg);
8737 MONO_INST_NEW (cfg, ins, *ip);
8740 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8741 CHECK_TYPELOAD (klass);
8742 mono_class_init (klass);
8744 if (cfg->generic_sharing_context)
8745 context_used = mono_class_check_context_used (klass);
8748 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8750 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8751 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8755 MonoInst *klass_ins;
8757 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8758 klass, MONO_RGCTX_INFO_KLASS);
8761 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8762 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8764 mini_emit_class_check (cfg, klass_reg, klass);
8766 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8767 ins->type = STACK_MP;
8772 case CEE_MKREFANY: {
8773 MonoInst *loc, *addr;
8776 MONO_INST_NEW (cfg, ins, *ip);
8779 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8780 CHECK_TYPELOAD (klass);
8781 mono_class_init (klass);
8783 if (cfg->generic_sharing_context)
8784 context_used = mono_class_check_context_used (klass);
8786 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8787 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8790 MonoInst *const_ins;
8791 int type_reg = alloc_preg (cfg);
8793 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8794 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8796 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8797 } else if (cfg->compile_aot) {
8798 int const_reg = alloc_preg (cfg);
8799 int type_reg = alloc_preg (cfg);
8801 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8804 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8806 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8811 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8812 ins->type = STACK_VTYPE;
8813 ins->klass = mono_defaults.typed_reference_class;
8820 MonoClass *handle_class;
8822 CHECK_STACK_OVF (1);
8825 n = read32 (ip + 1);
8827 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8828 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8829 handle = mono_method_get_wrapper_data (method, n);
8830 handle_class = mono_method_get_wrapper_data (method, n + 1);
8831 if (handle_class == mono_defaults.typehandle_class)
8832 handle = &((MonoClass*)handle)->byval_arg;
8835 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8839 mono_class_init (handle_class);
8840 if (cfg->generic_sharing_context) {
8841 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8842 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8843 /* This case handles ldtoken
8844 of an open type, like for
8847 } else if (handle_class == mono_defaults.typehandle_class) {
8848 /* If we get a MONO_TYPE_CLASS
8849 then we need to provide the
8851 instantiation of it. */
8852 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8855 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8856 } else if (handle_class == mono_defaults.fieldhandle_class)
8857 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8858 else if (handle_class == mono_defaults.methodhandle_class)
8859 context_used = mono_method_check_context_used (handle);
8861 g_assert_not_reached ();
8864 if ((cfg->opt & MONO_OPT_SHARED) &&
8865 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8866 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8867 MonoInst *addr, *vtvar, *iargs [3];
8868 int method_context_used;
8870 if (cfg->generic_sharing_context)
8871 method_context_used = mono_method_check_context_used (method);
8873 method_context_used = 0;
8875 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8877 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8878 EMIT_NEW_ICONST (cfg, iargs [1], n);
8879 if (method_context_used) {
8880 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8881 method, MONO_RGCTX_INFO_METHOD);
8882 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8884 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8885 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8887 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8891 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8893 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8894 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8895 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8896 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8897 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8898 MonoClass *tclass = mono_class_from_mono_type (handle);
8900 mono_class_init (tclass);
8902 ins = emit_get_rgctx_klass (cfg, context_used,
8903 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8904 } else if (cfg->compile_aot) {
8905 if (method->wrapper_type) {
8906 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8907 /* Special case for static synchronized wrappers */
8908 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8910 /* FIXME: n is not a normal token */
8911 cfg->disable_aot = TRUE;
8912 EMIT_NEW_PCONST (cfg, ins, NULL);
8915 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8918 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8920 ins->type = STACK_OBJ;
8921 ins->klass = cmethod->klass;
8924 MonoInst *addr, *vtvar;
8926 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8929 if (handle_class == mono_defaults.typehandle_class) {
8930 ins = emit_get_rgctx_klass (cfg, context_used,
8931 mono_class_from_mono_type (handle),
8932 MONO_RGCTX_INFO_TYPE);
8933 } else if (handle_class == mono_defaults.methodhandle_class) {
8934 ins = emit_get_rgctx_method (cfg, context_used,
8935 handle, MONO_RGCTX_INFO_METHOD);
8936 } else if (handle_class == mono_defaults.fieldhandle_class) {
8937 ins = emit_get_rgctx_field (cfg, context_used,
8938 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8940 g_assert_not_reached ();
8942 } else if (cfg->compile_aot) {
8943 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8945 EMIT_NEW_PCONST (cfg, ins, handle);
8947 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8949 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8959 MONO_INST_NEW (cfg, ins, OP_THROW);
8961 ins->sreg1 = sp [0]->dreg;
8963 bblock->out_of_line = TRUE;
8964 MONO_ADD_INS (bblock, ins);
8965 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8966 MONO_ADD_INS (bblock, ins);
8969 link_bblock (cfg, bblock, end_bblock);
8970 start_new_bblock = 1;
8972 case CEE_ENDFINALLY:
8973 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8974 MONO_ADD_INS (bblock, ins);
8976 start_new_bblock = 1;
8979 * Control will leave the method so empty the stack, otherwise
8980 * the next basic block will start with a nonempty stack.
8982 while (sp != stack_start) {
8990 if (*ip == CEE_LEAVE) {
8992 target = ip + 5 + (gint32)read32(ip + 1);
8995 target = ip + 2 + (signed char)(ip [1]);
8998 /* empty the stack */
8999 while (sp != stack_start) {
9004 * If this leave statement is in a catch block, check for a
9005 * pending exception, and rethrow it if necessary.
9006 * We avoid doing this in runtime invoke wrappers, since those are called
9007 * by native code which expects the wrapper to catch all exceptions.
9009 for (i = 0; i < header->num_clauses; ++i) {
9010 MonoExceptionClause *clause = &header->clauses [i];
9013 * Use <= in the final comparison to handle clauses with multiple
9014 * leave statements, like in bug #78024.
9015 * The ordering of the exception clauses guarantees that we find the
9018 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9020 MonoBasicBlock *dont_throw;
9025 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9028 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9030 NEW_BBLOCK (cfg, dont_throw);
9033 * Currently, we always rethrow the abort exception, despite the
9034 * fact that this is not correct. See thread6.cs for an example.
9035 * But propagating the abort exception is more important than
9036 * getting the semantics right.
9038 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9039 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9040 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9042 MONO_START_BB (cfg, dont_throw);
9047 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9049 for (tmp = handlers; tmp; tmp = tmp->next) {
9051 link_bblock (cfg, bblock, tblock);
9052 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9053 ins->inst_target_bb = tblock;
9054 MONO_ADD_INS (bblock, ins);
9055 bblock->has_call_handler = 1;
9057 g_list_free (handlers);
9060 MONO_INST_NEW (cfg, ins, OP_BR);
9061 MONO_ADD_INS (bblock, ins);
9062 GET_BBLOCK (cfg, tblock, target);
9063 link_bblock (cfg, bblock, tblock);
9064 ins->inst_target_bb = tblock;
9065 start_new_bblock = 1;
9067 if (*ip == CEE_LEAVE)
9076 * Mono specific opcodes
9078 case MONO_CUSTOM_PREFIX: {
9080 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9084 case CEE_MONO_ICALL: {
9086 MonoJitICallInfo *info;
9088 token = read32 (ip + 2);
9089 func = mono_method_get_wrapper_data (method, token);
9090 info = mono_find_jit_icall_by_addr (func);
9093 CHECK_STACK (info->sig->param_count);
9094 sp -= info->sig->param_count;
9096 ins = mono_emit_jit_icall (cfg, info->func, sp);
9097 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9101 inline_costs += 10 * num_calls++;
9105 case CEE_MONO_LDPTR: {
9108 CHECK_STACK_OVF (1);
9110 token = read32 (ip + 2);
9112 ptr = mono_method_get_wrapper_data (method, token);
9113 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9114 MonoJitICallInfo *callinfo;
9115 const char *icall_name;
9117 icall_name = method->name + strlen ("__icall_wrapper_");
9118 g_assert (icall_name);
9119 callinfo = mono_find_jit_icall_by_name (icall_name);
9120 g_assert (callinfo);
9122 if (ptr == callinfo->func) {
9123 /* Will be transformed into an AOTCONST later */
9124 EMIT_NEW_PCONST (cfg, ins, ptr);
9130 /* FIXME: Generalize this */
9131 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9132 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9137 EMIT_NEW_PCONST (cfg, ins, ptr);
9140 inline_costs += 10 * num_calls++;
9141 /* Can't embed random pointers into AOT code */
9142 cfg->disable_aot = 1;
9145 case CEE_MONO_ICALL_ADDR: {
9146 MonoMethod *cmethod;
9149 CHECK_STACK_OVF (1);
9151 token = read32 (ip + 2);
9153 cmethod = mono_method_get_wrapper_data (method, token);
9155 if (cfg->compile_aot) {
9156 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9158 ptr = mono_lookup_internal_call (cmethod);
9160 EMIT_NEW_PCONST (cfg, ins, ptr);
9166 case CEE_MONO_VTADDR: {
9167 MonoInst *src_var, *src;
9173 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9174 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9179 case CEE_MONO_NEWOBJ: {
9180 MonoInst *iargs [2];
9182 CHECK_STACK_OVF (1);
9184 token = read32 (ip + 2);
9185 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9186 mono_class_init (klass);
9187 NEW_DOMAINCONST (cfg, iargs [0]);
9188 MONO_ADD_INS (cfg->cbb, iargs [0]);
9189 NEW_CLASSCONST (cfg, iargs [1], klass);
9190 MONO_ADD_INS (cfg->cbb, iargs [1]);
9191 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9193 inline_costs += 10 * num_calls++;
9196 case CEE_MONO_OBJADDR:
9199 MONO_INST_NEW (cfg, ins, OP_MOVE);
9200 ins->dreg = alloc_preg (cfg);
9201 ins->sreg1 = sp [0]->dreg;
9202 ins->type = STACK_MP;
9203 MONO_ADD_INS (cfg->cbb, ins);
9207 case CEE_MONO_LDNATIVEOBJ:
9209 * Similar to LDOBJ, but instead load the unmanaged
9210 * representation of the vtype to the stack.
9215 token = read32 (ip + 2);
9216 klass = mono_method_get_wrapper_data (method, token);
9217 g_assert (klass->valuetype);
9218 mono_class_init (klass);
9221 MonoInst *src, *dest, *temp;
9224 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9225 temp->backend.is_pinvoke = 1;
9226 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9227 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9229 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9230 dest->type = STACK_VTYPE;
9231 dest->klass = klass;
9237 case CEE_MONO_RETOBJ: {
9239 * Same as RET, but return the native representation of a vtype
9242 g_assert (cfg->ret);
9243 g_assert (mono_method_signature (method)->pinvoke);
9248 token = read32 (ip + 2);
9249 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9251 if (!cfg->vret_addr) {
9252 g_assert (cfg->ret_var_is_local);
9254 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9256 EMIT_NEW_RETLOADA (cfg, ins);
9258 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9260 if (sp != stack_start)
9263 MONO_INST_NEW (cfg, ins, OP_BR);
9264 ins->inst_target_bb = end_bblock;
9265 MONO_ADD_INS (bblock, ins);
9266 link_bblock (cfg, bblock, end_bblock);
9267 start_new_bblock = 1;
9271 case CEE_MONO_CISINST:
9272 case CEE_MONO_CCASTCLASS: {
9277 token = read32 (ip + 2);
9278 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9279 if (ip [1] == CEE_MONO_CISINST)
9280 ins = handle_cisinst (cfg, klass, sp [0]);
9282 ins = handle_ccastclass (cfg, klass, sp [0]);
9288 case CEE_MONO_SAVE_LMF:
9289 case CEE_MONO_RESTORE_LMF:
9290 #ifdef MONO_ARCH_HAVE_LMF_OPS
9291 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9292 MONO_ADD_INS (bblock, ins);
9293 cfg->need_lmf_area = TRUE;
9297 case CEE_MONO_CLASSCONST:
9298 CHECK_STACK_OVF (1);
9300 token = read32 (ip + 2);
9301 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9304 inline_costs += 10 * num_calls++;
9306 case CEE_MONO_NOT_TAKEN:
9307 bblock->out_of_line = TRUE;
9311 CHECK_STACK_OVF (1);
9313 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9314 ins->dreg = alloc_preg (cfg);
9315 ins->inst_offset = (gint32)read32 (ip + 2);
9316 ins->type = STACK_PTR;
9317 MONO_ADD_INS (bblock, ins);
9321 case CEE_MONO_DYN_CALL: {
9324 /* It would be easier to call a trampoline, but that would put an
9325 * extra frame on the stack, confusing exception handling. So
9326 * implement it inline using an opcode for now.
9329 if (!cfg->dyn_call_var) {
9330 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9331 /* prevent it from being register allocated */
9332 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9335 /* Has to use a call inst since the local regalloc expects it */
9336 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9337 ins = (MonoInst*)call;
9339 ins->sreg1 = sp [0]->dreg;
9340 ins->sreg2 = sp [1]->dreg;
9341 MONO_ADD_INS (bblock, ins);
9343 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9344 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9348 inline_costs += 10 * num_calls++;
9353 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9363 /* somewhat similar to LDTOKEN */
9364 MonoInst *addr, *vtvar;
9365 CHECK_STACK_OVF (1);
9366 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9368 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9369 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9371 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9372 ins->type = STACK_VTYPE;
9373 ins->klass = mono_defaults.argumenthandle_class;
9386 * The following transforms:
9387 * CEE_CEQ into OP_CEQ
9388 * CEE_CGT into OP_CGT
9389 * CEE_CGT_UN into OP_CGT_UN
9390 * CEE_CLT into OP_CLT
9391 * CEE_CLT_UN into OP_CLT_UN
9393 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9395 MONO_INST_NEW (cfg, ins, cmp->opcode);
9397 cmp->sreg1 = sp [0]->dreg;
9398 cmp->sreg2 = sp [1]->dreg;
9399 type_from_op (cmp, sp [0], sp [1]);
9401 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9402 cmp->opcode = OP_LCOMPARE;
9403 else if (sp [0]->type == STACK_R8)
9404 cmp->opcode = OP_FCOMPARE;
9406 cmp->opcode = OP_ICOMPARE;
9407 MONO_ADD_INS (bblock, cmp);
9408 ins->type = STACK_I4;
9409 ins->dreg = alloc_dreg (cfg, ins->type);
9410 type_from_op (ins, sp [0], sp [1]);
9412 if (cmp->opcode == OP_FCOMPARE) {
9414 * The backends expect the fceq opcodes to do the
9417 cmp->opcode = OP_NOP;
9418 ins->sreg1 = cmp->sreg1;
9419 ins->sreg2 = cmp->sreg2;
9421 MONO_ADD_INS (bblock, ins);
9428 MonoMethod *cil_method;
9429 gboolean needs_static_rgctx_invoke;
9431 CHECK_STACK_OVF (1);
9433 n = read32 (ip + 2);
9434 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9437 mono_class_init (cmethod->klass);
9439 mono_save_token_info (cfg, image, n, cmethod);
9441 if (cfg->generic_sharing_context)
9442 context_used = mono_method_check_context_used (cmethod);
9444 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9446 cil_method = cmethod;
9447 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9448 METHOD_ACCESS_FAILURE;
9450 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9451 if (check_linkdemand (cfg, method, cmethod))
9453 CHECK_CFG_EXCEPTION;
9454 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9455 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9459 * Optimize the common case of ldftn+delegate creation
9461 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9462 /* FIXME: SGEN support */
9463 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9464 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9465 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9466 MonoInst *target_ins;
9469 invoke = mono_get_delegate_invoke (ctor_method->klass);
9470 if (!invoke || !mono_method_signature (invoke))
9474 if (cfg->verbose_level > 3)
9475 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9476 target_ins = sp [-1];
9478 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9479 CHECK_CFG_EXCEPTION;
9487 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9488 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9492 inline_costs += 10 * num_calls++;
9495 case CEE_LDVIRTFTN: {
9500 n = read32 (ip + 2);
9501 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9504 mono_class_init (cmethod->klass);
9506 if (cfg->generic_sharing_context)
9507 context_used = mono_method_check_context_used (cmethod);
9509 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9510 if (check_linkdemand (cfg, method, cmethod))
9512 CHECK_CFG_EXCEPTION;
9513 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9514 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9520 args [1] = emit_get_rgctx_method (cfg, context_used,
9521 cmethod, MONO_RGCTX_INFO_METHOD);
9524 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9526 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9529 inline_costs += 10 * num_calls++;
9533 CHECK_STACK_OVF (1);
9535 n = read16 (ip + 2);
9537 EMIT_NEW_ARGLOAD (cfg, ins, n);
9542 CHECK_STACK_OVF (1);
9544 n = read16 (ip + 2);
9546 NEW_ARGLOADA (cfg, ins, n);
9547 MONO_ADD_INS (cfg->cbb, ins);
9555 n = read16 (ip + 2);
9557 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9559 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9563 CHECK_STACK_OVF (1);
9565 n = read16 (ip + 2);
9567 EMIT_NEW_LOCLOAD (cfg, ins, n);
9572 unsigned char *tmp_ip;
9573 CHECK_STACK_OVF (1);
9575 n = read16 (ip + 2);
9578 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9584 EMIT_NEW_LOCLOADA (cfg, ins, n);
9593 n = read16 (ip + 2);
9595 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9597 emit_stloc_ir (cfg, sp, header, n);
9604 if (sp != stack_start)
9606 if (cfg->method != method)
9608 * Inlining this into a loop in a parent could lead to
9609 * stack overflows which is different behavior than the
9610 * non-inlined case, thus disable inlining in this case.
9612 goto inline_failure;
9614 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9615 ins->dreg = alloc_preg (cfg);
9616 ins->sreg1 = sp [0]->dreg;
9617 ins->type = STACK_PTR;
9618 MONO_ADD_INS (cfg->cbb, ins);
9620 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9622 ins->flags |= MONO_INST_INIT;
9627 case CEE_ENDFILTER: {
9628 MonoExceptionClause *clause, *nearest;
9629 int cc, nearest_num;
9633 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9635 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9636 ins->sreg1 = (*sp)->dreg;
9637 MONO_ADD_INS (bblock, ins);
9638 start_new_bblock = 1;
9643 for (cc = 0; cc < header->num_clauses; ++cc) {
9644 clause = &header->clauses [cc];
9645 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9646 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9647 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9653 if ((ip - header->code) != nearest->handler_offset)
9658 case CEE_UNALIGNED_:
9659 ins_flag |= MONO_INST_UNALIGNED;
9660 /* FIXME: record alignment? we can assume 1 for now */
9665 ins_flag |= MONO_INST_VOLATILE;
9669 ins_flag |= MONO_INST_TAILCALL;
9670 cfg->flags |= MONO_CFG_HAS_TAIL;
9671 /* Can't inline tail calls at this time */
9672 inline_costs += 100000;
9679 token = read32 (ip + 2);
9680 klass = mini_get_class (method, token, generic_context);
9681 CHECK_TYPELOAD (klass);
9682 if (generic_class_is_reference_type (cfg, klass))
9683 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9685 mini_emit_initobj (cfg, *sp, NULL, klass);
9689 case CEE_CONSTRAINED_:
9691 token = read32 (ip + 2);
9692 if (method->wrapper_type != MONO_WRAPPER_NONE)
9693 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9695 constrained_call = mono_class_get_full (image, token, generic_context);
9696 CHECK_TYPELOAD (constrained_call);
9701 MonoInst *iargs [3];
9705 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9706 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9707 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9708 /* emit_memset only works when val == 0 */
9709 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9714 if (ip [1] == CEE_CPBLK) {
9715 MonoMethod *memcpy_method = get_memcpy_method ();
9716 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9718 MonoMethod *memset_method = get_memset_method ();
9719 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9729 ins_flag |= MONO_INST_NOTYPECHECK;
9731 ins_flag |= MONO_INST_NORANGECHECK;
9732 /* we ignore the no-nullcheck for now since we
9733 * really do it explicitly only when doing callvirt->call
9739 int handler_offset = -1;
9741 for (i = 0; i < header->num_clauses; ++i) {
9742 MonoExceptionClause *clause = &header->clauses [i];
9743 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9744 handler_offset = clause->handler_offset;
9749 bblock->flags |= BB_EXCEPTION_UNSAFE;
9751 g_assert (handler_offset != -1);
9753 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9754 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9755 ins->sreg1 = load->dreg;
9756 MONO_ADD_INS (bblock, ins);
9758 link_bblock (cfg, bblock, end_bblock);
9759 start_new_bblock = 1;
9767 CHECK_STACK_OVF (1);
9769 token = read32 (ip + 2);
9770 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9771 MonoType *type = mono_type_create_from_typespec (image, token);
9772 token = mono_type_size (type, &ialign);
9774 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9775 CHECK_TYPELOAD (klass);
9776 mono_class_init (klass);
9777 token = mono_class_value_size (klass, &align);
9779 EMIT_NEW_ICONST (cfg, ins, token);
9784 case CEE_REFANYTYPE: {
9785 MonoInst *src_var, *src;
9791 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9793 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9794 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9795 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9805 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9810 g_error ("opcode 0x%02x not handled", *ip);
9813 if (start_new_bblock != 1)
9816 bblock->cil_length = ip - bblock->cil_code;
9817 bblock->next_bb = end_bblock;
9819 if (cfg->method == method && cfg->domainvar) {
9821 MonoInst *get_domain;
9823 cfg->cbb = init_localsbb;
9825 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9826 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9829 get_domain->dreg = alloc_preg (cfg);
9830 MONO_ADD_INS (cfg->cbb, get_domain);
9832 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9833 MONO_ADD_INS (cfg->cbb, store);
9836 #ifdef TARGET_POWERPC
9837 if (cfg->compile_aot)
9838 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9839 mono_get_got_var (cfg);
9842 if (cfg->method == method && cfg->got_var)
9843 mono_emit_load_got_addr (cfg);
9848 cfg->cbb = init_localsbb;
9850 for (i = 0; i < header->num_locals; ++i) {
9851 MonoType *ptype = header->locals [i];
9852 int t = ptype->type;
9853 dreg = cfg->locals [i]->dreg;
9855 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9856 t = mono_class_enum_basetype (ptype->data.klass)->type;
9858 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9859 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9860 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9861 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9862 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9863 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9864 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9865 ins->type = STACK_R8;
9866 ins->inst_p0 = (void*)&r8_0;
9867 ins->dreg = alloc_dreg (cfg, STACK_R8);
9868 MONO_ADD_INS (init_localsbb, ins);
9869 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9870 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9871 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9872 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9874 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9879 /* Add a sequence point for method entry/exit events */
9881 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9882 MONO_ADD_INS (init_localsbb, ins);
9883 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9884 MONO_ADD_INS (cfg->bb_exit, ins);
9889 if (cfg->method == method) {
9891 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9892 bb->region = mono_find_block_region (cfg, bb->real_offset);
9894 mono_create_spvar_for_region (cfg, bb->region);
9895 if (cfg->verbose_level > 2)
9896 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9900 g_slist_free (class_inits);
9901 dont_inline = g_list_remove (dont_inline, method);
9903 if (inline_costs < 0) {
9906 /* Method is too large */
9907 mname = mono_method_full_name (method, TRUE);
9908 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9909 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9914 if ((cfg->verbose_level > 2) && (cfg->method == method))
9915 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9917 return inline_costs;
9920 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9921 g_slist_free (class_inits);
9922 dont_inline = g_list_remove (dont_inline, method);
9926 g_slist_free (class_inits);
9927 dont_inline = g_list_remove (dont_inline, method);
9931 g_slist_free (class_inits);
9932 dont_inline = g_list_remove (dont_inline, method);
9933 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9937 g_slist_free (class_inits);
9938 dont_inline = g_list_remove (dont_inline, method);
9939 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source STORE*_MEMBASE_REG opcode to its immediate-source
 * *_MEMBASE_IMM counterpart, so a constant store can be folded into a single
 * instruction (used when fusing OP_ICONST/OP_I8CONST into a spill store).
 * Asserts on any opcode without an _IMM form.
 * NOTE(review): this excerpt elides the switch header/braces — confirm
 * against the full file before editing.
 */
9944 store_membase_reg_to_store_membase_imm (int opcode)
9947 case OP_STORE_MEMBASE_REG:
9948 return OP_STORE_MEMBASE_IMM;
9949 case OP_STOREI1_MEMBASE_REG:
9950 return OP_STOREI1_MEMBASE_IMM;
9951 case OP_STOREI2_MEMBASE_REG:
9952 return OP_STOREI2_MEMBASE_IMM;
9953 case OP_STOREI4_MEMBASE_REG:
9954 return OP_STOREI4_MEMBASE_IMM;
9955 case OP_STOREI8_MEMBASE_REG:
9956 return OP_STOREI8_MEMBASE_IMM;
	/* No _IMM counterpart exists for the given opcode */
9958 g_assert_not_reached ();
9964 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking a
 * register and an immediate (OP_*_IMM), so constant operands can be encoded
 * directly.  Includes x86/amd64-only memory-operand forms under the
 * TARGET_X86/TARGET_AMD64 guards.
 * NOTE(review): many case labels are elided in this excerpt; the visible
 * returns imply the usual IADD/ISUB/... -> *_IMM pairing — verify against
 * the full file.  Presumably returns -1 when no _IMM form exists (callers
 * such as the *_membase helpers use a -1 sentinel) — TODO confirm.
 */
9967 mono_op_to_op_imm (int opcode)
9977 return OP_IDIV_UN_IMM;
9981 return OP_IREM_UN_IMM;
9995 return OP_ISHR_UN_IMM;
10000 return OP_LSUB_IMM;
10002 return OP_LAND_IMM;
10006 return OP_LXOR_IMM;
10008 return OP_LSHL_IMM;
10010 return OP_LSHR_IMM;
10012 return OP_LSHR_UN_IMM;
10015 return OP_COMPARE_IMM;
10017 return OP_ICOMPARE_IMM;
10019 return OP_LCOMPARE_IMM;
10021 case OP_STORE_MEMBASE_REG:
10022 return OP_STORE_MEMBASE_IMM;
10023 case OP_STOREI1_MEMBASE_REG:
10024 return OP_STOREI1_MEMBASE_IMM;
10025 case OP_STOREI2_MEMBASE_REG:
10026 return OP_STOREI2_MEMBASE_IMM;
10027 case OP_STOREI4_MEMBASE_REG:
10028 return OP_STOREI4_MEMBASE_IMM;
	/* Architecture-specific memory-operand immediates */
10030 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10032 return OP_X86_PUSH_IMM;
10033 case OP_X86_COMPARE_MEMBASE_REG:
10034 return OP_X86_COMPARE_MEMBASE_IMM;
10036 #if defined(TARGET_AMD64)
10037 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10038 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* A call through a known register/address becomes a direct call */
10040 case OP_VOIDCALL_REG:
10041 return OP_VOIDCALL;
10049 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding IR
 * OP_LOAD*_MEMBASE opcode.  Native-int and object-reference loads both map
 * to the pointer-sized OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 * NOTE(review): the CEE_LDIND_* case labels are mostly elided in this
 * excerpt; only CEE_LDIND_REF is visible.
 */
10056 ldind_to_load_membase (int opcode)
10060 return OP_LOADI1_MEMBASE;
10062 return OP_LOADU1_MEMBASE;
10064 return OP_LOADI2_MEMBASE;
10066 return OP_LOADU2_MEMBASE;
10068 return OP_LOADI4_MEMBASE;
10070 return OP_LOADU4_MEMBASE;
10072 return OP_LOAD_MEMBASE;
10073 case CEE_LDIND_REF:
10074 return OP_LOAD_MEMBASE;
10076 return OP_LOADI8_MEMBASE;
10078 return OP_LOADR4_MEMBASE;
10080 return OP_LOADR8_MEMBASE;
10082 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding IR
 * OP_STORE*_MEMBASE_REG opcode.  Object-reference stores map to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Asserts on unknown opcodes.
 * NOTE(review): most CEE_STIND_* case labels are elided in this excerpt;
 * only CEE_STIND_REF is visible.
 */
10089 stind_to_store_membase (int opcode)
10093 return OP_STOREI1_MEMBASE_REG;
10095 return OP_STOREI2_MEMBASE_REG;
10097 return OP_STOREI4_MEMBASE_REG;
10099 case CEE_STIND_REF:
10100 return OP_STORE_MEMBASE_REG;
10102 return OP_STOREI8_MEMBASE_REG;
10104 return OP_STORER4_MEMBASE_REG;
10106 return OP_STORER8_MEMBASE_REG;
10108 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * absolute-address OP_LOAD*_MEM variant, available only on x86/amd64
 * (see the FIXME: this should become a MONO_ARCH_HAVE_LOAD_MEM capability
 * macro).  64-bit loads are only mapped on 64-bit registers.
 * NOTE(review): the tail of this function (default/fallthrough return,
 * presumably -1 given how the other mapping helpers signal "no mapping")
 * is elided from this excerpt — confirm before relying on it.
 */
10115 mono_load_membase_to_load_mem (int opcode)
10117 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10118 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10120 case OP_LOAD_MEMBASE:
10121 return OP_LOAD_MEM;
10122 case OP_LOADU1_MEMBASE:
10123 return OP_LOADU1_MEM;
10124 case OP_LOADU2_MEMBASE:
10125 return OP_LOADU2_MEM;
10126 case OP_LOADI4_MEMBASE:
10127 return OP_LOADI4_MEM;
10128 case OP_LOADU4_MEMBASE:
10129 return OP_LOADU4_MEM;
10130 #if SIZEOF_REGISTER == 8
10131 case OP_LOADI8_MEMBASE:
10132 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is a stack variable about to be
 * stored with STORE_OPCODE, return the x86/amd64 read-modify-write
 * *_MEMBASE opcode that fuses the load+op+store into one instruction.
 * Returns -1 when no fusion is possible (callers in mono_spill_global_vars
 * compare the result against -1).  Only word-sized stores qualify:
 * OP_STORE/OP_STOREI4 on x86, plus OP_STOREI8 on amd64.
 * NOTE(review): the case labels and the #else/default paths are elided in
 * this excerpt; the visible returns pair ADD/SUB/AND/OR/XOR (reg and imm
 * forms) with their _MEMBASE counterparts.
 */
10141 op_to_op_dest_membase (int store_opcode, int opcode)
10143 #if defined(TARGET_X86)
10144 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10149 return OP_X86_ADD_MEMBASE_REG;
10151 return OP_X86_SUB_MEMBASE_REG;
10153 return OP_X86_AND_MEMBASE_REG;
10155 return OP_X86_OR_MEMBASE_REG;
10157 return OP_X86_XOR_MEMBASE_REG;
10160 return OP_X86_ADD_MEMBASE_IMM;
10163 return OP_X86_SUB_MEMBASE_IMM;
10166 return OP_X86_AND_MEMBASE_IMM;
10169 return OP_X86_OR_MEMBASE_IMM;
10172 return OP_X86_XOR_MEMBASE_IMM;
10178 #if defined(TARGET_AMD64)
10179 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
	/* 32-bit (I-prefixed) ops use the X86_ opcodes, 64-bit ops the AMD64_ ones */
10184 return OP_X86_ADD_MEMBASE_REG;
10186 return OP_X86_SUB_MEMBASE_REG;
10188 return OP_X86_AND_MEMBASE_REG;
10190 return OP_X86_OR_MEMBASE_REG;
10192 return OP_X86_XOR_MEMBASE_REG;
10194 return OP_X86_ADD_MEMBASE_IMM;
10196 return OP_X86_SUB_MEMBASE_IMM;
10198 return OP_X86_AND_MEMBASE_IMM;
10200 return OP_X86_OR_MEMBASE_IMM;
10202 return OP_X86_XOR_MEMBASE_IMM;
10204 return OP_AMD64_ADD_MEMBASE_REG;
10206 return OP_AMD64_SUB_MEMBASE_REG;
10208 return OP_AMD64_AND_MEMBASE_REG;
10210 return OP_AMD64_OR_MEMBASE_REG;
10212 return OP_AMD64_XOR_MEMBASE_REG;
10215 return OP_AMD64_ADD_MEMBASE_IMM;
10218 return OP_AMD64_SUB_MEMBASE_IMM;
10221 return OP_AMD64_AND_MEMBASE_IMM;
10224 return OP_AMD64_OR_MEMBASE_IMM;
10227 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode directly into a one-byte store to memory:
 * on x86/amd64, a SETcc whose result is stored with OP_STOREI1_MEMBASE_REG
 * becomes OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE.  Returns -1 when no
 * fusion applies (callers compare against -1).
 * NOTE(review): the case labels (presumably OP_CEQ-style opcodes) and the
 * fallthrough return are elided in this excerpt.
 */
10237 op_to_op_store_membase (int store_opcode, int opcode)
10239 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10242 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10243 return OP_X86_SETEQ_MEMBASE;
10245 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10246 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source of OPCODE is a stack variable loaded with
 * LOAD_OPCODE, return the x86/amd64 opcode variant that reads sreg1
 * straight from memory, eliminating the separate load.  Returns -1 when no
 * such variant exists (callers compare against -1).  The LOADU1 special
 * case avoids sign-extension problems with the 8-bit compare (see FIXMEs).
 * NOTE(review): the #ifdef TARGET_X86 guard for the first half and several
 * case labels are elided in this excerpt; the amd64 immediate-compare
 * cases at 10290-10298 sit inside a commented-out block ("only works for
 * 32 bit immediates").
 */
10254 op_to_op_src1_membase (int load_opcode, int opcode)
10257 /* FIXME: This has sign extension issues */
10259 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10260 return OP_X86_COMPARE_MEMBASE8_IMM;
10263 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10268 return OP_X86_PUSH_MEMBASE;
10269 case OP_COMPARE_IMM:
10270 case OP_ICOMPARE_IMM:
10271 return OP_X86_COMPARE_MEMBASE_IMM;
10274 return OP_X86_COMPARE_MEMBASE_REG;
10278 #ifdef TARGET_AMD64
10279 /* FIXME: This has sign extension issues */
10281 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10282 return OP_X86_COMPARE_MEMBASE8_IMM;
10287 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10288 return OP_X86_PUSH_MEMBASE;
10290 /* FIXME: This only works for 32 bit immediates
10291 case OP_COMPARE_IMM:
10292 case OP_LCOMPARE_IMM:
10293 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10294 return OP_AMD64_COMPARE_MEMBASE_IMM;
10296 case OP_ICOMPARE_IMM:
10297 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10298 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10302 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10303 return OP_AMD64_COMPARE_MEMBASE_REG;
10306 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10307 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   When the second source of OPCODE is a stack variable loaded with
 * LOAD_OPCODE, return the x86/amd64 *_REG_MEMBASE opcode variant that
 * reads sreg2 straight from memory, eliminating the separate load.
 * Returns -1 when no such variant exists (callers compare against -1).
 * On amd64, each opcode is matched to the load width: 32-bit loads select
 * the X86_/ICOMPARE forms, 64-bit loads the AMD64_ forms.
 * NOTE(review): the #ifdef TARGET_X86 guard, case labels and fallthrough
 * return are elided in this excerpt.
 */
10316 op_to_op_src2_membase (int load_opcode, int opcode)
10319 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10325 return OP_X86_COMPARE_REG_MEMBASE;
10327 return OP_X86_ADD_REG_MEMBASE;
10329 return OP_X86_SUB_REG_MEMBASE;
10331 return OP_X86_AND_REG_MEMBASE;
10333 return OP_X86_OR_REG_MEMBASE;
10335 return OP_X86_XOR_REG_MEMBASE;
10339 #ifdef TARGET_AMD64
10342 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10343 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10347 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10348 return OP_AMD64_COMPARE_REG_MEMBASE;
10351 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10352 return OP_X86_ADD_REG_MEMBASE;
10354 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10355 return OP_X86_SUB_REG_MEMBASE;
10357 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10358 return OP_X86_AND_REG_MEMBASE;
10360 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10361 return OP_X86_OR_REG_MEMBASE;
10363 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10364 return OP_X86_XOR_REG_MEMBASE;
10366 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10367 return OP_AMD64_ADD_REG_MEMBASE;
10369 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10370 return OP_AMD64_SUB_REG_MEMBASE;
10372 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10373 return OP_AMD64_AND_REG_MEMBASE;
10375 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10376 return OP_AMD64_OR_REG_MEMBASE;
10378 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10379 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuse to convert opcodes that the current
 * architecture emulates in software (long shifts on 32-bit registers
 * without native long-shift support, and mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined), since
 * the emulation path needs both operands in registers.
 * NOTE(review): the case labels guarded by the two #if blocks (presumably
 * returning -1) are elided in this excerpt.
 */
10387 mono_op_to_op_imm_noemul (int opcode)
10390 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10395 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10403 return mono_op_to_op_imm (opcode);
10407 #ifndef DISABLE_JIT
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them; conversely, variables confined to a single
 * bblock (and not volatile/indirect/argument/R4) are demoted to plain
 * local vregs, and the varinfo/vars tables are compacted afterwards.
 *
 * Review fixes in this revision (all other tokens unchanged):
 *  1. vreg_to_bb allocation: was `sizeof (gint32*) * cfg->next_vreg + 1`,
 *     i.e. wrong element type (the array holds gint32, not pointers) and a
 *     precedence slip that added one *byte* instead of one element.  It
 *     only happened to be large enough because sizeof(ptr) >= sizeof(gint32).
 *  2. The source-register scan tested SRC1/sreg1 twice; the second arm of
 *     the disjunction now tests SRC2/sreg2 as clearly intended (this sits
 *     in the disabled liveness-scan path — see "too slow for JIT" comment).
 *
 * NOTE(review): this excerpt elides many structural lines (braces,
 * continue/break, #endif); the visible lines below are preserved verbatim
 * apart from the two fixes above.
 */
10416 mono_handle_global_vregs (MonoCompile *cfg)
10418 gint32 *vreg_to_bb;
10419 MonoBasicBlock *bb;
	/* One zero-initialized gint32 slot per vreg (0 = unseen, see below) */
10422 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
10424 #ifdef MONO_ARCH_SIMD_INTRINSICS
10425 if (cfg->uses_simd_intrinsics)
10426 mono_simd_simplify_indirection (cfg);
10429 /* Find local vregs used in more than one bb */
10430 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10431 MonoInst *ins = bb->code;
10432 int block_num = bb->block_num;
10434 if (cfg->verbose_level > 2)
10435 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10438 for (; ins; ins = ins->next) {
10439 const char *spec = INS_INFO (ins->opcode);
10440 int regtype = 0, regindex;
10443 if (G_UNLIKELY (cfg->verbose_level > 2))
10444 mono_print_ins (ins);
	/* CIL opcodes must already have been lowered to machine IR */
10446 g_assert (ins->opcode >= MONO_CEE_LAST);
	/* Visit dreg, sreg1, sreg2, sreg3 in turn, skipping unused slots */
10448 for (regindex = 0; regindex < 4; regindex ++) {
10451 if (regindex == 0) {
10452 regtype = spec [MONO_INST_DEST];
10453 if (regtype == ' ')
10456 } else if (regindex == 1) {
10457 regtype = spec [MONO_INST_SRC1];
10458 if (regtype == ' ')
10461 } else if (regindex == 2) {
10462 regtype = spec [MONO_INST_SRC2];
10463 if (regtype == ' ')
10466 } else if (regindex == 3) {
10467 regtype = spec [MONO_INST_SRC3];
10468 if (regtype == ' ')
10473 #if SIZEOF_REGISTER == 4
10474 /* In the LLVM case, the long opcodes are not decomposed */
10475 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10477 * Since some instructions reference the original long vreg,
10478 * and some reference the two component vregs, it is quite hard
10479 * to determine when it needs to be global. So be conservative.
10481 if (!get_vreg_to_inst (cfg, vreg)) {
10482 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10484 if (cfg->verbose_level > 2)
10485 printf ("LONG VREG R%d made global.\n", vreg);
10489 * Make the component vregs volatile since the optimizations can
10490 * get confused otherwise.
10492 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10493 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10497 g_assert (vreg != -1);
10499 prev_bb = vreg_to_bb [vreg];
10500 if (prev_bb == 0) {
10501 /* 0 is a valid block num */
10502 vreg_to_bb [vreg] = block_num + 1;
10503 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
	/* Hard registers are handled by the register allocator, not here */
10504 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10507 if (!get_vreg_to_inst (cfg, vreg)) {
10508 if (G_UNLIKELY (cfg->verbose_level > 2))
10509 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10513 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10516 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10519 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10522 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10525 g_assert_not_reached ();
10529 /* Flag as having been used in more than one bb */
10530 vreg_to_bb [vreg] = -1;
10536 /* If a variable is used in only one bblock, convert it into a local vreg */
10537 for (i = 0; i < cfg->num_varinfo; i++) {
10538 MonoInst *var = cfg->varinfo [i];
10539 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10541 switch (var->type) {
10547 #if SIZEOF_REGISTER == 8
10550 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10551 /* Enabling this screws up the fp stack on x86 */
10554 /* Arguments are implicitly global */
10555 /* Putting R4 vars into registers doesn't work currently */
10556 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10558 * Make that the variable's liveness interval doesn't contain a call, since
10559 * that would cause the lvreg to be spilled, making the whole optimization
10562 /* This is too slow for JIT compilation */
10564 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10566 int def_index, call_index, ins_index;
10567 gboolean spilled = FALSE;
10572 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10573 const char *spec = INS_INFO (ins->opcode);
10575 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10576 def_index = ins_index;
	/* Fixed: second arm previously re-tested SRC1/sreg1, so uses via
	 * the second source register were never detected here. */
10578 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10579 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10580 if (call_index > def_index) {
10586 if (MONO_IS_CALL (ins))
10587 call_index = ins_index;
10597 if (G_UNLIKELY (cfg->verbose_level > 2))
10598 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10599 var->flags |= MONO_INST_IS_DEAD;
10600 cfg->vreg_to_inst [var->dreg] = NULL;
10607 * Compress the varinfo and vars tables so the liveness computation is faster and
10608 * takes up less space.
10611 for (i = 0; i < cfg->num_varinfo; ++i) {
10612 MonoInst *var = cfg->varinfo [i];
10613 if (pos < i && cfg->locals_start == i)
10614 cfg->locals_start = pos;
10615 if (!(var->flags & MONO_INST_IS_DEAD)) {
10617 cfg->varinfo [pos] = cfg->varinfo [i];
10618 cfg->varinfo [pos]->inst_c0 = pos;
10619 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10620 cfg->vars [pos].idx = pos;
10621 #if SIZEOF_REGISTER == 4
10622 if (cfg->varinfo [pos]->type == STACK_I8) {
10623 /* Modify the two component vars too */
10626 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10627 var1->inst_c0 = pos;
10628 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10629 var1->inst_c0 = pos;
10636 cfg->num_varinfo = pos;
10637 if (cfg->locals_start > cfg->num_varinfo)
10638 cfg->locals_start = cfg->num_varinfo;
10642 * mono_spill_global_vars:
10644 * Generate spill code for variables which are not allocated to registers,
10645 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10646 * code is generated which could be optimized by the local optimization passes.
10649 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10651 MonoBasicBlock *bb;
10653 int orig_next_vreg;
10654 guint32 *vreg_to_lvreg;
10656 guint32 i, lvregs_len;
10657 gboolean dest_has_lvreg = FALSE;
10658 guint32 stacktypes [128];
10659 MonoInst **live_range_start, **live_range_end;
10660 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10662 *need_local_opts = FALSE;
10664 memset (spec2, 0, sizeof (spec2));
10666 /* FIXME: Move this function to mini.c */
10667 stacktypes ['i'] = STACK_PTR;
10668 stacktypes ['l'] = STACK_I8;
10669 stacktypes ['f'] = STACK_R8;
10670 #ifdef MONO_ARCH_SIMD_INTRINSICS
10671 stacktypes ['x'] = STACK_VTYPE;
10674 #if SIZEOF_REGISTER == 4
10675 /* Create MonoInsts for longs */
10676 for (i = 0; i < cfg->num_varinfo; i++) {
10677 MonoInst *ins = cfg->varinfo [i];
10679 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10680 switch (ins->type) {
10681 #ifdef MONO_ARCH_SOFT_FLOAT
10687 g_assert (ins->opcode == OP_REGOFFSET);
10689 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10691 tree->opcode = OP_REGOFFSET;
10692 tree->inst_basereg = ins->inst_basereg;
10693 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10695 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10697 tree->opcode = OP_REGOFFSET;
10698 tree->inst_basereg = ins->inst_basereg;
10699 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10709 /* FIXME: widening and truncation */
10712 * As an optimization, when a variable allocated to the stack is first loaded into
10713 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10714 * the variable again.
10716 orig_next_vreg = cfg->next_vreg;
10717 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10718 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10722 * These arrays contain the first and last instructions accessing a given
10724 * Since we emit bblocks in the same order we process them here, and we
10725 * don't split live ranges, these will precisely describe the live range of
10726 * the variable, i.e. the instruction range where a valid value can be found
10727 * in the variables location.
10729 /* FIXME: Only do this if debugging info is requested */
10730 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10731 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10732 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10733 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10735 /* Add spill loads/stores */
10736 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10739 if (cfg->verbose_level > 2)
10740 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10742 /* Clear vreg_to_lvreg array */
10743 for (i = 0; i < lvregs_len; i++)
10744 vreg_to_lvreg [lvregs [i]] = 0;
10748 MONO_BB_FOR_EACH_INS (bb, ins) {
10749 const char *spec = INS_INFO (ins->opcode);
10750 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10751 gboolean store, no_lvreg;
10752 int sregs [MONO_MAX_SRC_REGS];
10754 if (G_UNLIKELY (cfg->verbose_level > 2))
10755 mono_print_ins (ins);
10757 if (ins->opcode == OP_NOP)
10761 * We handle LDADDR here as well, since it can only be decomposed
10762 * when variable addresses are known.
10764 if (ins->opcode == OP_LDADDR) {
10765 MonoInst *var = ins->inst_p0;
10767 if (var->opcode == OP_VTARG_ADDR) {
10768 /* Happens on SPARC/S390 where vtypes are passed by reference */
10769 MonoInst *vtaddr = var->inst_left;
10770 if (vtaddr->opcode == OP_REGVAR) {
10771 ins->opcode = OP_MOVE;
10772 ins->sreg1 = vtaddr->dreg;
10774 else if (var->inst_left->opcode == OP_REGOFFSET) {
10775 ins->opcode = OP_LOAD_MEMBASE;
10776 ins->inst_basereg = vtaddr->inst_basereg;
10777 ins->inst_offset = vtaddr->inst_offset;
10781 g_assert (var->opcode == OP_REGOFFSET);
10783 ins->opcode = OP_ADD_IMM;
10784 ins->sreg1 = var->inst_basereg;
10785 ins->inst_imm = var->inst_offset;
10788 *need_local_opts = TRUE;
10789 spec = INS_INFO (ins->opcode);
10792 if (ins->opcode < MONO_CEE_LAST) {
10793 mono_print_ins (ins);
10794 g_assert_not_reached ();
10798 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10802 if (MONO_IS_STORE_MEMBASE (ins)) {
10803 tmp_reg = ins->dreg;
10804 ins->dreg = ins->sreg2;
10805 ins->sreg2 = tmp_reg;
10808 spec2 [MONO_INST_DEST] = ' ';
10809 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10810 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10811 spec2 [MONO_INST_SRC3] = ' ';
10813 } else if (MONO_IS_STORE_MEMINDEX (ins))
10814 g_assert_not_reached ();
10819 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10820 printf ("\t %.3s %d", spec, ins->dreg);
10821 num_sregs = mono_inst_get_src_registers (ins, sregs);
10822 for (srcindex = 0; srcindex < 3; ++srcindex)
10823 printf (" %d", sregs [srcindex]);
10830 regtype = spec [MONO_INST_DEST];
10831 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10834 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10835 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10836 MonoInst *store_ins;
10838 MonoInst *def_ins = ins;
10839 int dreg = ins->dreg; /* The original vreg */
10841 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10843 if (var->opcode == OP_REGVAR) {
10844 ins->dreg = var->dreg;
10845 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10847 * Instead of emitting a load+store, use a _membase opcode.
10849 g_assert (var->opcode == OP_REGOFFSET);
10850 if (ins->opcode == OP_MOVE) {
10854 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10855 ins->inst_basereg = var->inst_basereg;
10856 ins->inst_offset = var->inst_offset;
10859 spec = INS_INFO (ins->opcode);
10863 g_assert (var->opcode == OP_REGOFFSET);
10865 prev_dreg = ins->dreg;
10867 /* Invalidate any previous lvreg for this vreg */
10868 vreg_to_lvreg [ins->dreg] = 0;
10872 #ifdef MONO_ARCH_SOFT_FLOAT
10873 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10875 store_opcode = OP_STOREI8_MEMBASE_REG;
10879 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10881 if (regtype == 'l') {
10882 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10883 mono_bblock_insert_after_ins (bb, ins, store_ins);
10884 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10885 mono_bblock_insert_after_ins (bb, ins, store_ins);
10886 def_ins = store_ins;
10889 g_assert (store_opcode != OP_STOREV_MEMBASE);
10891 /* Try to fuse the store into the instruction itself */
10892 /* FIXME: Add more instructions */
10893 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10894 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10895 ins->inst_imm = ins->inst_c0;
10896 ins->inst_destbasereg = var->inst_basereg;
10897 ins->inst_offset = var->inst_offset;
10898 spec = INS_INFO (ins->opcode);
10899 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10900 ins->opcode = store_opcode;
10901 ins->inst_destbasereg = var->inst_basereg;
10902 ins->inst_offset = var->inst_offset;
10906 tmp_reg = ins->dreg;
10907 ins->dreg = ins->sreg2;
10908 ins->sreg2 = tmp_reg;
10911 spec2 [MONO_INST_DEST] = ' ';
10912 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10913 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10914 spec2 [MONO_INST_SRC3] = ' ';
10916 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10917 // FIXME: The backends expect the base reg to be in inst_basereg
10918 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10920 ins->inst_basereg = var->inst_basereg;
10921 ins->inst_offset = var->inst_offset;
10922 spec = INS_INFO (ins->opcode);
10924 /* printf ("INS: "); mono_print_ins (ins); */
10925 /* Create a store instruction */
10926 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10928 /* Insert it after the instruction */
10929 mono_bblock_insert_after_ins (bb, ins, store_ins);
10931 def_ins = store_ins;
10934 * We can't assign ins->dreg to var->dreg here, since the
10935 * sregs could use it. So set a flag, and do it after
10938 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10939 dest_has_lvreg = TRUE;
10944 if (def_ins && !live_range_start [dreg]) {
10945 live_range_start [dreg] = def_ins;
10946 live_range_start_bb [dreg] = bb;
10953 num_sregs = mono_inst_get_src_registers (ins, sregs);
10954 for (srcindex = 0; srcindex < 3; ++srcindex) {
10955 regtype = spec [MONO_INST_SRC1 + srcindex];
10956 sreg = sregs [srcindex];
10958 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10959 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10960 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10961 MonoInst *use_ins = ins;
10962 MonoInst *load_ins;
10963 guint32 load_opcode;
10965 if (var->opcode == OP_REGVAR) {
10966 sregs [srcindex] = var->dreg;
10967 //mono_inst_set_src_registers (ins, sregs);
10968 live_range_end [sreg] = use_ins;
10969 live_range_end_bb [sreg] = bb;
10973 g_assert (var->opcode == OP_REGOFFSET);
10975 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10977 g_assert (load_opcode != OP_LOADV_MEMBASE);
10979 if (vreg_to_lvreg [sreg]) {
10980 g_assert (vreg_to_lvreg [sreg] != -1);
10982 /* The variable is already loaded to an lvreg */
10983 if (G_UNLIKELY (cfg->verbose_level > 2))
10984 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10985 sregs [srcindex] = vreg_to_lvreg [sreg];
10986 //mono_inst_set_src_registers (ins, sregs);
10990 /* Try to fuse the load into the instruction */
10991 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10992 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10993 sregs [0] = var->inst_basereg;
10994 //mono_inst_set_src_registers (ins, sregs);
10995 ins->inst_offset = var->inst_offset;
10996 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10997 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10998 sregs [1] = var->inst_basereg;
10999 //mono_inst_set_src_registers (ins, sregs);
11000 ins->inst_offset = var->inst_offset;
11002 if (MONO_IS_REAL_MOVE (ins)) {
11003 ins->opcode = OP_NOP;
11006 //printf ("%d ", srcindex); mono_print_ins (ins);
11008 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11010 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11011 if (var->dreg == prev_dreg) {
11013 * sreg refers to the value loaded by the load
11014 * emitted below, but we need to use ins->dreg
11015 * since it refers to the store emitted earlier.
11019 g_assert (sreg != -1);
11020 vreg_to_lvreg [var->dreg] = sreg;
11021 g_assert (lvregs_len < 1024);
11022 lvregs [lvregs_len ++] = var->dreg;
11026 sregs [srcindex] = sreg;
11027 //mono_inst_set_src_registers (ins, sregs);
11029 if (regtype == 'l') {
11030 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11031 mono_bblock_insert_before_ins (bb, ins, load_ins);
11032 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11033 mono_bblock_insert_before_ins (bb, ins, load_ins);
11034 use_ins = load_ins;
11037 #if SIZEOF_REGISTER == 4
11038 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11040 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11041 mono_bblock_insert_before_ins (bb, ins, load_ins);
11042 use_ins = load_ins;
11046 if (var->dreg < orig_next_vreg) {
11047 live_range_end [var->dreg] = use_ins;
11048 live_range_end_bb [var->dreg] = bb;
11052 mono_inst_set_src_registers (ins, sregs);
11054 if (dest_has_lvreg) {
11055 g_assert (ins->dreg != -1);
11056 vreg_to_lvreg [prev_dreg] = ins->dreg;
11057 g_assert (lvregs_len < 1024);
11058 lvregs [lvregs_len ++] = prev_dreg;
11059 dest_has_lvreg = FALSE;
11063 tmp_reg = ins->dreg;
11064 ins->dreg = ins->sreg2;
11065 ins->sreg2 = tmp_reg;
11068 if (MONO_IS_CALL (ins)) {
11069 /* Clear vreg_to_lvreg array */
11070 for (i = 0; i < lvregs_len; i++)
11071 vreg_to_lvreg [lvregs [i]] = 0;
11073 } else if (ins->opcode == OP_NOP) {
11075 MONO_INST_NULLIFY_SREGS (ins);
11078 if (cfg->verbose_level > 2)
11079 mono_print_ins_index (1, ins);
11083 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11085 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11086 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11088 for (i = 0; i < cfg->num_varinfo; ++i) {
11089 int vreg = MONO_VARINFO (cfg, i)->vreg;
11092 if (live_range_start [vreg]) {
11093 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11095 ins->inst_c1 = vreg;
11096 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11098 if (live_range_end [vreg]) {
11099 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11101 ins->inst_c1 = vreg;
11102 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11107 g_free (live_range_start);
11108 g_free (live_range_end);
11109 g_free (live_range_start_bb);
11110 g_free (live_range_end_bb);
11115 * - use 'iadd' instead of 'int_add'
11116 * - handling ovf opcodes: decompose in method_to_ir.
11117 * - unify iregs/fregs
11118 * -> partly done, the missing parts are:
11119 * - a more complete unification would involve unifying the hregs as well, so
11120 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11121 * would no longer map to the machine hregs, so the code generators would need to
11122 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11123 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11124 * fp/non-fp branches speeds it up by about 15%.
11125 * - use sext/zext opcodes instead of shifts
11127 * - get rid of TEMPLOADs if possible and use vregs instead
11128 * - clean up usage of OP_P/OP_ opcodes
11129 * - cleanup usage of DUMMY_USE
11130 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11132 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11133 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11134 * - make sure handle_stack_args () is called before the branch is emitted
11135 * - when the new IR is done, get rid of all unused stuff
11136 * - COMPARE/BEQ as separate instructions or unify them ?
11137 * - keeping them separate allows specialized compare instructions like
11138 * compare_imm, compare_membase
11139 * - most back ends unify fp compare+branch, fp compare+ceq
11140 * - integrate mono_save_args into inline_method
11141 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11142 * - handle long shift opts on 32 bit platforms somehow: they require
11143 * 3 sregs (2 for arg1 and 1 for arg2)
11144 * - make byref a 'normal' type.
11145 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11146 * variable if needed.
11147 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11148 * like inline_method.
11149 * - remove inlining restrictions
11150 * - fix LNEG and enable cfold of INEG
11151 * - generalize x86 optimizations like ldelema as a peephole optimization
11152 * - add store_mem_imm for amd64
11153 * - optimize the loading of the interruption flag in the managed->native wrappers
11154 * - avoid special handling of OP_NOP in passes
11155 * - move code inserting instructions into one function/macro.
11156 * - try a coalescing phase after liveness analysis
11157 * - add float -> vreg conversion + local optimizations on !x86
11158 * - figure out how to handle decomposed branches during optimizations, ie.
11159 * compare+branch, op_jump_table+op_br etc.
11160 * - promote RuntimeXHandles to vregs
11161 * - vtype cleanups:
11162 * - add a NEW_VARLOADA_VREG macro
11163 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11164 * accessing vtype fields.
11165 * - get rid of I8CONST on 64 bit platforms
11166 * - dealing with the increase in code size due to branches created during opcode
11168 * - use extended basic blocks
11169 * - all parts of the JIT
11170 * - handle_global_vregs () && local regalloc
11171 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11172 * - sources of increase in code size:
11175 * - isinst and castclass
11176 * - lvregs not allocated to global registers even if used multiple times
11177 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11179 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11180 * - add all micro optimizations from the old JIT
11181 * - put tree optimizations into the deadce pass
11182 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11183 * specific function.
11184 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11185 * fcompare + branchCC.
11186 * - create a helper function for allocating a stack slot, taking into account
11187 * MONO_CFG_HAS_SPILLUP.
11189 * - merge the ia64 switch changes.
11190 * - optimize mono_regstate2_alloc_int/float.
11191 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11192 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11193 * parts of the tree could be separated by other instructions, killing the tree
11194 * arguments, or stores killing loads etc. Also, should we fold loads into other
11195 * instructions if the result of the load is used multiple times ?
11196 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11197 * - LAST MERGE: 108395.
11198 * - when returning vtypes in registers, generate IR and append it to the end of the
11199 * last bb instead of doing it in the epilog.
11200 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11208 - When to decompose opcodes:
11209 - earlier: this makes some optimizations hard to implement, since the low level IR
11210 no longer contains the necessary information. But it is easier to do.
11211 - later: harder to implement, enables more optimizations.
11212 - Branches inside bblocks:
11213 - created when decomposing complex opcodes.
11214 - branches to another bblock: harmless, but not tracked by the branch
11215 optimizations, so need to branch to a label at the start of the bblock.
11216 - branches to inside the same bblock: very problematic, trips up the local
11217 reg allocator. Can be fixed by splitting the current bblock, but that is a
11218 complex operation, since some local vregs can become global vregs etc.
11219 - Local/global vregs:
11220 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11221 local register allocator.
11222 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11223 structure, created by mono_create_var (). Assigned to hregs or the stack by
11224 the global register allocator.
11225 - When to do optimizations like alu->alu_imm:
11226 - earlier -> saves work later on since the IR will be smaller/simpler
11227 - later -> can work on more instructions
11228 - Handling of valuetypes:
11229 - When a vtype is pushed on the stack, a new temporary is created, an
11230 instruction computing its address (LDADDR) is emitted and pushed on
11231 the stack. Need to optimize cases when the vtype is used immediately as in
11232 argument passing, stloc etc.
11233 - Instead of the to_end stuff in the old JIT, simply call the function handling
11234 the values on the stack before emitting the last instruction of the bb.
11237 #endif /* DISABLE_JIT */