2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* Relative weight of a branch when estimating code cost — presumably used to bias inlining decisions; TODO confirm against callers. */
64 #define BRANCH_COST 100
/* Upper bound on method size eligible for inlining — assumed to be measured in IL bytes; verify at use sites. */
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph; the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * that control flow reaches after leaving 'from'.
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/* Loads into intf_reg the interface-offsets entry for @klass, read from the
 * table located at negative offsets from the vtable pointer in vtable_reg. */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
/* AOT: the (adjusted) interface id is only known when the image is loaded,
 * so emit a patchable constant and compute the slot address at run time. */
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so load directly at a
 * fixed negative offset from the vtable. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emits code which sets intf_bit_reg to a nonzero value if the interface
 * bitmap found at base_reg + offset has the bit for @klass's interface id set.
 * Two layouts are supported, selected at build time. */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to the mono_class_interface_match
 * icall, passing the bitmap pointer and the interface id. */
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: one bit per interface id; test byte (iid >> 3),
 * bit (iid & 7). */
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: the interface id is a load-time constant, so the byte index and
 * the bit mask must both be computed with emitted arithmetic. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask fold to immediates. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoClass. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap embedded in MonoVTable. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * the value given by max_iid_reg.
/* If @false_target is non-NULL, branch there when the check fails;
 * otherwise throw InvalidCastException. */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: interface id comes from a patchable constant. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned less-than: max_iid < iid means klass's interface cannot be
 * implemented by the object. */
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit, zero-extended) and delegate. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit, zero-extended) and delegate. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emits an "is-instance" subtype test: checks whether the class in klass_reg
 * derives from @klass by probing the supertypes table at klass->idepth - 1.
 * Branches to @true_target on a match; the idepth guard branches to
 * @false_target when the hierarchy is too shallow. @klass_ins, when non-NULL,
 * supplies the class to compare against as a runtime value (generic sharing). */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only emit the depth guard when the supertypes table may be truncated;
 * for idepth <= MONO_DEFAULT_SUPERTABLE_SIZE the slot always exists. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime class inst, an AOT class constant, or an
 * immediate class pointer, in that order of preference. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for mini_emit_isninst_cast_inst with no runtime
 * class instruction (non-shared code path). */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emits an interface cast check against the vtable in vtable_reg:
 * range-check the interface id, then test the interface bitmap bit.
 * With a @true_target, branch there on success (isinst-style); otherwise
 * throw InvalidCastException on failure (castclass-style). */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same success/failure contract as mini_emit_iface_cast, but the range
 * check and bitmap test read from MonoClass rather than MonoVTable. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emits an exact class-equality check: klass_reg must hold exactly @klass
 * (or the runtime class in @klass_inst, when given), else an
 * InvalidCastException is thrown. */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
/* AOT cannot embed raw MonoClass pointers; use a patchable constant. */
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class check without a runtime class instruction. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compares the class in klass_reg against @klass and branches to @target
 * using @branch_op (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing. */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses through this
 * wrapper for the element class of arrays of arrays. */
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emits a castclass check of the class in klass_reg against @klass, throwing
 * InvalidCastException on mismatch. The array path checks rank and then the
 * element (cast) class; the non-array path probes the supertypes table.
 * @object_is_null is used as the branch target for cases that are allowed to
 * succeed without further checks. */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* NOTE(review): klass_inst is asserted NULL on this path — the shared
 * (runtime-class) variant is not supported for arrays here. */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* The element-class comparison has special cases for object/enum because
 * cast_class folds enums to their base representation. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY means a vector (rank 1, zero lower bound); a multi-dimensional
 * rank-1 array has a non-NULL bounds pointer, so reject it here. */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table probe, mirroring
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass check without a runtime class instruction (non-shared path). */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emits inline code which stores @size bytes of value @val (only 0 is
 * supported, see the assert) at destreg + offset, using the widest stores
 * that @align permits. Small aligned sizes become a single immediate store;
 * larger sizes fall through to a sequence of register stores. */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Fast path: the whole memset fits in one naturally-aligned store. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value in a register once, then store it
 * repeatedly in decreasing widths. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1676 /* This could be optimized further if necessary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
/* On 64-bit with unaligned access, a leading 4-byte store re-aligns the
 * destination before switching to 8-byte stores. */
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emits inline code copying @size bytes from srcreg + soffset to
 * destreg + doffset, using load/store pairs of decreasing width. @align
 * selects whether wide (2/4/8-byte) accesses are permitted. Callers must
 * guarantee the ranges do not overlap in a way the unrolled copy would break. */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1729 /* This could be optimized further if necessary */
/* Byte-wise copy path (used when alignment forbids wider accesses). */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy in 8-byte chunks while enough bytes remain. */
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4, then 2, then 1-byte copies. */
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Maps a method return type to the matching call opcode family:
 * VOIDCALL / CALL / LCALL / FCALL / VCALL, each with _REG (indirect) and
 * VIRT (virtual) variants selected by @calli and @virt. Enums and generic
 * instances are reduced to their underlying representation first. */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized regardless of the pointee type. */
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type (loop back). */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's byval type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Resolve enums / custom modifiers to the underlying type, then match
 * the target type against the stack-slot kind of @arg. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
1904 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly by class, not just by stack kind. */
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
/* Generic instances: valuetype instantiations need an exact class match,
 * reference instantiations only need an object stack slot. */
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
/* Type variables only occur under generic sharing, where they are
 * represented as object references. */
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* The implicit "this" argument must be an object, managed pointer, or
 * native pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Reduce generic type variables to their basic representation before
 * checking the stack-slot kind. */
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Maps an OP_*CALLVIRT opcode to its direct-call counterpart; aborts on
 * opcodes outside the callvirt family. */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Maps an OP_*CALLVIRT opcode to the corresponding *_MEMBASE opcode used
 * for calls through a vtable/memory slot; aborts on unexpected opcodes. */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/* Emits the hidden IMT (interface method table) argument for @call.
 * When the architecture reserves a dedicated IMT register, the method
 * token (from @imt_arg, an AOT constant, or a raw pointer) is moved into
 * it; otherwise the arch backend decides how to pass it. */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg = alloc_preg (cfg);
2091 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2092 } else if (cfg->compile_aot) {
2093 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT fallback: embed the MonoMethod pointer directly as a constant. */
2096 MONO_INST_NEW (cfg, ins, OP_PCONST);
2097 ins->inst_p0 = call->method;
2098 ins->dreg = method_reg;
2099 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM tracks the IMT value as a vreg on the call instead of pinning a
 * physical register. */
2103 if (COMPILE_LLVM (cfg))
2104 call->imt_arg_reg = method_reg;
2106 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2108 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2113 static MonoJumpInfo *
/* Allocates a MonoJumpInfo patch record from @mp and fills in its type and
 * target. Memory is owned by the mempool; no separate free is needed. */
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2120 ji->data.target = target;
2125 inline static MonoCallInst *
/* Creates (but does not add to a bblock) a MonoCallInst for a call with the
 * given signature and arguments. Selects the opcode from the return type,
 * arranges the return-value plumbing (vret_var / temp / dreg) and lets the
 * arch (or LLVM) backend lower the argument passing. */
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns: either reuse the method's vret address or create a
 * temporary and reference it through OP_OUTARG_VTRETADDR (see below). */
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2185 MonoInst *in = call->args [i];
/* Index 0 may be the implicit "this" pointer; treat it as native int. */
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
/* Hand the call to the LLVM or native backend to lower argument passing. */
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2211 mono_arch_emit_call (cfg, call);
2213 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-argument area needed by any call. */
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
2222 inline static MonoInst*
/* Emits an indirect call through the code address in @addr and appends it
 * to the current bblock. Returns the call as a MonoInst*. */
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/* Attaches the runtime-generic-context argument to @call. When the arch
 * reserves a dedicated RGCTX register, bind rgctx_reg to it; otherwise
 * record the vreg on the call for the backend to pass. */
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2242 call->rgctx_arg_reg = rgctx_reg;
2249 inline static MonoInst*
/* Indirect call that additionally passes an RGCTX argument. The rgctx value
 * is copied to a fresh vreg before the call is built so the backend can bind
 * it to the dedicated register. */
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
/* Forward declarations for the RGCTX lookup helpers used below. */
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Emits a managed call to @method. Chooses among: remoting wrapper calls,
 * delegate Invoke fast path, direct calls (non-virtual or sealed targets),
 * and vtable/IMT-based virtual dispatch. @this being non-NULL makes the
 * call virtual; @imt_arg supplies an explicit IMT value when given. */
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: transparent-proxy targets need the invoke-with-check wrapper. */
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
/* Shared generic code cannot use the wrapper directly; fetch the invoke
 * address from the RGCTX and do an indirect call instead. */
2297 g_assert (cfg->generic_sharing_context);
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
/* Direct-call path: target is non-virtual, or final and not a remoting
 * wrapper; only a null check on "this" is required. */
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
/* True virtual dispatch: call through a vtable (or IMT) slot. */
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2379 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets before the vtable. */
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT fallback: look up the interface offsets table. */
2387 if (slot_reg == -1) {
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class (non-interface) virtual call: index into the vtable proper. */
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/* Like mono_emit_method_call_full, but additionally passes a vtable/RGCTX
 * argument; the value is copied to a fresh vreg before the call is built. */
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: emit a call using the method's own signature and
 * no explicit IMT argument. */
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emits a direct call to the native code address @func with the given
 * managed signature and appends it to the current bblock. */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Emits a call to a registered JIT icall identified by its function
 * address @func; the call goes through the icall's wrapper. */
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the ABS resolver can map the fake address
 * back to the real target at patch time. */
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Widens a small-integer call result to full register width when needed.
 * Applies to pinvoke (and LLVM) calls, where the callee may leave the
 * upper bits of a sub-register return value undefined. Returns either the
 * original ins or the widening conversion instruction. */
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
/* Pick the sign/zero extension matching the return type's load width. */
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Lazily look up and cache the corlib "memcpy" helper method (3 args)
 * declared on the String class; aborts if the installed corlib is too
 * old to provide it.
 */
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
2545 * Emit code to copy a valuetype of type @klass whose address is stored in
2546 * @src->dreg to memory whose address is stored at @dest->dreg.
2549 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2551 MonoInst *iargs [3];
2554 MonoMethod *memcpy_method;
2558 * This check breaks with spilled vars... need to handle it during verification anyway.
2559 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size to copy: native (marshalled) layout vs managed value layout */
2563 n = mono_class_native_size (klass, &align);
2565 n = mono_class_value_size (klass, &align);
2567 #if HAVE_WRITE_BARRIERS
2568 /* if native is true there should be no references in the struct */
2569 if (klass->has_references && !native) {
2570 /* Avoid barriers when storing to the stack */
2571 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2572 (dest->opcode == OP_LDADDR))) {
2573 int context_used = 0;
2578 if (cfg->generic_sharing_context)
2579 context_used = mono_class_check_context_used (klass);
/* Third arg of mono_value_copy: the class, loaded in the right way
 * for shared-generic / AOT / JIT code respectively */
2581 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2583 if (cfg->compile_aot) {
2584 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2586 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2587 mono_class_compute_gc_descriptor (klass);
2591 /* FIXME: this does the memcpy as well (or
2592 should), so we don't need the memcpy
2594 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: emit an inline memcpy instead of a call */
2599 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2600 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2601 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: call the managed memcpy helper with (dest, src, n) */
2605 EMIT_NEW_ICONST (cfg, iargs [2], n);
2607 memcpy_method = get_memcpy_method ();
2608 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Lazily look up and cache the corlib "memset" helper method (3 args)
 * declared on the String class; aborts if the installed corlib is too old.
 */
2613 get_memset_method (void)
2615 static MonoMethod *memset_method = NULL;
2616 if (!memset_method) {
2617 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2619 g_error ("Old corlib found. Install a new one");
2621 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg. Small types use an inline memset; larger ones call the
 * managed memset helper with (dest, 0, n).
 */
2625 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2627 MonoInst *iargs [3];
2630 MonoMethod *memset_method;
2632 /* FIXME: Optimize this for the case when dest is an LDADDR */
2634 mono_class_init (klass);
2635 n = mono_class_value_size (klass, &align);
2637 if (n <= sizeof (gpointer) * 5) {
2638 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2641 memset_method = get_memset_method ();
2643 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2644 EMIT_NEW_ICONST (cfg, iargs [2], n);
2645 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD. Depending on how
 * the method is shared, the rgctx comes from: the method rgctx variable
 * (generic methods), the vtable variable (static / valuetype methods,
 * possibly indirecting through the mrgctx to its class_vtable), or the
 * 'this' argument's vtable otherwise.
 */
2650 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2652 MonoInst *this = NULL;
2654 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types can reach the context through 'this' */
2656 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2657 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2658 !method->klass->valuetype)
2659 EMIT_NEW_ARGLOAD (cfg, this, 0);
2661 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2662 MonoInst *mrgctx_loc, *mrgctx_var;
2665 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2667 mrgctx_loc = mono_get_vtable_var (cfg);
2668 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2671 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2672 MonoInst *vtable_loc, *vtable_var;
2676 vtable_loc = mono_get_vtable_var (cfg);
2677 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2679 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2680 MonoInst *mrgctx_var = vtable_var;
/* The variable holds an mrgctx; load its class vtable out of it */
2683 vtable_reg = alloc_preg (cfg);
2684 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2685 vtable_var->type = STACK_PTR;
/* Default: load the vtable out of the 'this' object */
2691 int vtable_reg, res_reg;
2693 vtable_reg = alloc_preg (cfg);
2694 res_reg = alloc_preg (cfg);
2695 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) and fill an rgctx-entry patch descriptor: which method
 * it belongs to, whether the lookup goes through the mrgctx, the wrapped
 * patch (PATCH_TYPE/PATCH_DATA) and the rgctx info type to fetch.
 */
static MonoJumpInfoRgctxEntry *
2701 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2703 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2704 res->method = method;
2705 res->in_mrgctx = in_mrgctx;
2706 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2707 res->data->type = patch_type;
2708 res->data->data.target = patch_data;
2709 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the
 * argument; ENTRY describes what to fetch and is resolved at patch time.
 */
static inline MonoInst*
2715 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2717 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to fetch the RGCTX_TYPE slot for KLASS from the current method's
 * runtime generic context.
 */
2721 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2722 MonoClass *klass, int rgctx_type)
2724 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2725 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2727 return emit_rgctx_fetch (cfg, rgctx, entry);
2731 * emit_get_rgctx_method:
2733 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2734 * normal constants, else emit a load from the rgctx.
2737 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2738 MonoMethod *cmethod, int rgctx_type)
/* No shared context in use: the method is known at compile time, so emit
 * a plain constant of the requested kind */
2740 if (!context_used) {
2743 switch (rgctx_type) {
2744 case MONO_RGCTX_INFO_METHOD:
2745 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2747 case MONO_RGCTX_INFO_METHOD_RGCTX:
2748 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2751 g_assert_not_reached ();
/* Shared code: fetch the slot from the runtime generic context instead */
2754 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2755 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2757 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to fetch the RGCTX_TYPE slot for FIELD from the current method's
 * runtime generic context.
 */
2762 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2763 MonoClassField *field, int rgctx_type)
2765 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2766 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2768 return emit_rgctx_fetch (cfg, rgctx, entry);
2772 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class-init trampoline for KLASS. The vtable is
 * obtained either through the rgctx (shared code) or as a constant; when
 * the architecture defines MONO_ARCH_VTABLE_REG the vtable is passed in
 * that fixed register.
 */
2775 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2777 MonoInst *vtable_arg;
2779 int context_used = 0;
2781 if (cfg->generic_sharing_context)
2782 context_used = mono_class_check_context_used (klass);
2785 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2786 klass, MONO_RGCTX_INFO_VTABLE);
2788 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2792 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature */
2795 if (COMPILE_LLVM (cfg))
2796 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2798 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2799 #ifdef MONO_ARCH_VTABLE_REG
2800 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2801 cfg->uses_vtable_reg = TRUE;
2808 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * the compile mode: class pointers (MONO_OPT_SHARED), an rgctx-loaded
 * vtable (shared generics), or direct vtable constants (AOT / JIT).
 */
2811 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2813 int vtable_reg = alloc_preg (cfg);
2814 int context_used = 0;
2816 if (cfg->generic_sharing_context)
2817 context_used = mono_class_check_context_used (array_class);
2819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2821 if (cfg->opt & MONO_OPT_SHARED) {
2822 int class_reg = alloc_preg (cfg);
2823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2824 if (cfg->compile_aot) {
2825 int klass_reg = alloc_preg (cfg);
2826 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2827 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2831 } else if (context_used) {
2832 MonoInst *vtable_ins;
2834 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2835 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2837 if (cfg->compile_aot) {
2841 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2843 vt_reg = alloc_preg (cfg);
2844 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2845 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2848 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2854 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, emit code that records the source class
 * (from OBJ_REG's vtable) and the target class KLASS into the thread's
 * MonoJitTlsData, so a failing cast can produce a detailed message.
 */
2858 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2860 if (mini_get_debug_options ()->better_cast_details) {
2861 int to_klass_reg = alloc_preg (cfg);
2862 int vtable_reg = alloc_preg (cfg);
2863 int klass_reg = alloc_preg (cfg);
2864 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2867 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2871 MONO_ADD_INS (cfg->cbb, tls_get);
2872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2873 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2876 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Clear the per-thread cast-details state recorded by save_cast_details ()
 * once the checked cast has succeeded.
 */
2882 reset_cast_details (MonoCompile *cfg)
2884 /* Reset the variables holding the cast details */
2885 if (mini_get_debug_options ()->better_cast_details) {
2886 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2888 MONO_ADD_INS (cfg->cbb, tls_get);
2889 /* It is enough to reset the from field */
2890 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2895 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2896 * generic code is generated.
2899 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing a Nullable<T> is delegated to the managed Nullable.Unbox method */
2901 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2904 MonoInst *rgctx, *addr;
2906 /* FIXME: What if the class is shared? We might not
2907 have to get the address of the method from the
/* Shared generic code: look up the method's code address and rgctx at
 * runtime and do an indirect call */
2909 addr = emit_get_rgctx_method (cfg, context_used, method,
2910 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2912 rgctx = emit_get_rgctx (cfg, method, context_used);
2914 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2916 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit the unbox sequence for sp [0]: check the object is not an array
 * (rank == 0) and that its element class matches KLASS's element class
 * (via rgctx in shared code, direct class check otherwise), then compute
 * the address of the value just past the MonoObject header.
 */
2921 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2925 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2926 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2927 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2928 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2930 obj_reg = sp [0]->dreg;
2931 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2934 /* FIXME: generics */
2935 g_assert (klass->rank == 0);
/* Reject arrays: rank must be 0 */
2938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2939 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2942 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2945 MonoInst *element_class;
2947 /* This assertion is from the unboxcast insn */
2948 g_assert (klass->rank == 0);
/* Shared code: compare against the rgctx-loaded element class */
2950 element_class = emit_get_rgctx_klass (cfg, context_used,
2951 klass->element_class, MONO_RGCTX_INFO_KLASS);
2953 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2954 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2956 save_cast_details (cfg, klass->element_class, obj_reg);
2957 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2958 reset_cast_details (cfg);
/* Result address = object + sizeof (MonoObject), i.e. the boxed payload */
2961 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2962 MONO_ADD_INS (cfg->cbb, add);
2963 add->type = STACK_MP;
2970 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit code to allocate an instance of KLASS (FOR_BOX when the allocation
 * is part of a box operation). Picks between several allocators: the
 * generic mono_object_new (MONO_OPT_SHARED), mono_object_new_specific,
 * a corlib-specialized AOT helper, the GC's managed allocator, or the
 * class-specific allocation function. Returns NULL and sets the cfg
 * exception on error.
 */
2973 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
2975 MonoInst *iargs [2];
2981 MonoInst *iargs [2];
2984 FIXME: we cannot get managed_alloc here because we can't get
2985 the class's vtable (because it's not a closed class)
2987 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2988 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared generic path: load the class (shared opt) or vtable from the rgctx */
2991 if (cfg->opt & MONO_OPT_SHARED)
2992 rgctx_info = MONO_RGCTX_INFO_KLASS;
2994 rgctx_info = MONO_RGCTX_INFO_VTABLE;
2995 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
2997 if (cfg->opt & MONO_OPT_SHARED) {
2998 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3000 alloc_ftn = mono_object_new;
3003 alloc_ftn = mono_object_new_specific;
3006 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below */
3009 if (cfg->opt & MONO_OPT_SHARED) {
3010 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3011 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3013 alloc_ftn = mono_object_new;
3014 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3015 /* This happens often in argument checking code, eg. throw new FooException... */
3016 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3017 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3018 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3020 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3021 MonoMethod *managed_alloc = NULL;
/* Failure to create the vtable is reported as a TypeLoadException */
3025 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3026 cfg->exception_ptr = klass;
3030 #ifndef MONO_CROSS_COMPILE
3031 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3034 if (managed_alloc) {
3035 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3036 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3038 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as the first argument */
3040 guint32 lw = vtable->klass->instance_size;
3041 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3042 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3043 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3046 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3050 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3054 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit the box sequence for VAL of type KLASS: Nullable<T> delegates to the
 * managed Nullable.Box method (indirect through the rgctx in shared code);
 * otherwise allocate the object and store the value past the header.
 * Returns NULL and sets the cfg exception on error.
 */
3057 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3059 MonoInst *alloc, *ins;
3061 if (mono_class_is_nullable (klass)) {
3062 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3065 /* FIXME: What if the class is shared? We might not
3066 have to get the method address from the RGCTX. */
3067 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3068 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3069 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3071 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3073 return mono_emit_method_call (cfg, method, &val, NULL);
3077 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated object, after the header */
3081 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
// FIXME: This doesn't work yet (class libs tests fail?)
/* Decide whether an isinst/castclass against KLASS needs the icall slow
 * path. The leading TRUE currently forces the icall for every class
 * (the inline fast path is disabled until the FIXME above is resolved). */
3087 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3090 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 * Emit the castclass sequence for SRC against KLASS. Shared-generic or
 * otherwise "complex" classes go through the mono_object_castclass icall;
 * simple classes get an inline null-check + vtable/class comparison,
 * throwing InvalidCastException on mismatch. Returns NULL and sets the
 * cfg exception on error.
 */
3093 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3095 MonoBasicBlock *is_null_bb;
3096 int obj_reg = src->dreg;
3097 int vtable_reg = alloc_preg (cfg);
3098 MonoInst *klass_inst = NULL;
3103 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3104 klass, MONO_RGCTX_INFO_KLASS);
3106 if (is_complex_isinst (klass)) {
3107 /* Complex case, handle by an icall */
3113 args [1] = klass_inst;
3115 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3117 /* Simple case, handled by the code below */
/* null always passes a castclass */
3121 NEW_BBLOCK (cfg, is_null_bb);
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3126 save_cast_details (cfg, klass, obj_reg);
3128 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3130 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3132 int klass_reg = alloc_preg (cfg);
3134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single class-pointer comparison */
3136 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3137 /* the remoting code is broken, access the class for now */
3138 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3139 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3141 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3142 cfg->exception_ptr = klass;
3145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3147 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3150 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3153 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3157 MONO_START_BB (cfg, is_null_bb);
3159 reset_cast_details (cfg);
3165 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 * Emit the isinst sequence for SRC against KLASS: result is the object if
 * it is an instance of KLASS, NULL otherwise. Complex classes go through
 * the mono_object_isinst icall; simple ones get inline checks that branch
 * to is_null_bb (success: result already holds the object) or false_bb
 * (failure: result set to 0). Returns NULL and sets the cfg exception on
 * error.
 */
3168 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3171 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3172 int obj_reg = src->dreg;
3173 int vtable_reg = alloc_preg (cfg);
3174 int res_reg = alloc_preg (cfg);
3175 MonoInst *klass_inst = NULL;
3178 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3180 if (is_complex_isinst (klass)) {
3183 /* Complex case, handle by an icall */
3189 args [1] = klass_inst;
3191 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3193 /* Simple case, the code below can handle it */
3197 NEW_BBLOCK (cfg, is_null_bb);
3198 NEW_BBLOCK (cfg, false_bb);
3199 NEW_BBLOCK (cfg, end_bb);
3201 /* Do the assignment at the beginning, so the other assignment can be if converted */
3202 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3203 ins->type = STACK_OBJ;
/* null input: result is null (res_reg already holds it) */
3206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3211 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3212 g_assert (!context_used);
3213 /* the is_null_bb target simply copies the input register to the output */
3214 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3216 int klass_reg = alloc_preg (cfg);
/* Array classes: compare rank, then check the element (cast) class */
3219 int rank_reg = alloc_preg (cfg);
3220 int eclass_reg = alloc_preg (cfg);
3222 g_assert (!context_used);
3223 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3227 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3228 if (klass->cast_class == mono_defaults.object_class) {
3229 int parent_reg = alloc_preg (cfg);
3230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3231 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3232 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3234 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3235 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3236 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3238 } else if (klass->cast_class == mono_defaults.enum_class) {
3239 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3240 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3241 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3242 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3244 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3245 /* Check that the object is a vector too */
3246 int bounds_reg = alloc_preg (cfg);
3247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3249 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3252 /* the is_null_bb target simply copies the input register to the output */
3253 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3255 } else if (mono_class_is_nullable (klass)) {
3256 g_assert (!context_used);
3257 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3258 /* the is_null_bb target simply copies the input register to the output */
3259 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3261 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3262 g_assert (!context_used);
3263 /* the remoting code is broken, access the class for now */
3264 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3265 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3267 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3268 cfg->exception_ptr = klass;
3271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3279 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3280 /* the is_null_bb target simply copies the input register to the output */
3281 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: set the result register to null */
3286 MONO_START_BB (cfg, false_bb);
3288 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3289 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3291 MONO_START_BB (cfg, is_null_bb);
3293 MONO_START_BB (cfg, end_bb);
3299 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3301 /* This opcode takes as input an object reference and a class, and returns:
3302 0) if the object is an instance of the class,
3303 1) if the object is not instance of the class,
3304 2) if the object is a proxy whose type cannot be determined */
3307 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3308 int obj_reg = src->dreg;
3309 int dreg = alloc_ireg (cfg);
3311 int klass_reg = alloc_preg (cfg);
3313 NEW_BBLOCK (cfg, true_bb);
3314 NEW_BBLOCK (cfg, false_bb);
3315 NEW_BBLOCK (cfg, false2_bb);
3316 NEW_BBLOCK (cfg, end_bb);
3317 NEW_BBLOCK (cfg, no_proxy_bb);
/* null input: not an instance (result 1) */
3319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3320 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3322 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3323 NEW_BBLOCK (cfg, interface_fail_bb);
3325 tmp_reg = alloc_preg (cfg);
3326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3327 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3328 MONO_START_BB (cfg, interface_fail_bb);
3329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still yield 2 */
3331 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3333 tmp_reg = alloc_preg (cfg);
3334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface class: distinguish proxies from ordinary objects */
3338 tmp_reg = alloc_preg (cfg);
3339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3342 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3343 tmp_reg = alloc_preg (cfg);
3344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3347 tmp_reg = alloc_preg (cfg);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3352 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3355 MONO_START_BB (cfg, no_proxy_bb);
3357 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge: materialize result values 1 / 2 / 0 in dreg */
3360 MONO_START_BB (cfg, false_bb);
3362 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3363 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3365 MONO_START_BB (cfg, false2_bb);
3367 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3368 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3370 MONO_START_BB (cfg, true_bb);
3372 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3374 MONO_START_BB (cfg, end_bb);
/* Placeholder instruction carrying the result register and stack type */
3377 MONO_INST_NEW (cfg, ins, OP_ICONST);
3379 ins->type = STACK_I4;
3385 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3387 /* This opcode takes as input an object reference and a class, and returns:
3388 0) if the object is an instance of the class,
3389 1) if the object is a proxy whose type cannot be determined
3390 an InvalidCastException exception is thrown otherwhise*/
3393 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3394 int obj_reg = src->dreg;
3395 int dreg = alloc_ireg (cfg);
3396 int tmp_reg = alloc_preg (cfg);
3397 int klass_reg = alloc_preg (cfg);
3399 NEW_BBLOCK (cfg, end_bb);
3400 NEW_BBLOCK (cfg, ok_result_bb);
/* null input: cast trivially succeeds (result 0) */
3402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3405 save_cast_details (cfg, klass, obj_reg);
3407 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3408 NEW_BBLOCK (cfg, interface_fail_bb);
3410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3411 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3412 MONO_START_BB (cfg, interface_fail_bb);
3413 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: must be a transparent proxy, else it throws */
3415 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3417 tmp_reg = alloc_preg (cfg);
3418 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3419 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3420 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undecidable, result 1 */
3422 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3426 NEW_BBLOCK (cfg, no_proxy_bb);
3428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3430 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3432 tmp_reg = alloc_preg (cfg);
3433 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3436 tmp_reg = alloc_preg (cfg);
3437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3439 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3441 NEW_BBLOCK (cfg, fail_1_bb);
3443 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3445 MONO_START_BB (cfg, fail_1_bb);
3447 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3448 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a failing castclass here throws InvalidCastException */
3450 MONO_START_BB (cfg, no_proxy_bb);
3452 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3455 MONO_START_BB (cfg, ok_result_bb);
3457 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3459 MONO_START_BB (cfg, end_bb);
/* Placeholder instruction carrying the result register and stack type */
3462 MONO_INST_NEW (cfg, ins, OP_ICONST);
3464 ins->type = STACK_I4;
3470 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 * Emit an inlined delegate construction for delegate class KLASS bound to
 * METHOD with TARGET as the 'this' of the invocation: allocate the
 * delegate, fill its target/method fields, optionally a per-domain
 * compiled-code slot, and the invoke_impl trampoline. Returns NULL and
 * sets the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
3473 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3475 gpointer *trampoline;
3476 MonoInst *obj, *method_ins, *tramp_ins;
3480 obj = handle_alloc (cfg, klass, FALSE, 0);
3484 /* Inline the contents of mono_delegate_ctor */
3486 /* Set target field */
3487 /* Optimize away setting of NULL target */
3488 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3489 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3491 /* Set method field */
3492 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3493 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3496 * To avoid looking up the compiled code belonging to the target method
3497 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3498 * store it, and we fill it after the method has been compiled.
3500 if (!cfg->compile_aot && !method->dynamic) {
3501 MonoInst *code_slot_ins;
3504 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The slot is shared per (domain, method) via a lazily-created hash */
3506 domain = mono_domain_get ();
3507 mono_domain_lock (domain);
3508 if (!domain_jit_info (domain)->method_code_hash)
3509 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3510 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3512 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3513 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3515 mono_domain_unlock (domain);
3517 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3522 /* Set invoke_impl field */
3523 if (cfg->compile_aot) {
3524 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3526 trampoline = mono_create_delegate_trampoline (klass);
3527 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3531 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper to
 * allocate a multi-dimensional array. The icall uses a vararg calling
 * convention, so the method is flagged as having varargs and LLVM
 * compilation is disabled for it.
 */
3537 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3539 MonoJitICallInfo *info;
3541 /* Need to register the icall so it gets an icall wrapper */
3542 info = mono_get_array_new_va_icall (rank);
3544 cfg->flags |= MONO_CFG_HAS_VARARGS;
3546 /* mono_array_new_va () needs a vararg calling convention */
3547 cfg->disable_llvm = TRUE;
3549 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3550 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var, and add a dummy use of the variable in the exit
 * block so liveness analysis keeps it alive for the whole method.
 * Does nothing if there is no got_var or it was already allocated.
 */
3554 mono_emit_load_got_addr (MonoCompile *cfg)
3556 MonoInst *getaddr, *dummy_use;
3558 if (!cfg->got_var || cfg->got_var_allocated)
3561 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3562 getaddr->dreg = cfg->got_var->dreg;
3564 /* Add it to the start of the first bblock */
/* Prepend by hand when the bblock already has code, since MONO_ADD_INS appends. */
3565 if (cfg->bb_entry->code) {
3566 getaddr->next = cfg->bb_entry->code;
3567 cfg->bb_entry->code = getaddr;
3570 MONO_ADD_INS (cfg->bb_entry, getaddr);
3572 cfg->got_var_allocated = TRUE;
3575 * Add a dummy use to keep the got_var alive, since real uses might
3576 * only be generated by the back ends.
3577 * Add it to end_bblock, so the variable's lifetime covers the whole
3579 * It would be better to make the usage of the got var explicit in all
3580 * cases when the backend needs it (i.e. calls, throw etc.), so this
3581 * wouldn't be needed.
3583 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3584 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size limit for inlining, lazily read from the MONO_INLINELIMIT
 * environment variable in mono_method_check_inlining ().
 * NOTE(review): the lazy init is not guarded by a lock — presumably this
 * runs under the JIT lock or a benign race is acceptable; confirm. */
3587 static int inline_limit;
3588 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects generic sharing, deep inline nesting, non-inlinable method kinds,
 * bodies over the size limit, methods whose class still needs its .cctor to
 * run, methods with declarative security (CAS), and (on soft-float targets)
 * methods taking or returning R4.
 */
3591 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3593 MonoMethodHeaderSummary header;
3595 #ifdef MONO_ARCH_SOFT_FLOAT
3596 MonoMethodSignature *sig = mono_method_signature (method);
3600 if (cfg->generic_sharing_context)
/* Cap recursion depth of nested inlining. */
3603 if (cfg->inline_depth > 10)
3606 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature->ret' below — no 'signature' variable is visible
 * in this extract (only 'sig' under MONO_ARCH_SOFT_FLOAT); presumably it is
 * declared on a line elided from this chunk. Verify against the full file. */
3607 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3608 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3609 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3614 if (!mono_method_get_header_summary (method, &header))
3617 /*runtime, icall and pinvoke are checked by summary call*/
3618 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3619 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3620 (method->klass->marshalbyref) ||
3624 /* also consider num_locals? */
3625 /* Do the size check early to avoid creating vtables */
3626 if (!inline_limit_inited) {
3627 if (getenv ("MONO_INLINELIMIT"))
3628 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3630 inline_limit = INLINE_LENGTH_LIMIT;
3631 inline_limit_inited = TRUE;
3633 if (header.code_size >= inline_limit)
3637 * if we can initialize the class of the method right away, we do,
3638 * otherwise we don't allow inlining if the class needs initialization,
3639 * since it would mean inserting a call to mono_runtime_class_init()
3640 * inside the inlined code
3642 if (!(cfg->opt & MONO_OPT_SHARED)) {
3643 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3644 if (cfg->run_cctors && method->klass->has_cctor) {
3645 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3646 if (!method->klass->runtime_info)
3647 /* No vtable created yet */
3649 vtable = mono_class_vtable (cfg->domain, method->klass);
3652 /* This makes so that inline cannot trigger */
3653 /* .cctors: too many apps depend on them */
3654 /* running with a specific order... */
3655 if (! vtable->initialized)
3657 mono_runtime_class_init (vtable);
3659 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3660 if (!method->klass->runtime_info)
3661 /* No vtable created yet */
3663 vtable = mono_class_vtable (cfg->domain, method->klass);
3666 if (!vtable->initialized)
3671 * If we're compiling for shared code
3672 * the cctor will need to be run at aot method load time, for example,
3673 * or at the end of the compilation of the inlining method.
3675 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3680 * CAS - do not inline methods with declarative security
3681 * Note: this has to be before any possible return TRUE;
3683 if (mono_method_has_declsec (method))
3686 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods that take or return R4. */
3688 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3690 for (i = 0; i < sig->param_count; ++i)
3691 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class. Already-initialized vtables
 * (non-AOT), beforefieldinit classes, and accesses from instance methods of
 * the same class are exempt.
 */
3699 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3701 if (vtable->initialized && !cfg->compile_aot)
3704 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3707 if (!mono_class_needs_cctor_run (vtable->klass, method))
3710 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3711 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional array
 * of element class KLASS, with an optional bounds check (BCHECK).
 * On x86/amd64 a single LEA is used when the element size is a power of two;
 * otherwise a multiply + add sequence is emitted.
 */
3718 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3722 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3724 mono_class_init (klass);
3725 size = mono_class_array_element_size (klass);
3727 mult_reg = alloc_preg (cfg);
3728 array_reg = arr->dreg;
3729 index_reg = index->dreg;
3731 #if SIZEOF_REGISTER == 8
3732 /* The array reg is 64 bits but the index reg is only 32 */
3733 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
3735 index2_reg = index_reg;
3737 index2_reg = alloc_preg (cfg);
3738 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3741 if (index->type == STACK_I8) {
3742 index2_reg = alloc_preg (cfg);
3743 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3745 index2_reg = index_reg;
3750 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3752 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into an LEA for power-of-two element sizes. */
3753 if (size == 1 || size == 2 || size == 4 || size == 8) {
3754 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3756 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3757 ins->type = STACK_PTR;
3763 add_reg = alloc_preg (cfg);
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3766 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3767 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3768 ins->type = STACK_PTR;
3769 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a two-dimensional array:
 * subtract each dimension's lower bound, range-check both adjusted indexes
 * against the per-dimension lengths (throwing IndexOutOfRangeException),
 * then compute addr = arr + ((idx1 * len2) + idx2) * size + vector offset.
 * Only compiled when the target has native mul/div (depends on OP_LMUL).
 */
3774 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3776 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3778 int bounds_reg = alloc_preg (cfg);
3779 int add_reg = alloc_preg (cfg);
3780 int mult_reg = alloc_preg (cfg);
3781 int mult2_reg = alloc_preg (cfg);
3782 int low1_reg = alloc_preg (cfg);
3783 int low2_reg = alloc_preg (cfg);
3784 int high1_reg = alloc_preg (cfg);
3785 int high2_reg = alloc_preg (cfg);
3786 int realidx1_reg = alloc_preg (cfg);
3787 int realidx2_reg = alloc_preg (cfg);
3788 int sum_reg = alloc_preg (cfg);
3793 mono_class_init (klass);
3794 size = mono_class_array_element_size (klass);
3796 index1 = index_ins1->dreg;
3797 index2 = index_ins2->dreg;
3799 /* range checking */
3800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3801 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound, unsigned-compare vs length. */
3803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3804 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3805 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3806 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3807 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3808 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3809 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
3811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3812 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3813 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3815 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3816 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3817 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof (MonoArray, vector) */
3819 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3820 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3821 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3822 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3823 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3825 ins->type = STACK_MP;
3827 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address-of-element computation for an Array Address/Get/Set
 * helper CMETHOD. Rank 1 uses the inline 1-dim path; rank 2 uses the 2-dim
 * intrinsic when OP_LMUL is available and intrinsics are enabled; otherwise
 * falls back to a call to the marshal-generated Address wrapper.
 */
3834 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3838 MonoMethod *addr_method;
/* A setter's signature carries the value as an extra trailing parameter. */
3841 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3844 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3846 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3847 /* emit_ldelema_2 depends on OP_LMUL */
3848 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3849 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3853 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3854 addr_method = mono_marshal_get_array_address (rank, element_size);
3855 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default break policy: honor every break instruction / Debugger.Break ().
 */
3860 static MonoBreakPolicy
3861 always_insert_breakpoint (MonoMethod *method)
3863 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
3866 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3869 * mono_set_break_policy:
3870 * policy_callback: the new callback function
3872 * Allow embedders to decide wherther to actually obey breakpoint instructions
3873 * (both break IL instructions and Debugger.Break () method calls), for example
3874 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3875 * untrusted or semi-trusted code.
3877 * @policy_callback will be called every time a break point instruction needs to
3878 * be inserted with the method argument being the method that calls Debugger.Break()
3879 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3880 * if it wants the breakpoint to not be effective in the given method.
3881 * #MONO_BREAK_POLICY_ALWAYS is the default.
3884 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-break policy. */
3886 if (policy_callback)
3887 break_policy_func = policy_callback;
3889 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.
 * NOTE(review): the name is misspelled ("brekpoint") but callers in this
 * file use the same spelling, so it is kept for consistency.
 */
3893 should_insert_brekpoint (MonoMethod *method) {
3894 switch (break_policy_func (method)) {
3895 case MONO_BREAK_POLICY_ALWAYS:
3897 case MONO_BREAK_POLICY_NEVER:
3899 case MONO_BREAK_POLICY_ON_DBG:
3900 return mono_debug_using_mono_debugger ();
/* Unknown value from an embedder-supplied callback: warn and fall through
 * to the default return (not visible in this extract). */
3902 g_warning ("Incorrect value returned from break policy callback");
3907 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element load/store for the generic array accessors:
 * compute the element address, then copy between it and the value location
 * (args [2]) in the direction selected by IS_SET.
 */
3909 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
3911 MonoInst *addr, *store, *load;
3912 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
3914 /* the bounds check is already done by the callers */
3915 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: value -> array element. */
3917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
3918 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* GetGenericValueImpl: array element -> value. */
3920 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3921 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsics dispatcher: recognize calls to well-known corlib methods and
 * emit specialized IR instead of a real call. Covers String.get_Chars /
 * get_Length / InternalSetChar, Object.GetType / InternalGetHashCode / .ctor,
 * the Array getters and GetGenericValueImpl/SetGenericValueImpl,
 * RuntimeHelpers.get_OffsetToStringData, Thread.SpinWait_nop/MemoryBarrier,
 * Monitor.Enter/Exit fast paths, the Interlocked family, Debugger.Break,
 * Environment.get_IsRunningOnWindows, Math and SIMD intrinsics.
 * Returns the emitted instruction, or NULL when no intrinsic applies (the
 * caller then emits a normal call).
 */
3927 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3929 MonoInst *ins = NULL;
/* NOTE(review): lazy init of this static is unsynchronized — presumably
 * benign (idempotent lookup) or protected by an outer JIT lock; confirm. */
3931 static MonoClass *runtime_helpers_class = NULL;
3932 if (! runtime_helpers_class)
3933 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3934 "System.Runtime.CompilerServices", "RuntimeHelpers");
3936 if (cmethod->klass == mono_defaults.string_class) {
3937 if (strcmp (cmethod->name, "get_Chars") == 0) {
3938 int dreg = alloc_ireg (cfg);
3939 int index_reg = alloc_preg (cfg);
3940 int mult_reg = alloc_preg (cfg);
3941 int add_reg = alloc_preg (cfg);
3943 #if SIZEOF_REGISTER == 8
3944 /* The array reg is 64 bits but the index reg is only 32 */
3945 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3947 index_reg = args [1]->dreg;
3949 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3951 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 fast path: one LEA with scale 2 for the UTF-16 char array. */
3952 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3953 add_reg = ins->dreg;
3954 /* Avoid a warning */
3956 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3959 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3960 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3961 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3962 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3964 type_from_op (ins, NULL, NULL);
3966 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3967 int dreg = alloc_ireg (cfg);
3968 /* Decompose later to allow more optimizations */
3969 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3970 ins->type = STACK_I4;
3971 cfg->cbb->has_array_access = TRUE;
3972 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3975 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3976 int mult_reg = alloc_preg (cfg);
3977 int add_reg = alloc_preg (cfg);
3979 /* The corlib functions check for oob already. */
3980 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3981 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3982 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3983 return cfg->cbb->last_ins;
3986 } else if (cmethod->klass == mono_defaults.object_class) {
3988 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj->vtable->type, with a fault check on the object dereference. */
3989 int dreg = alloc_preg (cfg);
3990 int vt_reg = alloc_preg (cfg);
3991 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3992 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3993 type_from_op (ins, NULL, NULL);
3996 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address; only valid with a non-moving GC. */
3997 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3998 int dreg = alloc_ireg (cfg);
3999 int t1 = alloc_ireg (cfg);
4001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4002 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4003 ins->type = STACK_I4;
4007 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty: emit a NOP. */
4008 MONO_INST_NEW (cfg, ins, OP_NOP);
4009 MONO_ADD_INS (cfg->cbb, ins);
4013 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl; the first
 * character ('G' vs 'S') selects load vs store. */
4014 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4015 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4016 if (cmethod->name [0] != 'g')
4019 if (strcmp (cmethod->name, "get_Rank") == 0) {
4020 int dreg = alloc_ireg (cfg);
4021 int vtable_reg = alloc_preg (cfg);
4022 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4023 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4024 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4025 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4026 type_from_op (ins, NULL, NULL);
4029 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4030 int dreg = alloc_ireg (cfg);
4032 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4033 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4034 type_from_op (ins, NULL, NULL);
4039 } else if (cmethod->klass == runtime_helpers_class) {
4041 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4042 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4046 } else if (cmethod->klass == mono_defaults.thread_class) {
4047 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4048 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4049 MONO_ADD_INS (cfg->cbb, ins);
4051 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4052 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4053 MONO_ADD_INS (cfg->cbb, ins);
4056 } else if (cmethod->klass == mono_defaults.monitor_class) {
4057 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Monitor.Enter/Exit via an arch-specific trampoline that takes the object
 * in a fixed register (or via normal argument passing under LLVM). */
4058 if (strcmp (cmethod->name, "Enter") == 0) {
4061 if (COMPILE_LLVM (cfg)) {
4063 * Pass the argument normally, the LLVM backend will handle the
4064 * calling convention problems.
4066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4069 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4070 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4071 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4074 return (MonoInst*)call;
4075 } else if (strcmp (cmethod->name, "Exit") == 0) {
4078 if (COMPILE_LLVM (cfg)) {
4079 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4081 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4082 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4083 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4084 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4087 return (MonoInst*)call;
4089 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4090 MonoMethod *fast_method = NULL;
4092 /* Avoid infinite recursion */
4093 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4094 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4095 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4098 if (strcmp (cmethod->name, "Enter") == 0 ||
4099 strcmp (cmethod->name, "Exit") == 0)
4100 fast_method = mono_monitor_get_fast_path (cmethod);
4104 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4106 } else if (cmethod->klass->image == mono_defaults.corlib &&
4107 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4108 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4111 #if SIZEOF_REGISTER == 8
4112 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4113 /* 64 bit reads are already atomic */
4114 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4115 ins->dreg = mono_alloc_preg (cfg);
4116 ins->inst_basereg = args [0]->dreg;
4117 ins->inst_offset = 0;
4118 MONO_ADD_INS (cfg->cbb, ins);
4122 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1/-1. */
4123 if (strcmp (cmethod->name, "Increment") == 0) {
4124 MonoInst *ins_iconst;
4127 if (fsig->params [0]->type == MONO_TYPE_I4)
4128 opcode = OP_ATOMIC_ADD_NEW_I4;
4129 #if SIZEOF_REGISTER == 8
4130 else if (fsig->params [0]->type == MONO_TYPE_I8)
4131 opcode = OP_ATOMIC_ADD_NEW_I8;
4134 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4135 ins_iconst->inst_c0 = 1;
4136 ins_iconst->dreg = mono_alloc_ireg (cfg);
4137 MONO_ADD_INS (cfg->cbb, ins_iconst);
4139 MONO_INST_NEW (cfg, ins, opcode);
4140 ins->dreg = mono_alloc_ireg (cfg);
4141 ins->inst_basereg = args [0]->dreg;
4142 ins->inst_offset = 0;
4143 ins->sreg2 = ins_iconst->dreg;
4144 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4145 MONO_ADD_INS (cfg->cbb, ins);
4147 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4148 MonoInst *ins_iconst;
4151 if (fsig->params [0]->type == MONO_TYPE_I4)
4152 opcode = OP_ATOMIC_ADD_NEW_I4;
4153 #if SIZEOF_REGISTER == 8
4154 else if (fsig->params [0]->type == MONO_TYPE_I8)
4155 opcode = OP_ATOMIC_ADD_NEW_I8;
4158 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4159 ins_iconst->inst_c0 = -1;
4160 ins_iconst->dreg = mono_alloc_ireg (cfg);
4161 MONO_ADD_INS (cfg->cbb, ins_iconst);
4163 MONO_INST_NEW (cfg, ins, opcode);
4164 ins->dreg = mono_alloc_ireg (cfg);
4165 ins->inst_basereg = args [0]->dreg;
4166 ins->inst_offset = 0;
4167 ins->sreg2 = ins_iconst->dreg;
4168 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4169 MONO_ADD_INS (cfg->cbb, ins);
4171 } else if (strcmp (cmethod->name, "Add") == 0) {
4174 if (fsig->params [0]->type == MONO_TYPE_I4)
4175 opcode = OP_ATOMIC_ADD_NEW_I4;
4176 #if SIZEOF_REGISTER == 8
4177 else if (fsig->params [0]->type == MONO_TYPE_I8)
4178 opcode = OP_ATOMIC_ADD_NEW_I8;
4182 MONO_INST_NEW (cfg, ins, opcode);
4183 ins->dreg = mono_alloc_ireg (cfg);
4184 ins->inst_basereg = args [0]->dreg;
4185 ins->inst_offset = 0;
4186 ins->sreg2 = args [1]->dreg;
4187 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4188 MONO_ADD_INS (cfg->cbb, ins);
4191 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4193 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4194 if (strcmp (cmethod->name, "Exchange") == 0) {
4196 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4198 if (fsig->params [0]->type == MONO_TYPE_I4)
4199 opcode = OP_ATOMIC_EXCHANGE_I4;
4200 #if SIZEOF_REGISTER == 8
4201 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4202 (fsig->params [0]->type == MONO_TYPE_I))
4203 opcode = OP_ATOMIC_EXCHANGE_I8;
4205 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4206 opcode = OP_ATOMIC_EXCHANGE_I4;
4211 MONO_INST_NEW (cfg, ins, opcode);
4212 ins->dreg = mono_alloc_ireg (cfg);
4213 ins->inst_basereg = args [0]->dreg;
4214 ins->inst_offset = 0;
4215 ins->sreg2 = args [1]->dreg;
4216 MONO_ADD_INS (cfg->cbb, ins);
4218 switch (fsig->params [0]->type) {
4220 ins->type = STACK_I4;
4224 ins->type = STACK_I8;
4226 case MONO_TYPE_OBJECT:
4227 ins->type = STACK_OBJ;
4230 g_assert_not_reached ();
4233 #if HAVE_WRITE_BARRIERS
/* Storing a reference needs a GC write barrier; the dummy use keeps the
 * stored value alive for the barrier call. */
4235 MonoInst *dummy_use;
4236 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4237 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4238 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4242 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4244 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4245 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4247 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4248 if (fsig->params [1]->type == MONO_TYPE_I4)
4250 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4251 size = sizeof (gpointer);
4252 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4255 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4256 ins->dreg = alloc_ireg (cfg);
4257 ins->sreg1 = args [0]->dreg;
4258 ins->sreg2 = args [1]->dreg;
4259 ins->sreg3 = args [2]->dreg;
4260 ins->type = STACK_I4;
4261 MONO_ADD_INS (cfg->cbb, ins);
4262 } else if (size == 8) {
4263 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4264 ins->dreg = alloc_ireg (cfg);
4265 ins->sreg1 = args [0]->dreg;
4266 ins->sreg2 = args [1]->dreg;
4267 ins->sreg3 = args [2]->dreg;
4268 ins->type = STACK_I8;
4269 MONO_ADD_INS (cfg->cbb, ins);
4271 /* g_assert_not_reached (); */
4273 #if HAVE_WRITE_BARRIERS
4275 MonoInst *dummy_use;
4276 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4277 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4278 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4282 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4286 } else if (cmethod->klass->image == mono_defaults.corlib) {
4287 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4288 && strcmp (cmethod->klass->name, "Debugger") == 0) {
/* Emit OP_BREAK or a NOP depending on the embedder's break policy. */
4289 if (should_insert_brekpoint (cfg->method))
4290 MONO_INST_NEW (cfg, ins, OP_BREAK);
4292 MONO_INST_NEW (cfg, ins, OP_NOP);
4293 MONO_ADD_INS (cfg->cbb, ins);
4296 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4297 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Compile-time constant: 1 on Windows targets, 0 elsewhere. */
4299 EMIT_NEW_ICONST (cfg, ins, 1);
4301 EMIT_NEW_ICONST (cfg, ins, 0);
4305 } else if (cmethod->klass == mono_defaults.math_class) {
4307 * There is general branches code for Min/Max, but it does not work for
4309 * http://everything2.com/?node_id=1051618
4313 #ifdef MONO_ARCH_SIMD_INTRINSICS
4314 if (cfg->opt & MONO_OPT_SIMD) {
4315 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally give the architecture backend a chance. */
4321 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4325 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime methods to faster implementations. Currently
 * only String.InternalAllocateStr is handled: when allocation profiling is
 * off, it is replaced by a call to the GC's managed allocator.
 * Returns the replacement call, or (presumably) NULL when no redirection
 * applies — the tail of the function is not visible in this extract.
 */
4328 inline static MonoInst*
4329 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4330 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4332 if (method->klass == mono_defaults.string_class) {
4333 /* managed string allocation support */
4334 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4335 MonoInst *iargs [2];
4336 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4337 MonoMethod *managed_alloc = NULL;
4339 g_assert (vtable); /*Should not fail since it System.String*/
4340 #ifndef MONO_CROSS_COMPILE
4341 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4345 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4346 iargs [1] = args [0];
4347 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for each argument of the inlined
 * method (including 'this') and emit stores of the caller's stack values
 * into them, populating cfg->args.
 */
4354 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4356 MonoInst *store, *temp;
4359 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its type comes from the stack entry. */
4360 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4363 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4364 * would be different than the MonoInst's used to represent arguments, and
4365 * the ldelema implementation can't deal with that.
4366 * Solution: When ldelema is used on an inline argument, create a var for
4367 * it, emit ldelema on that var, and emit the saving code below in
4368 * inline_method () if needed.
4370 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4371 cfg->args [i] = temp;
4372 /* This uses cfg->args [i] which is set by the preceeding line */
4373 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4374 store->cil_code = sp [0]->cil_code;
/* Debugging aids: when enabled, inlining is restricted to methods whose
 * full name matches the prefix given in an environment variable. */
4379 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4380 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4382 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return whether CALLED_METHOD's full name starts with the prefix in the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable (read once and
 * cached; an unset variable means no restriction).
 */
4384 check_inline_called_method_name_limit (MonoMethod *called_method)
4387 static char *limit = NULL;
4389 if (limit == NULL) {
4390 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4392 if (limit_string != NULL)
4393 limit = limit_string;
4395 limit = (char *) "";
/* Empty limit means the env var was unset: allow everything. */
4398 if (limit [0] != '\0') {
4399 char *called_method_name = mono_method_full_name (called_method, TRUE);
4401 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4402 g_free (called_method_name);
4404 //return (strncmp_result <= 0);
4405 return (strncmp_result == 0);
4412 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Caller-side counterpart of check_inline_called_method_name_limit ():
 * return whether CALLER_METHOD's full name starts with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (read once and
 * cached; an unset variable means no restriction).
 */
4414 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4417 static char *limit = NULL;
4419 if (limit == NULL) {
4420 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4421 if (limit_string != NULL) {
4422 limit = limit_string;
4424 limit = (char *) "";
/* Empty limit means the env var was unset: allow everything. */
4428 if (limit [0] != '\0') {
4429 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4431 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4432 g_free (caller_method_name);
4434 //return (strncmp_result <= 0);
4435 return (strncmp_result == 0);
4443 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4444 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4446 MonoInst *ins, *rvar = NULL;
4447 MonoMethodHeader *cheader;
4448 MonoBasicBlock *ebblock, *sbblock;
4450 MonoMethod *prev_inlined_method;
4451 MonoInst **prev_locals, **prev_args;
4452 MonoType **prev_arg_types;
4453 guint prev_real_offset;
4454 GHashTable *prev_cbb_hash;
4455 MonoBasicBlock **prev_cil_offset_to_bb;
4456 MonoBasicBlock *prev_cbb;
4457 unsigned char* prev_cil_start;
4458 guint32 prev_cil_offset_to_bb_len;
4459 MonoMethod *prev_current_method;
4460 MonoGenericContext *prev_generic_context;
4461 gboolean ret_var_set, prev_ret_var_set;
4463 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4465 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4466 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4469 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4470 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4474 if (cfg->verbose_level > 2)
4475 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4477 if (!cmethod->inline_info) {
4478 mono_jit_stats.inlineable_methods++;
4479 cmethod->inline_info = 1;
4482 /* allocate local variables */
4483 cheader = mono_method_get_header (cmethod);
4485 if (cheader == NULL || mono_loader_get_last_error ()) {
4487 mono_metadata_free_mh (cheader);
4488 mono_loader_clear_error ();
4492 /* allocate space to store the return value */
4493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4494 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4498 prev_locals = cfg->locals;
4499 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4500 for (i = 0; i < cheader->num_locals; ++i)
4501 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4503 /* allocate start and end blocks */
4504 /* This is needed so if the inline is aborted, we can clean up */
4505 NEW_BBLOCK (cfg, sbblock);
4506 sbblock->real_offset = real_offset;
4508 NEW_BBLOCK (cfg, ebblock);
4509 ebblock->block_num = cfg->num_bblocks++;
4510 ebblock->real_offset = real_offset;
4512 prev_args = cfg->args;
4513 prev_arg_types = cfg->arg_types;
4514 prev_inlined_method = cfg->inlined_method;
4515 cfg->inlined_method = cmethod;
4516 cfg->ret_var_set = FALSE;
4517 cfg->inline_depth ++;
4518 prev_real_offset = cfg->real_offset;
4519 prev_cbb_hash = cfg->cbb_hash;
4520 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4521 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4522 prev_cil_start = cfg->cil_start;
4523 prev_cbb = cfg->cbb;
4524 prev_current_method = cfg->current_method;
4525 prev_generic_context = cfg->generic_context;
4526 prev_ret_var_set = cfg->ret_var_set;
4528 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4530 ret_var_set = cfg->ret_var_set;
4532 cfg->inlined_method = prev_inlined_method;
4533 cfg->real_offset = prev_real_offset;
4534 cfg->cbb_hash = prev_cbb_hash;
4535 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4536 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4537 cfg->cil_start = prev_cil_start;
4538 cfg->locals = prev_locals;
4539 cfg->args = prev_args;
4540 cfg->arg_types = prev_arg_types;
4541 cfg->current_method = prev_current_method;
4542 cfg->generic_context = prev_generic_context;
4543 cfg->ret_var_set = prev_ret_var_set;
4544 cfg->inline_depth --;
4546 if ((costs >= 0 && costs < 60) || inline_allways) {
4547 if (cfg->verbose_level > 2)
4548 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4550 mono_jit_stats.inlined_methods++;
4552 /* always add some code to avoid block split failures */
4553 MONO_INST_NEW (cfg, ins, OP_NOP);
4554 MONO_ADD_INS (prev_cbb, ins);
4556 prev_cbb->next_bb = sbblock;
4557 link_bblock (cfg, prev_cbb, sbblock);
4560 * Get rid of the begin and end bblocks if possible to aid local
4563 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4565 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4566 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4568 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4569 MonoBasicBlock *prev = ebblock->in_bb [0];
4570 mono_merge_basic_blocks (cfg, prev, ebblock);
4572 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4573 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4574 cfg->cbb = prev_cbb;
4582 * If the inlined method contains only a throw, then the ret var is not
4583 * set, so set it to a dummy value.
4586 static double r8_0 = 0.0;
4588 switch (rvar->type) {
4590 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4593 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4598 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4601 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4602 ins->type = STACK_R8;
4603 ins->inst_p0 = (void*)&r8_0;
4604 ins->dreg = rvar->dreg;
4605 MONO_ADD_INS (cfg->cbb, ins);
4608 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4611 g_assert_not_reached ();
4615 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4618 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4621 if (cfg->verbose_level > 2)
4622 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4623 cfg->exception_type = MONO_EXCEPTION_NONE;
4624 mono_loader_clear_error ();
4626 /* This gets rid of the newly added bblocks */
4627 cfg->cbb = prev_cbb;
4629 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4634 * Some of these comments may well be out-of-date.
4635 * Design decisions: we do a single pass over the IL code (and we do bblock
4636 * splitting/merging in the few cases when it's required: a back jump to an IL
4637 * address that was not already seen as bblock starting point).
4638 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4639 * Complex operations are decomposed into simpler ones right away. We need to let the
4640 * arch-specific code peek and poke inside this process somehow (except when the
4641 * optimizations can take advantage of the full semantic info of coarse opcodes).
4642 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4643 * MonoInst->opcode initially is the IL opcode or some simplification of that
4644 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4645 * opcode with value bigger than OP_LAST.
4646 * At this point the IR can be handed over to an interpreter, a dumb code generator
4647 * or to the optimizing code generator that will translate it to SSA form.
4649 * Profiling directed optimizations.
4650 * We may compile by default with few or no optimizations and instrument the code
4651 * or the user may indicate what methods to optimize the most either in a config file
4652 * or through repeated runs where the compiler applies offline the optimizations to
4653 * each method and then decides if it was worth it.
/*
 * IL-verification helper macros used by mono_method_to_ir ().  Each one
 * checks a structural invariant of the CIL stream and bails out through the
 * UNVERIFIED handler (defined elsewhere in this file) when it fails.
 */
4656 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* the evaluation stack must hold at least 'num' entries before popping */
4657 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* pushing 'num' entries must not exceed the method's declared max_stack */
4658 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* argument/local indices are range-checked with an unsigned compare so a
 * negative index is rejected as well */
4659 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4660 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the full operand of the current opcode must fit before the end of the IL */
4661 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4662 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4663 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4665 /* offset from br.s -> br like opcodes */
/* adding this to a short-form branch opcode yields the long form,
 * e.g. CEE_BR_S (0x2B) + 13 == CEE_BR (0x38) */
4666 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE when no *other* basic block starts at IL address 'ip',
 *   i.e. 'ip' can still be treated as belonging to 'bb'.  A NULL entry in
 *   cil_offset_to_bb means no block was registered at that offset.
 */
4669 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4671 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4673 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Single forward pass over the IL in [start, end) that registers a basic
 *   block (via GET_BBLOCK) at every branch/switch target and at the
 *   instruction following each branch, so the main conversion loop knows
 *   where blocks begin.  Bodies that merely throw get their containing
 *   block flagged out_of_line.
 *   NOTE(review): the error path that sets *pos is not visible in this
 *   view -- confirm against the full function.
 */
4677 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4679 unsigned char *ip = start;
4680 unsigned char *target;
4683 MonoBasicBlock *bblock;
4684 const MonoOpcode *opcode;
/* decode one opcode; mono_opcode_value () advances ip past the opcode bytes */
4687 cli_addr = ip - start;
4688 i = mono_opcode_value ((const guint8 **)&ip, end);
4691 opcode = &mono_opcodes [i];
/* dispatch on the operand format to find how far ip must advance and
 * whether the instruction carries branch targets */
4692 switch (opcode->argument) {
4693 case MonoInlineNone:
4696 case MonoInlineString:
4697 case MonoInlineType:
4698 case MonoInlineField:
4699 case MonoInlineMethod:
4702 case MonoShortInlineR:
4709 case MonoShortInlineVar:
4710 case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next
 * instruction (opcode + operand = 2 bytes) */
4713 case MonoShortInlineBrTarget:
4714 target = start + cli_addr + 2 + (signed char)ip [1];
4715 GET_BBLOCK (cfg, bblock, target);
4718 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte displacement relative to the next instruction
 * (opcode + operand = 5 bytes) */
4720 case MonoInlineBrTarget:
4721 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4722 GET_BBLOCK (cfg, bblock, target);
4725 GET_BBLOCK (cfg, bblock, ip);
/* switch: n 4-byte targets, all relative to the first instruction after
 * the whole switch (cli_addr is advanced past 5 + 4*n first) */
4727 case MonoInlineSwitch: {
4728 guint32 n = read32 (ip + 1);
4731 cli_addr += 5 + 4 * n;
4732 target = start + cli_addr;
4733 GET_BBLOCK (cfg, bblock, target);
4735 for (j = 0; j < n; ++j) {
4736 target = start + cli_addr + (gint32)read32 (ip);
4737 GET_BBLOCK (cfg, bblock, target);
4740 g_assert_not_reached ();
/* mark blocks that end in a throw as out-of-line so later passes can move
 * them off the hot path: scan backwards for the block containing the throw */
4750 if (i == CEE_THROW) {
4751 unsigned char *bb_start = ip - 1;
4753 /* Find the start of the bblock containing the throw */
4755 while ((bb_start >= start) && !bblock) {
4756 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4760 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method token without rejecting open constructed types.
 *   For wrapper methods the token indexes the wrapper's private data table
 *   rather than the image's metadata tables.
 */
4769 static inline MonoMethod *
4770 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4774 if (m->wrapper_type != MONO_WRAPPER_NONE)
4775 return mono_method_get_wrapper_data (m, token);
/* normal case: resolve through the declaring image, honoring 'context' */
4777 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when we are NOT compiling
 *   shared (generic-sharing) code a method on an open constructed type is
 *   additionally rejected -- the handling for that case is not visible in
 *   this view.
 */
4782 static inline MonoMethod *
4783 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4785 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4787 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type token to a MonoClass (via the wrapper data table for
 *   wrapper methods) and ensure the class is initialized before returning.
 */
4793 static inline MonoClass*
4794 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4798 if (method->wrapper_type != MONO_WRAPPER_NONE)
4799 klass = mono_method_get_wrapper_data (method, token);
4801 klass = mono_class_get_full (method->klass->image, token, context);
4803 mono_class_init (klass);
4808 * Returns TRUE if the JIT should abort inlining because "callee"
4809 * is influenced by security attributes.
4812 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* while inlining (cfg->method != caller) any declarative security on the
 * callee makes us give up on the inline; the code below only runs for the
 * non-inline case */
4816 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4820 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4821 if (result == MONO_JIT_SECURITY_OK)
4824 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4825 /* Generate code to throw a SecurityException before the actual call/link */
4826 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args: the constant 4 selects the failure kind passed to the managed
 * LinkDemandSecurityException helper -- TODO confirm its meaning */
4829 NEW_ICONST (cfg, args [0], 4);
4830 NEW_METHODCONST (cfg, args [1], caller);
4831 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4832 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4833 /* don't hide previous results */
4834 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4835 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns the managed SecurityManager.ThrowException (1-arg) method,
 *   looked up on first use and cached in a function-local static.
 */
4843 throw_exception (void)
4845 static MonoMethod *method = NULL;
/* lazy one-time lookup; NOTE(review): no locking visible here -- confirm
 * callers serialize or that a racy double-lookup is benign */
4848 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4849 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that calls SecurityManager.ThrowException (ex) so the given
 *   pre-built exception object is raised when the compiled code runs.
 */
4856 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4858 MonoMethod *thrower = throw_exception ();
/* pass the exception object as a pointer constant argument */
4861 EMIT_NEW_PCONST (cfg, args [0], ex);
4862 mono_emit_method_call (cfg, thrower, args, NULL);
4866 * Return the original method if a wrapper is specified. We can only access
4867 * the custom attributes from the original method.
4870 get_original_method (MonoMethod *method)
/* not a wrapper: the method itself carries its attributes */
4872 if (method->wrapper_type == MONO_WRAPPER_NONE)
4875 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4876 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4879 /* in other cases we need to find the original method */
4880 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check -- if 'caller' may not access 'field', emit code
 *   that throws the returned exception at runtime.
 */
4884 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4885 MonoBasicBlock *bblock, unsigned char *ip)
4887 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4888 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4890 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check -- if 'caller' may not call 'callee', emit code
 *   that throws the returned exception at runtime.
 */
4894 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4895 MonoBasicBlock *bblock, unsigned char *ip)
4897 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4898 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4900 emit_throw_exception (cfg, ex);
4904 * Check that the IL instructions at ip are the array initialization
4905 * sequence and return the pointer to the data and the size.
4908 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4911 * newarr[System.Int32]
4913 * ldtoken field valuetype ...
4914 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [2..5] is the little-endian ldtoken operand; its high byte (ip [5])
 * being 0x4 means a FIELD_DEF token, and ip [7..10] is the call's method
 * token */
4916 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4917 guint32 token = read32 (ip + 7);
4918 guint32 field_token = read32 (ip + 2);
4919 guint32 field_index = field_token & 0xffffff;
4921 const char *data_ptr;
4923 MonoMethod *cmethod;
4924 MonoClass *dummy_class;
4925 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4931 *out_field_token = field_token;
4933 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only accept mscorlib's RuntimeHelpers.InitializeArray */
4936 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* restrict to element types whose in-memory layout matches the metadata
 * blob byte-for-byte */
4938 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4939 case MONO_TYPE_BOOLEAN:
4943 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4944 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4945 case MONO_TYPE_CHAR:
4955 return NULL; /* stupid ARM FP swapped format */
/* sanity check: computed blob size must fit in the field's declared size */
4965 if (size > mono_type_size (field->type, &dummy_align))
4968 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4969 if (!method->klass->image->dynamic) {
4970 field_index = read32 (ip + 2) & 0xffffff;
4971 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4972 data_ptr = mono_image_rva_map (method->klass->image, rva);
4973 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4974 /* for aot code we do the lookup on load */
4975 if (aot && data_ptr)
4976 return GUINT_TO_POINTER (rva);
4978 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: the data lives behind the field object itself */
4980 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on 'cfg' whose message includes a
 *   disassembly of the offending IL instruction (or a note when the method
 *   body is empty).  The header is queued on headers_to_free for cleanup.
 */
4988 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4990 char *method_fname = mono_method_full_name (method, TRUE);
4992 MonoMethodHeader *header = mono_method_get_header (method);
4994 if (header->code_size == 0)
4995 method_code = g_strdup ("method body is empty.");
4997 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4998 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4999 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5000 g_free (method_fname);
5001 g_free (method_code);
5002 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Store a pre-built exception object on the MonoCompile; the slot is
 *   registered as a GC root (see MONO_GC_REGISTER_ROOT) so the collector
 *   keeps the object alive while compilation aborts.
 */
5006 set_exception_object (MonoCompile *cfg, MonoException *exception)
5008 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5009 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5010 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Decide whether 'klass' is a reference type, first resolving type
 *   variables through the generic-sharing context when compiling shared
 *   code.
 */
5014 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5018 if (cfg->generic_sharing_context)
5019 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5021 type = &klass->byval_arg;
5022 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of *sp into local 'n'.  When the value on the stack is a
 *   freshly emitted integer constant and the store would be a plain
 *   register move, retarget the constant's destination register instead of
 *   emitting a separate move.
 */
5026 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5029 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5030 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5031 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5032 /* Optimize reg-reg moves away */
5034 * Can't optimize other opcodes, since sp[0] might point to
5035 * the last ins of a decomposed opcode.
5037 sp [0]->dreg = (cfg)->locals [n]->dreg;
5039 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5044 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes "ldloca N; initobj T" and replaces the pair with a direct
 * zeroing of local N (a NULL pointer constant for reference types, VZERO
 * for value types), so no address of the local is ever taken.  Returns an
 * updated ip on success -- the skip/return handling is not fully visible
 * in this view. */
5047 static inline unsigned char *
5048 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5057 local = read16 (ip + 2);
/* the following initobj must lie in the same basic block and not run off
 * the end of the IL */
5061 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5062 gboolean skip = FALSE;
5064 /* From the INITOBJ case */
5065 token = read32 (ip + 2);
5066 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5067 CHECK_TYPELOAD (klass);
5068 if (generic_class_is_reference_type (cfg, klass)) {
5069 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5070 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5071 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5072 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5073 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk up the parent chain; TRUE if 'class' is System.Exception or
 *   derives from it.
 */
5086 is_exception_class (MonoClass *class)
5089 if (class == mono_defaults.exception_class)
5091 class = class->parent;
5097 * mono_method_to_ir:
5099 * Translate the .net IL into linear IR.
5102 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5103 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5104 guint inline_offset, gboolean is_virtual_call)
5107 MonoInst *ins, **sp, **stack_start;
5108 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5109 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5110 MonoMethod *cmethod, *method_definition;
5111 MonoInst **arg_array;
5112 MonoMethodHeader *header;
5114 guint32 token, ins_flag;
5116 MonoClass *constrained_call = NULL;
5117 unsigned char *ip, *end, *target, *err_pos;
5118 static double r8_0 = 0.0;
5119 MonoMethodSignature *sig;
5120 MonoGenericContext *generic_context = NULL;
5121 MonoGenericContainer *generic_container = NULL;
5122 MonoType **param_types;
5123 int i, n, start_new_bblock, dreg;
5124 int num_calls = 0, inline_costs = 0;
5125 int breakpoint_id = 0;
5127 MonoBoolean security, pinvoke;
5128 MonoSecurityManager* secman = NULL;
5129 MonoDeclSecurityActions actions;
5130 GSList *class_inits = NULL;
5131 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5133 gboolean init_locals, seq_points, skip_dead_blocks;
5135 /* serialization and xdomain stuff may need access to private fields and methods */
5136 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5137 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5138 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5139 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5140 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5141 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5143 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5145 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5146 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5147 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5148 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5150 image = method->klass->image;
5151 header = mono_method_get_header (method);
5152 generic_container = mono_method_get_generic_container (method);
5153 sig = mono_method_signature (method);
5154 num_args = sig->hasthis + sig->param_count;
5155 ip = (unsigned char*)header->code;
5156 cfg->cil_start = ip;
5157 end = ip + header->code_size;
5158 mono_jit_stats.cil_code_size += header->code_size;
5159 init_locals = header->init_locals;
5161 seq_points = cfg->gen_seq_points && cfg->method == method;
5164 * Methods without init_locals set could cause asserts in various passes
5169 method_definition = method;
5170 while (method_definition->is_inflated) {
5171 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5172 method_definition = imethod->declaring;
5175 /* SkipVerification is not allowed if core-clr is enabled */
5176 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5178 dont_verify_stloc = TRUE;
5181 if (!dont_verify && mini_method_verify (cfg, method_definition))
5182 goto exception_exit;
5184 if (mono_debug_using_mono_debugger ())
5185 cfg->keep_cil_nops = TRUE;
5187 if (sig->is_inflated)
5188 generic_context = mono_method_get_context (method);
5189 else if (generic_container)
5190 generic_context = &generic_container->context;
5191 cfg->generic_context = generic_context;
5193 if (!cfg->generic_sharing_context)
5194 g_assert (!sig->has_type_parameters);
5196 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5197 g_assert (method->is_inflated);
5198 g_assert (mono_method_get_context (method)->method_inst);
5200 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5201 g_assert (sig->generic_param_count);
5203 if (cfg->method == method) {
5204 cfg->real_offset = 0;
5206 cfg->real_offset = inline_offset;
5209 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5210 cfg->cil_offset_to_bb_len = header->code_size;
5212 cfg->current_method = method;
5214 if (cfg->verbose_level > 2)
5215 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5217 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5219 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5220 for (n = 0; n < sig->param_count; ++n)
5221 param_types [n + sig->hasthis] = sig->params [n];
5222 cfg->arg_types = param_types;
5224 dont_inline = g_list_prepend (dont_inline, method);
5225 if (cfg->method == method) {
5227 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5228 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5231 NEW_BBLOCK (cfg, start_bblock);
5232 cfg->bb_entry = start_bblock;
5233 start_bblock->cil_code = NULL;
5234 start_bblock->cil_length = 0;
5237 NEW_BBLOCK (cfg, end_bblock);
5238 cfg->bb_exit = end_bblock;
5239 end_bblock->cil_code = NULL;
5240 end_bblock->cil_length = 0;
5241 g_assert (cfg->num_bblocks == 2);
5243 arg_array = cfg->args;
5245 if (header->num_clauses) {
5246 cfg->spvars = g_hash_table_new (NULL, NULL);
5247 cfg->exvars = g_hash_table_new (NULL, NULL);
5249 /* handle exception clauses */
5250 for (i = 0; i < header->num_clauses; ++i) {
5251 MonoBasicBlock *try_bb;
5252 MonoExceptionClause *clause = &header->clauses [i];
5253 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5254 try_bb->real_offset = clause->try_offset;
5255 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5256 tblock->real_offset = clause->handler_offset;
5257 tblock->flags |= BB_EXCEPTION_HANDLER;
5259 link_bblock (cfg, try_bb, tblock);
5261 if (*(ip + clause->handler_offset) == CEE_POP)
5262 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5264 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5265 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5266 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5267 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5268 MONO_ADD_INS (tblock, ins);
5270 /* todo: is a fault block unsafe to optimize? */
5271 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5272 tblock->flags |= BB_EXCEPTION_UNSAFE;
5276 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5278 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5280 /* catch and filter blocks get the exception object on the stack */
5281 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5282 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5283 MonoInst *dummy_use;
5285 /* mostly like handle_stack_args (), but just sets the input args */
5286 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5287 tblock->in_scount = 1;
5288 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5289 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5292 * Add a dummy use for the exvar so its liveness info will be
5296 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5298 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5299 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5300 tblock->flags |= BB_EXCEPTION_HANDLER;
5301 tblock->real_offset = clause->data.filter_offset;
5302 tblock->in_scount = 1;
5303 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5304 /* The filter block shares the exvar with the handler block */
5305 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5306 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5307 MONO_ADD_INS (tblock, ins);
5311 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5312 clause->data.catch_class &&
5313 cfg->generic_sharing_context &&
5314 mono_class_check_context_used (clause->data.catch_class)) {
5316 * In shared generic code with catch
5317 * clauses containing type variables
5318 * the exception handling code has to
5319 * be able to get to the rgctx.
5320 * Therefore we have to make sure that
5321 * the vtable/mrgctx argument (for
5322 * static or generic methods) or the
5323 * "this" argument (for non-static
5324 * methods) are live.
5326 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5327 mini_method_get_context (method)->method_inst ||
5328 method->klass->valuetype) {
5329 mono_get_vtable_var (cfg);
5331 MonoInst *dummy_use;
5333 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5338 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5339 cfg->cbb = start_bblock;
5340 cfg->args = arg_array;
5341 mono_save_args (cfg, sig, inline_args);
5344 /* FIRST CODE BLOCK */
5345 NEW_BBLOCK (cfg, bblock);
5346 bblock->cil_code = ip;
5350 ADD_BBLOCK (cfg, bblock);
5352 if (cfg->method == method) {
5353 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5354 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5355 MONO_INST_NEW (cfg, ins, OP_BREAK);
5356 MONO_ADD_INS (bblock, ins);
5360 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5361 secman = mono_security_manager_get_methods ();
5363 security = (secman && mono_method_has_declsec (method));
5364 /* at this point having security doesn't mean we have any code to generate */
5365 if (security && (cfg->method == method)) {
5366 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5367 * And we do not want to enter the next section (with allocation) if we
5368 * have nothing to generate */
5369 security = mono_declsec_get_demands (method, &actions);
5372 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5373 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5375 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5376 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5377 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5379 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5380 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5384 mono_custom_attrs_free (custom);
5387 custom = mono_custom_attrs_from_class (wrapped->klass);
5388 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5392 mono_custom_attrs_free (custom);
5395 /* not a P/Invoke after all */
5400 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5401 /* we use a separate basic block for the initialization code */
5402 NEW_BBLOCK (cfg, init_localsbb);
5403 cfg->bb_init = init_localsbb;
5404 init_localsbb->real_offset = cfg->real_offset;
5405 start_bblock->next_bb = init_localsbb;
5406 init_localsbb->next_bb = bblock;
5407 link_bblock (cfg, start_bblock, init_localsbb);
5408 link_bblock (cfg, init_localsbb, bblock);
5410 cfg->cbb = init_localsbb;
5412 start_bblock->next_bb = bblock;
5413 link_bblock (cfg, start_bblock, bblock);
5416 /* at this point we know, if security is TRUE, that some code needs to be generated */
5417 if (security && (cfg->method == method)) {
5420 mono_jit_stats.cas_demand_generation++;
5422 if (actions.demand.blob) {
5423 /* Add code for SecurityAction.Demand */
5424 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5425 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5426 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5427 mono_emit_method_call (cfg, secman->demand, args, NULL);
5429 if (actions.noncasdemand.blob) {
5430 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5431 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5432 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5433 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5434 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5435 mono_emit_method_call (cfg, secman->demand, args, NULL);
5437 if (actions.demandchoice.blob) {
5438 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5439 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5440 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5441 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5442 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5446 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5448 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5451 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5452 /* check if this is native code, e.g. an icall or a p/invoke */
5453 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5454 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5456 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5457 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5459 /* if this is a native call then it can only be JITted from platform code */
5460 if ((icall || pinvk) && method->klass && method->klass->image) {
5461 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5462 MonoException *ex = icall ? mono_get_exception_security () :
5463 mono_get_exception_method_access ();
5464 emit_throw_exception (cfg, ex);
5471 if (header->code_size == 0)
5474 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5479 if (cfg->method == method)
5480 mono_debug_init_method (cfg, bblock, breakpoint_id);
5482 for (n = 0; n < header->num_locals; ++n) {
5483 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5488 /* We force the vtable variable here for all shared methods
5489 for the possibility that they might show up in a stack
5490 trace where their exact instantiation is needed. */
5491 if (cfg->generic_sharing_context && method == cfg->method) {
5492 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5493 mini_method_get_context (method)->method_inst ||
5494 method->klass->valuetype) {
5495 mono_get_vtable_var (cfg);
5497 /* FIXME: Is there a better way to do this?
5498 We need the variable live for the duration
5499 of the whole method. */
5500 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5504 /* add a check for this != NULL to inlined methods */
5505 if (is_virtual_call) {
5508 NEW_ARGLOAD (cfg, arg_ins, 0);
5509 MONO_ADD_INS (cfg->cbb, arg_ins);
5510 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5513 skip_dead_blocks = !dont_verify;
5514 if (skip_dead_blocks) {
5515 original_bb = bb = mono_basic_block_split (method, &error);
5516 if (!mono_error_ok (&error)) {
5517 mono_error_cleanup (&error);
5523 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5524 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5527 start_new_bblock = 0;
5530 if (cfg->method == method)
5531 cfg->real_offset = ip - header->code;
5533 cfg->real_offset = inline_offset;
5538 if (start_new_bblock) {
5539 bblock->cil_length = ip - bblock->cil_code;
5540 if (start_new_bblock == 2) {
5541 g_assert (ip == tblock->cil_code);
5543 GET_BBLOCK (cfg, tblock, ip);
5545 bblock->next_bb = tblock;
5548 start_new_bblock = 0;
5549 for (i = 0; i < bblock->in_scount; ++i) {
5550 if (cfg->verbose_level > 3)
5551 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5552 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5556 g_slist_free (class_inits);
5559 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5560 link_bblock (cfg, bblock, tblock);
5561 if (sp != stack_start) {
5562 handle_stack_args (cfg, stack_start, sp - stack_start);
5564 CHECK_UNVERIFIABLE (cfg);
5566 bblock->next_bb = tblock;
5569 for (i = 0; i < bblock->in_scount; ++i) {
5570 if (cfg->verbose_level > 3)
5571 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5572 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5575 g_slist_free (class_inits);
5580 if (skip_dead_blocks) {
5581 int ip_offset = ip - header->code;
5583 if (ip_offset == bb->end)
5587 int op_size = mono_opcode_size (ip, end);
5588 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5590 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5592 if (ip_offset + op_size == bb->end) {
5593 MONO_INST_NEW (cfg, ins, OP_NOP);
5594 MONO_ADD_INS (bblock, ins);
5595 start_new_bblock = 1;
5603 * Sequence points are points where the debugger can place a breakpoint.
5604 * Currently, we generate these automatically at points where the IL
5607 if (seq_points && sp == stack_start) {
5608 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5609 MONO_ADD_INS (cfg->cbb, ins);
5612 bblock->real_offset = cfg->real_offset;
5614 if ((cfg->method == method) && cfg->coverage_info) {
5615 guint32 cil_offset = ip - header->code;
5616 cfg->coverage_info->data [cil_offset].cil_code = ip;
5618 /* TODO: Use an increment here */
5619 #if defined(TARGET_X86)
5620 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5621 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5623 MONO_ADD_INS (cfg->cbb, ins);
5625 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5626 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5630 if (cfg->verbose_level > 3)
5631 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5635 if (cfg->keep_cil_nops)
5636 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5638 MONO_INST_NEW (cfg, ins, OP_NOP);
5640 MONO_ADD_INS (bblock, ins);
5643 if (should_insert_brekpoint (cfg->method))
5644 MONO_INST_NEW (cfg, ins, OP_BREAK);
5646 MONO_INST_NEW (cfg, ins, OP_NOP);
5648 MONO_ADD_INS (bblock, ins);
5654 CHECK_STACK_OVF (1);
5655 n = (*ip)-CEE_LDARG_0;
5657 EMIT_NEW_ARGLOAD (cfg, ins, n);
5665 CHECK_STACK_OVF (1);
5666 n = (*ip)-CEE_LDLOC_0;
5668 EMIT_NEW_LOCLOAD (cfg, ins, n);
5677 n = (*ip)-CEE_STLOC_0;
5680 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5682 emit_stloc_ir (cfg, sp, header, n);
5689 CHECK_STACK_OVF (1);
5692 EMIT_NEW_ARGLOAD (cfg, ins, n);
5698 CHECK_STACK_OVF (1);
5701 NEW_ARGLOADA (cfg, ins, n);
5702 MONO_ADD_INS (cfg->cbb, ins);
5712 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5714 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5719 CHECK_STACK_OVF (1);
5722 EMIT_NEW_LOCLOAD (cfg, ins, n);
5726 case CEE_LDLOCA_S: {
5727 unsigned char *tmp_ip;
5729 CHECK_STACK_OVF (1);
5730 CHECK_LOCAL (ip [1]);
5732 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5738 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5747 CHECK_LOCAL (ip [1]);
5748 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5750 emit_stloc_ir (cfg, sp, header, ip [1]);
5755 CHECK_STACK_OVF (1);
5756 EMIT_NEW_PCONST (cfg, ins, NULL);
5757 ins->type = STACK_OBJ;
5762 CHECK_STACK_OVF (1);
5763 EMIT_NEW_ICONST (cfg, ins, -1);
5776 CHECK_STACK_OVF (1);
5777 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5783 CHECK_STACK_OVF (1);
5785 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5791 CHECK_STACK_OVF (1);
5792 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5798 CHECK_STACK_OVF (1);
5799 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5800 ins->type = STACK_I8;
5801 ins->dreg = alloc_dreg (cfg, STACK_I8);
5803 ins->inst_l = (gint64)read64 (ip);
5804 MONO_ADD_INS (bblock, ins);
5810 gboolean use_aotconst = FALSE;
5812 #ifdef TARGET_POWERPC
5813 /* FIXME: Clean this up */
5814 if (cfg->compile_aot)
5815 use_aotconst = TRUE;
5818 /* FIXME: we should really allocate this only late in the compilation process */
5819 f = mono_domain_alloc (cfg->domain, sizeof (float));
5821 CHECK_STACK_OVF (1);
5827 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5829 dreg = alloc_freg (cfg);
5830 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5831 ins->type = STACK_R8;
5833 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5834 ins->type = STACK_R8;
5835 ins->dreg = alloc_dreg (cfg, STACK_R8);
5837 MONO_ADD_INS (bblock, ins);
5847 gboolean use_aotconst = FALSE;
5849 #ifdef TARGET_POWERPC
5850 /* FIXME: Clean this up */
5851 if (cfg->compile_aot)
5852 use_aotconst = TRUE;
5855 /* FIXME: we should really allocate this only late in the compilation process */
5856 d = mono_domain_alloc (cfg->domain, sizeof (double));
5858 CHECK_STACK_OVF (1);
5864 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5866 dreg = alloc_freg (cfg);
5867 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5868 ins->type = STACK_R8;
5870 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5871 ins->type = STACK_R8;
5872 ins->dreg = alloc_dreg (cfg, STACK_R8);
5874 MONO_ADD_INS (bblock, ins);
5883 MonoInst *temp, *store;
5885 CHECK_STACK_OVF (1);
5889 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5890 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5892 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5895 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5908 if (sp [0]->type == STACK_R8)
5909 /* we need to pop the value from the x86 FP stack */
5910 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5919 if (stack_start != sp)
5921 token = read32 (ip + 1);
5922 /* FIXME: check the signature matches */
5923 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5928 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5929 GENERIC_SHARING_FAILURE (CEE_JMP);
5931 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5932 CHECK_CFG_EXCEPTION;
5934 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5936 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5939 /* Handle tail calls similarly to calls */
5940 n = fsig->param_count + fsig->hasthis;
5942 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5943 call->method = cmethod;
5944 call->tail_call = TRUE;
5945 call->signature = mono_method_signature (cmethod);
5946 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5947 call->inst.inst_p0 = cmethod;
5948 for (i = 0; i < n; ++i)
5949 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5951 mono_arch_emit_call (cfg, call);
5952 MONO_ADD_INS (bblock, (MonoInst*)call);
5955 for (i = 0; i < num_args; ++i)
5956 /* Prevent arguments from being optimized away */
5957 arg_array [i]->flags |= MONO_INST_VOLATILE;
5959 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5960 ins = (MonoInst*)call;
5961 ins->inst_p0 = cmethod;
5962 MONO_ADD_INS (bblock, ins);
5966 start_new_bblock = 1;
5971 case CEE_CALLVIRT: {
5972 MonoInst *addr = NULL;
5973 MonoMethodSignature *fsig = NULL;
5975 int virtual = *ip == CEE_CALLVIRT;
5976 int calli = *ip == CEE_CALLI;
5977 gboolean pass_imt_from_rgctx = FALSE;
5978 MonoInst *imt_arg = NULL;
5979 gboolean pass_vtable = FALSE;
5980 gboolean pass_mrgctx = FALSE;
5981 MonoInst *vtable_arg = NULL;
5982 gboolean check_this = FALSE;
5983 gboolean supported_tail_call = FALSE;
5986 token = read32 (ip + 1);
5993 if (method->wrapper_type != MONO_WRAPPER_NONE)
5994 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5996 fsig = mono_metadata_parse_signature (image, token);
5998 n = fsig->param_count + fsig->hasthis;
6000 if (method->dynamic && fsig->pinvoke) {
6004 * This is a call through a function pointer using a pinvoke
6005 * signature. Have to create a wrapper and call that instead.
6006 * FIXME: This is very slow, need to create a wrapper at JIT time
6007 * instead based on the signature.
6009 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6010 EMIT_NEW_PCONST (cfg, args [1], fsig);
6012 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6015 MonoMethod *cil_method;
6017 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6018 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6019 cil_method = cmethod;
6020 } else if (constrained_call) {
6021 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6023 * This is needed since get_method_constrained can't find
6024 * the method in klass representing a type var.
6025 * The type var is guaranteed to be a reference type in this
6028 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6029 cil_method = cmethod;
6030 g_assert (!cmethod->klass->valuetype);
6032 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6035 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6036 cil_method = cmethod;
6041 if (!dont_verify && !cfg->skip_visibility) {
6042 MonoMethod *target_method = cil_method;
6043 if (method->is_inflated) {
6044 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6046 if (!mono_method_can_access_method (method_definition, target_method) &&
6047 !mono_method_can_access_method (method, cil_method))
6048 METHOD_ACCESS_FAILURE;
6051 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6052 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6054 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6055 /* MS.NET seems to silently convert this to a callvirt */
6060 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6061 * converts to a callvirt.
6063 * tests/bug-515884.il is an example of this behavior
6065 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6066 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6067 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6071 if (!cmethod->klass->inited)
6072 if (!mono_class_init (cmethod->klass))
6075 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6076 mini_class_is_system_array (cmethod->klass)) {
6077 array_rank = cmethod->klass->rank;
6078 fsig = mono_method_signature (cmethod);
6080 fsig = mono_method_signature (cmethod);
6085 if (fsig->pinvoke) {
6086 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6087 check_for_pending_exc, FALSE);
6088 fsig = mono_method_signature (wrapper);
6089 } else if (constrained_call) {
6090 fsig = mono_method_signature (cmethod);
6092 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6096 mono_save_token_info (cfg, image, token, cil_method);
6098 n = fsig->param_count + fsig->hasthis;
6100 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6101 if (check_linkdemand (cfg, method, cmethod))
6103 CHECK_CFG_EXCEPTION;
6106 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6107 g_assert_not_reached ();
6110 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6113 if (!cfg->generic_sharing_context && cmethod)
6114 g_assert (!mono_method_check_context_used (cmethod));
6118 //g_assert (!virtual || fsig->hasthis);
6122 if (constrained_call) {
6124 * We have the `constrained.' prefix opcode.
6126 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6128 * The type parameter is instantiated as a valuetype,
6129 * but that type doesn't override the method we're
6130 * calling, so we need to box `this'.
6132 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6133 ins->klass = constrained_call;
6134 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6135 CHECK_CFG_EXCEPTION;
6136 } else if (!constrained_call->valuetype) {
6137 int dreg = alloc_preg (cfg);
6140 * The type parameter is instantiated as a reference
6141 * type. We have a managed pointer on the stack, so
6142 * we need to dereference it here.
6144 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6145 ins->type = STACK_OBJ;
6147 } else if (cmethod->klass->valuetype)
6149 constrained_call = NULL;
6152 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6156 * If the callee is a shared method, then its static cctor
6157 * might not get called after the call was patched.
6159 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6160 emit_generic_class_init (cfg, cmethod->klass);
6161 CHECK_TYPELOAD (cmethod->klass);
6164 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6165 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6166 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6167 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6168 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6171 * Pass vtable iff target method might
6172 * be shared, which means that sharing
6173 * is enabled for its class and its
6174 * context is sharable (and it's not a
6177 if (sharing_enabled && context_sharable &&
6178 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6182 if (cmethod && mini_method_get_context (cmethod) &&
6183 mini_method_get_context (cmethod)->method_inst) {
6184 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6185 MonoGenericContext *context = mini_method_get_context (cmethod);
6186 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6188 g_assert (!pass_vtable);
6190 if (sharing_enabled && context_sharable)
6194 if (cfg->generic_sharing_context && cmethod) {
6195 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6197 context_used = mono_method_check_context_used (cmethod);
6199 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6200 /* Generic method interface
6201 calls are resolved via a
6202 helper function and don't
6204 if (!cmethod_context || !cmethod_context->method_inst)
6205 pass_imt_from_rgctx = TRUE;
6209 * If a shared method calls another
6210 * shared method then the caller must
6211 * have a generic sharing context
6212 * because the magic trampoline
6213 * requires it. FIXME: We shouldn't
6214 * have to force the vtable/mrgctx
6215 * variable here. Instead there
6216 * should be a flag in the cfg to
6217 * request a generic sharing context.
6220 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6221 mono_get_vtable_var (cfg);
6226 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6228 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6230 CHECK_TYPELOAD (cmethod->klass);
6231 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6236 g_assert (!vtable_arg);
6238 if (!cfg->compile_aot) {
6240 * emit_get_rgctx_method () calls mono_class_vtable () so check
6241 * for type load errors before.
6243 mono_class_setup_vtable (cmethod->klass);
6244 CHECK_TYPELOAD (cmethod->klass);
6247 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6249 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6250 MONO_METHOD_IS_FINAL (cmethod)) {
6257 if (pass_imt_from_rgctx) {
6258 g_assert (!pass_vtable);
6261 imt_arg = emit_get_rgctx_method (cfg, context_used,
6262 cmethod, MONO_RGCTX_INFO_METHOD);
6266 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6268 /* Calling virtual generic methods */
6269 if (cmethod && virtual &&
6270 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6271 !(MONO_METHOD_IS_FINAL (cmethod) &&
6272 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6273 mono_method_signature (cmethod)->generic_param_count) {
6274 MonoInst *this_temp, *this_arg_temp, *store;
6275 MonoInst *iargs [4];
6277 g_assert (mono_method_signature (cmethod)->is_inflated);
6279 /* Prevent inlining of methods that contain indirect calls */
6282 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6284 /* The llvm vcall trampolines don't support generic virtual calls yet */
6284 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6285 g_assert (!imt_arg);
6287 g_assert (cmethod->is_inflated);
6288 imt_arg = emit_get_rgctx_method (cfg, context_used,
6289 cmethod, MONO_RGCTX_INFO_METHOD);
6290 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6294 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6295 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6296 MONO_ADD_INS (bblock, store);
6298 /* FIXME: This should be a managed pointer */
6299 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6301 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6302 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6303 cmethod, MONO_RGCTX_INFO_METHOD);
6304 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6305 addr = mono_emit_jit_icall (cfg,
6306 mono_helper_compile_generic_method, iargs);
6308 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6310 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6313 if (!MONO_TYPE_IS_VOID (fsig->ret))
6314 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6316 CHECK_CFG_EXCEPTION;
6323 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6324 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6326 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6330 /* FIXME: runtime generic context pointer for jumps? */
6331 /* FIXME: handle this for generic sharing eventually */
6332 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6335 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6338 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6339 /* Handle tail calls similarly to calls */
6340 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6342 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6343 call->tail_call = TRUE;
6344 call->method = cmethod;
6345 call->signature = mono_method_signature (cmethod);
6348 * We implement tail calls by storing the actual arguments into the
6349 * argument variables, then emitting a CEE_JMP.
6351 for (i = 0; i < n; ++i) {
6352 /* Prevent argument from being register allocated */
6353 arg_array [i]->flags |= MONO_INST_VOLATILE;
6354 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6358 ins = (MonoInst*)call;
6359 ins->inst_p0 = cmethod;
6360 ins->inst_p1 = arg_array [0];
6361 MONO_ADD_INS (bblock, ins);
6362 link_bblock (cfg, bblock, end_bblock);
6363 start_new_bblock = 1;
6365 CHECK_CFG_EXCEPTION;
6367 /* skip CEE_RET as well */
6373 /* Conversion to a JIT intrinsic */
6374 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6375 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6376 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6381 CHECK_CFG_EXCEPTION;
6389 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6390 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6391 mono_method_check_inlining (cfg, cmethod) &&
6392 !g_list_find (dont_inline, cmethod)) {
6394 gboolean allways = FALSE;
6396 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6397 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6398 /* Prevent inlining of methods that call wrappers */
6400 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6404 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6406 cfg->real_offset += 5;
6409 if (!MONO_TYPE_IS_VOID (fsig->ret))
6410 /* *sp is already set by inline_method */
6413 inline_costs += costs;
6419 inline_costs += 10 * num_calls++;
6421 /* Tail recursion elimination */
6422 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6423 gboolean has_vtargs = FALSE;
6426 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6429 /* keep it simple */
6430 for (i = fsig->param_count - 1; i >= 0; i--) {
6431 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6436 for (i = 0; i < n; ++i)
6437 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6438 MONO_INST_NEW (cfg, ins, OP_BR);
6439 MONO_ADD_INS (bblock, ins);
6440 tblock = start_bblock->out_bb [0];
6441 link_bblock (cfg, bblock, tblock);
6442 ins->inst_target_bb = tblock;
6443 start_new_bblock = 1;
6445 /* skip the CEE_RET, too */
6446 if (ip_in_bb (cfg, bblock, ip + 5))
6456 /* Generic sharing */
6457 /* FIXME: only do this for generic methods if
6458 they are not shared! */
6459 if (context_used && !imt_arg && !array_rank &&
6460 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6461 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6462 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6463 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6466 g_assert (cfg->generic_sharing_context && cmethod);
6470 * We are compiling a call to a
6471 * generic method from shared code,
6472 * which means that we have to look up
6473 * the method in the rgctx and do an
6476 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6479 /* Indirect calls */
6481 g_assert (!imt_arg);
6483 if (*ip == CEE_CALL)
6484 g_assert (context_used);
6485 else if (*ip == CEE_CALLI)
6486 g_assert (!vtable_arg);
6488 /* FIXME: what the hell is this??? */
6489 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6490 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6492 /* Prevent inlining of methods with indirect calls */
6497 int rgctx_reg = mono_alloc_preg (cfg);
6499 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6500 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6501 call = (MonoCallInst*)ins;
6502 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6504 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6506 * Instead of emitting an indirect call, emit a direct call
6507 * with the contents of the aotconst as the patch info.
6509 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6511 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6512 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6515 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6518 if (!MONO_TYPE_IS_VOID (fsig->ret))
6519 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6521 CHECK_CFG_EXCEPTION;
6532 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6533 if (sp [fsig->param_count]->type == STACK_OBJ) {
6534 MonoInst *iargs [2];
6537 iargs [1] = sp [fsig->param_count];
6539 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6542 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6543 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6544 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6545 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6547 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6550 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6551 if (!cmethod->klass->element_class->valuetype && !readonly)
6552 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6553 CHECK_TYPELOAD (cmethod->klass);
6556 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6559 g_assert_not_reached ();
6562 CHECK_CFG_EXCEPTION;
6569 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6571 if (!MONO_TYPE_IS_VOID (fsig->ret))
6572 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6574 CHECK_CFG_EXCEPTION;
6584 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6586 } else if (imt_arg) {
6587 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6589 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6592 if (!MONO_TYPE_IS_VOID (fsig->ret))
6593 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6595 CHECK_CFG_EXCEPTION;
6602 if (cfg->method != method) {
6603 /* return from inlined method */
6605 * If in_count == 0, that means the ret is unreachable due to
6606 * being preceded by a throw. In that case, inline_method () will
6607 * handle setting the return value
6608 * (test case: test_0_inline_throw ()).
6610 if (return_var && cfg->cbb->in_count) {
6614 //g_assert (returnvar != -1);
6615 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6616 cfg->ret_var_set = TRUE;
6620 MonoType *ret_type = mono_method_signature (method)->ret;
6624 * Place a seq point here too even though the IL stack is not
6625 * empty, so a step over on
6628 * will work correctly.
6630 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6631 MONO_ADD_INS (cfg->cbb, ins);
6634 g_assert (!return_var);
6637 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6640 if (!cfg->vret_addr) {
6643 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6645 EMIT_NEW_RETLOADA (cfg, ret_addr);
6647 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6648 ins->klass = mono_class_from_mono_type (ret_type);
6651 #ifdef MONO_ARCH_SOFT_FLOAT
6652 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6653 MonoInst *iargs [1];
6657 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6658 mono_arch_emit_setret (cfg, method, conv);
6660 mono_arch_emit_setret (cfg, method, *sp);
6663 mono_arch_emit_setret (cfg, method, *sp);
6668 if (sp != stack_start)
6670 MONO_INST_NEW (cfg, ins, OP_BR);
6672 ins->inst_target_bb = end_bblock;
6673 MONO_ADD_INS (bblock, ins);
6674 link_bblock (cfg, bblock, end_bblock);
6675 start_new_bblock = 1;
6679 MONO_INST_NEW (cfg, ins, OP_BR);
6681 target = ip + 1 + (signed char)(*ip);
6683 GET_BBLOCK (cfg, tblock, target);
6684 link_bblock (cfg, bblock, tblock);
6685 ins->inst_target_bb = tblock;
6686 if (sp != stack_start) {
6687 handle_stack_args (cfg, stack_start, sp - stack_start);
6689 CHECK_UNVERIFIABLE (cfg);
6691 MONO_ADD_INS (bblock, ins);
6692 start_new_bblock = 1;
6693 inline_costs += BRANCH_COST;
6707 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6709 target = ip + 1 + *(signed char*)ip;
6715 inline_costs += BRANCH_COST;
6719 MONO_INST_NEW (cfg, ins, OP_BR);
6722 target = ip + 4 + (gint32)read32(ip);
6724 GET_BBLOCK (cfg, tblock, target);
6725 link_bblock (cfg, bblock, tblock);
6726 ins->inst_target_bb = tblock;
6727 if (sp != stack_start) {
6728 handle_stack_args (cfg, stack_start, sp - stack_start);
6730 CHECK_UNVERIFIABLE (cfg);
6733 MONO_ADD_INS (bblock, ins);
6735 start_new_bblock = 1;
6736 inline_costs += BRANCH_COST;
6743 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6744 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6745 guint32 opsize = is_short ? 1 : 4;
6747 CHECK_OPSIZE (opsize);
6749 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6752 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6757 GET_BBLOCK (cfg, tblock, target);
6758 link_bblock (cfg, bblock, tblock);
6759 GET_BBLOCK (cfg, tblock, ip);
6760 link_bblock (cfg, bblock, tblock);
6762 if (sp != stack_start) {
6763 handle_stack_args (cfg, stack_start, sp - stack_start);
6764 CHECK_UNVERIFIABLE (cfg);
6767 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6768 cmp->sreg1 = sp [0]->dreg;
6769 type_from_op (cmp, sp [0], NULL);
6772 #if SIZEOF_REGISTER == 4
6773 if (cmp->opcode == OP_LCOMPARE_IMM) {
6774 /* Convert it to OP_LCOMPARE */
6775 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6776 ins->type = STACK_I8;
6777 ins->dreg = alloc_dreg (cfg, STACK_I8);
6779 MONO_ADD_INS (bblock, ins);
6780 cmp->opcode = OP_LCOMPARE;
6781 cmp->sreg2 = ins->dreg;
6784 MONO_ADD_INS (bblock, cmp);
6786 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6787 type_from_op (ins, sp [0], NULL);
6788 MONO_ADD_INS (bblock, ins);
6789 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6790 GET_BBLOCK (cfg, tblock, target);
6791 ins->inst_true_bb = tblock;
6792 GET_BBLOCK (cfg, tblock, ip);
6793 ins->inst_false_bb = tblock;
6794 start_new_bblock = 2;
6797 inline_costs += BRANCH_COST;
6812 MONO_INST_NEW (cfg, ins, *ip);
6814 target = ip + 4 + (gint32)read32(ip);
6820 inline_costs += BRANCH_COST;
6824 MonoBasicBlock **targets;
6825 MonoBasicBlock *default_bblock;
6826 MonoJumpInfoBBTable *table;
6827 int offset_reg = alloc_preg (cfg);
6828 int target_reg = alloc_preg (cfg);
6829 int table_reg = alloc_preg (cfg);
6830 int sum_reg = alloc_preg (cfg);
6831 gboolean use_op_switch;
6835 n = read32 (ip + 1);
6838 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6842 CHECK_OPSIZE (n * sizeof (guint32));
6843 target = ip + n * sizeof (guint32);
6845 GET_BBLOCK (cfg, default_bblock, target);
6847 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6848 for (i = 0; i < n; ++i) {
6849 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6850 targets [i] = tblock;
6854 if (sp != stack_start) {
6856 * Link the current bb with the targets as well, so handle_stack_args
6857 * will set their in_stack correctly.
6859 link_bblock (cfg, bblock, default_bblock);
6860 for (i = 0; i < n; ++i)
6861 link_bblock (cfg, bblock, targets [i]);
6863 handle_stack_args (cfg, stack_start, sp - stack_start);
6865 CHECK_UNVERIFIABLE (cfg);
6868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6872 for (i = 0; i < n; ++i)
6873 link_bblock (cfg, bblock, targets [i]);
6875 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6876 table->table = targets;
6877 table->table_size = n;
6879 use_op_switch = FALSE;
6881 /* ARM implements SWITCH statements differently */
6882 /* FIXME: Make it use the generic implementation */
6883 if (!cfg->compile_aot)
6884 use_op_switch = TRUE;
6887 if (COMPILE_LLVM (cfg))
6888 use_op_switch = TRUE;
6890 cfg->cbb->has_jump_table = 1;
6892 if (use_op_switch) {
6893 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6894 ins->sreg1 = src1->dreg;
6895 ins->inst_p0 = table;
6896 ins->inst_many_bb = targets;
6897 ins->klass = GUINT_TO_POINTER (n);
6898 MONO_ADD_INS (cfg->cbb, ins);
6900 if (sizeof (gpointer) == 8)
6901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6905 #if SIZEOF_REGISTER == 8
6906 /* The upper word might not be zero, and we add it to a 64 bit address later */
6907 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6910 if (cfg->compile_aot) {
6911 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6913 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6914 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6915 ins->inst_p0 = table;
6916 ins->dreg = table_reg;
6917 MONO_ADD_INS (cfg->cbb, ins);
6920 /* FIXME: Use load_memindex */
6921 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6923 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6925 start_new_bblock = 1;
6926 inline_costs += (BRANCH_COST * 2);
6946 dreg = alloc_freg (cfg);
6949 dreg = alloc_lreg (cfg);
6952 dreg = alloc_preg (cfg);
6955 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6956 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6957 ins->flags |= ins_flag;
6959 MONO_ADD_INS (bblock, ins);
6974 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6975 ins->flags |= ins_flag;
6977 MONO_ADD_INS (bblock, ins);
6979 #if HAVE_WRITE_BARRIERS
6980 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6981 MonoInst *dummy_use;
6982 /* insert call to write barrier */
6983 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6984 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6985 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
6996 MONO_INST_NEW (cfg, ins, (*ip));
6998 ins->sreg1 = sp [0]->dreg;
6999 ins->sreg2 = sp [1]->dreg;
7000 type_from_op (ins, sp [0], sp [1]);
7002 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7004 /* Use the immediate opcodes if possible */
7005 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7006 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7007 if (imm_opcode != -1) {
7008 ins->opcode = imm_opcode;
7009 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7012 sp [1]->opcode = OP_NOP;
7016 MONO_ADD_INS ((cfg)->cbb, (ins));
7018 *sp++ = mono_decompose_opcode (cfg, ins);
7035 MONO_INST_NEW (cfg, ins, (*ip));
7037 ins->sreg1 = sp [0]->dreg;
7038 ins->sreg2 = sp [1]->dreg;
7039 type_from_op (ins, sp [0], sp [1]);
7041 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7042 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7044 /* FIXME: Pass opcode to is_inst_imm */
7046 /* Use the immediate opcodes if possible */
7047 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7050 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7051 if (imm_opcode != -1) {
7052 ins->opcode = imm_opcode;
7053 if (sp [1]->opcode == OP_I8CONST) {
7054 #if SIZEOF_REGISTER == 8
7055 ins->inst_imm = sp [1]->inst_l;
7057 ins->inst_ls_word = sp [1]->inst_ls_word;
7058 ins->inst_ms_word = sp [1]->inst_ms_word;
7062 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7065 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7066 if (sp [1]->next == NULL)
7067 sp [1]->opcode = OP_NOP;
7070 MONO_ADD_INS ((cfg)->cbb, (ins));
7072 *sp++ = mono_decompose_opcode (cfg, ins);
7085 case CEE_CONV_OVF_I8:
7086 case CEE_CONV_OVF_U8:
7090 /* Special case this earlier so we have long constants in the IR */
7091 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7092 int data = sp [-1]->inst_c0;
7093 sp [-1]->opcode = OP_I8CONST;
7094 sp [-1]->type = STACK_I8;
7095 #if SIZEOF_REGISTER == 8
7096 if ((*ip) == CEE_CONV_U8)
7097 sp [-1]->inst_c0 = (guint32)data;
7099 sp [-1]->inst_c0 = data;
7101 sp [-1]->inst_ls_word = data;
7102 if ((*ip) == CEE_CONV_U8)
7103 sp [-1]->inst_ms_word = 0;
7105 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7107 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7114 case CEE_CONV_OVF_I4:
7115 case CEE_CONV_OVF_I1:
7116 case CEE_CONV_OVF_I2:
7117 case CEE_CONV_OVF_I:
7118 case CEE_CONV_OVF_U:
7121 if (sp [-1]->type == STACK_R8) {
7122 ADD_UNOP (CEE_CONV_OVF_I8);
7129 case CEE_CONV_OVF_U1:
7130 case CEE_CONV_OVF_U2:
7131 case CEE_CONV_OVF_U4:
7134 if (sp [-1]->type == STACK_R8) {
7135 ADD_UNOP (CEE_CONV_OVF_U8);
7142 case CEE_CONV_OVF_I1_UN:
7143 case CEE_CONV_OVF_I2_UN:
7144 case CEE_CONV_OVF_I4_UN:
7145 case CEE_CONV_OVF_I8_UN:
7146 case CEE_CONV_OVF_U1_UN:
7147 case CEE_CONV_OVF_U2_UN:
7148 case CEE_CONV_OVF_U4_UN:
7149 case CEE_CONV_OVF_U8_UN:
7150 case CEE_CONV_OVF_I_UN:
7151 case CEE_CONV_OVF_U_UN:
7158 CHECK_CFG_EXCEPTION;
7162 case CEE_ADD_OVF_UN:
7164 case CEE_MUL_OVF_UN:
7166 case CEE_SUB_OVF_UN:
7174 token = read32 (ip + 1);
7175 klass = mini_get_class (method, token, generic_context);
7176 CHECK_TYPELOAD (klass);
7178 if (generic_class_is_reference_type (cfg, klass)) {
7179 MonoInst *store, *load;
7180 int dreg = alloc_preg (cfg);
7182 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7183 load->flags |= ins_flag;
7184 MONO_ADD_INS (cfg->cbb, load);
7186 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7187 store->flags |= ins_flag;
7188 MONO_ADD_INS (cfg->cbb, store);
7190 #if HAVE_WRITE_BARRIERS
7191 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7192 MonoInst *dummy_use;
7193 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7194 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7195 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7199 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7211 token = read32 (ip + 1);
7212 klass = mini_get_class (method, token, generic_context);
7213 CHECK_TYPELOAD (klass);
7215 /* Optimize the common ldobj+stloc combination */
7225 loc_index = ip [5] - CEE_STLOC_0;
7232 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7233 CHECK_LOCAL (loc_index);
7235 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7236 ins->dreg = cfg->locals [loc_index]->dreg;
7242 /* Optimize the ldobj+stobj combination */
7243 /* The reference case ends up being a load+store anyway */
7244 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7249 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7256 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7265 CHECK_STACK_OVF (1);
7267 n = read32 (ip + 1);
7269 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7270 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7271 ins->type = STACK_OBJ;
7274 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7275 MonoInst *iargs [1];
7277 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7278 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7280 if (cfg->opt & MONO_OPT_SHARED) {
7281 MonoInst *iargs [3];
7283 if (cfg->compile_aot) {
7284 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7286 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7287 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7288 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7289 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7290 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7292 if (bblock->out_of_line) {
7293 MonoInst *iargs [2];
7295 if (image == mono_defaults.corlib) {
7297 * Avoid relocations in AOT and save some space by using a
7298 * version of helper_ldstr specialized to mscorlib.
7300 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7301 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7303 /* Avoid creating the string object */
7304 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7305 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7306 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7310 if (cfg->compile_aot) {
7311 NEW_LDSTRCONST (cfg, ins, image, n);
7313 MONO_ADD_INS (bblock, ins);
7316 NEW_PCONST (cfg, ins, NULL);
7317 ins->type = STACK_OBJ;
7318 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7320 MONO_ADD_INS (bblock, ins);
7329 MonoInst *iargs [2];
7330 MonoMethodSignature *fsig;
7333 MonoInst *vtable_arg = NULL;
7336 token = read32 (ip + 1);
7337 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7340 fsig = mono_method_get_signature (cmethod, image, token);
7344 mono_save_token_info (cfg, image, token, cmethod);
7346 if (!mono_class_init (cmethod->klass))
7349 if (cfg->generic_sharing_context)
7350 context_used = mono_method_check_context_used (cmethod);
7352 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7353 if (check_linkdemand (cfg, method, cmethod))
7355 CHECK_CFG_EXCEPTION;
7356 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7357 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7360 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7361 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7362 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7363 mono_class_vtable (cfg->domain, cmethod->klass);
7364 CHECK_TYPELOAD (cmethod->klass);
7366 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7367 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7370 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7371 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7373 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7375 CHECK_TYPELOAD (cmethod->klass);
7376 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7381 n = fsig->param_count;
7385 * Generate smaller code for the common newobj <exception> instruction in
7386 * argument checking code.
7388 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7389 is_exception_class (cmethod->klass) && n <= 2 &&
7390 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7391 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7392 MonoInst *iargs [3];
7394 g_assert (!vtable_arg);
7398 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7401 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7405 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7410 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7413 g_assert_not_reached ();
7421 /* move the args to allow room for 'this' in the first position */
7427 /* check_call_signature () requires sp[0] to be set */
7428 this_ins.type = STACK_OBJ;
7430 if (check_call_signature (cfg, fsig, sp))
7435 if (mini_class_is_system_array (cmethod->klass)) {
7436 g_assert (!vtable_arg);
7438 *sp = emit_get_rgctx_method (cfg, context_used,
7439 cmethod, MONO_RGCTX_INFO_METHOD);
7441 /* Avoid varargs in the common case */
7442 if (fsig->param_count == 1)
7443 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7444 else if (fsig->param_count == 2)
7445 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7446 else if (fsig->param_count == 3)
7447 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7449 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7450 } else if (cmethod->string_ctor) {
7451 g_assert (!context_used);
7452 g_assert (!vtable_arg);
7453 /* we simply pass a null pointer */
7454 EMIT_NEW_PCONST (cfg, *sp, NULL);
7455 /* now call the string ctor */
7456 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7458 MonoInst* callvirt_this_arg = NULL;
7460 if (cmethod->klass->valuetype) {
7461 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7462 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7463 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7468 * The code generated by mini_emit_virtual_call () expects
7469 * iargs [0] to be a boxed instance, but luckily the vcall
7470 * will be transformed into a normal call there.
7472 } else if (context_used) {
7473 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7476 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7478 CHECK_TYPELOAD (cmethod->klass);
7481 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7482 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7483 * As a workaround, we call class cctors before allocating objects.
7485 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7486 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7487 if (cfg->verbose_level > 2)
7488 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7489 class_inits = g_slist_prepend (class_inits, vtable);
7492 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7495 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7498 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7500 /* Now call the actual ctor */
7501 /* Avoid virtual calls to ctors if possible */
7502 if (cmethod->klass->marshalbyref)
7503 callvirt_this_arg = sp [0];
7505 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7506 mono_method_check_inlining (cfg, cmethod) &&
7507 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7508 !g_list_find (dont_inline, cmethod)) {
7511 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7512 cfg->real_offset += 5;
7515 inline_costs += costs - 5;
7518 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7520 } else if (context_used &&
7521 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7522 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7523 MonoInst *cmethod_addr;
7525 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7526 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7528 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7531 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7532 callvirt_this_arg, NULL, vtable_arg);
7536 if (alloc == NULL) {
7538 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7539 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7553 token = read32 (ip + 1);
7554 klass = mini_get_class (method, token, generic_context);
7555 CHECK_TYPELOAD (klass);
7556 if (sp [0]->type != STACK_OBJ)
7559 if (cfg->generic_sharing_context)
7560 context_used = mono_class_check_context_used (klass);
7562 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7569 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7571 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7575 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7576 MonoMethod *mono_castclass;
7577 MonoInst *iargs [1];
7580 mono_castclass = mono_marshal_get_castclass (klass);
7583 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7584 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7585 g_assert (costs > 0);
7588 cfg->real_offset += 5;
7593 inline_costs += costs;
7596 ins = handle_castclass (cfg, klass, *sp, context_used);
7597 CHECK_CFG_EXCEPTION;
7607 token = read32 (ip + 1);
7608 klass = mini_get_class (method, token, generic_context);
7609 CHECK_TYPELOAD (klass);
7610 if (sp [0]->type != STACK_OBJ)
7613 if (cfg->generic_sharing_context)
7614 context_used = mono_class_check_context_used (klass);
7616 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7623 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7625 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7629 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7630 MonoMethod *mono_isinst;
7631 MonoInst *iargs [1];
7634 mono_isinst = mono_marshal_get_isinst (klass);
7637 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7638 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7639 g_assert (costs > 0);
7642 cfg->real_offset += 5;
7647 inline_costs += costs;
7650 ins = handle_isinst (cfg, klass, *sp, context_used);
7651 CHECK_CFG_EXCEPTION;
7658 case CEE_UNBOX_ANY: {
7662 token = read32 (ip + 1);
7663 klass = mini_get_class (method, token, generic_context);
7664 CHECK_TYPELOAD (klass);
7666 mono_save_token_info (cfg, image, token, klass);
7668 if (cfg->generic_sharing_context)
7669 context_used = mono_class_check_context_used (klass);
7671 if (generic_class_is_reference_type (cfg, klass)) {
7672 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7674 MonoInst *iargs [2];
7679 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7680 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7684 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7685 MonoMethod *mono_castclass;
7686 MonoInst *iargs [1];
7689 mono_castclass = mono_marshal_get_castclass (klass);
7692 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7693 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7695 g_assert (costs > 0);
7698 cfg->real_offset += 5;
7702 inline_costs += costs;
7704 ins = handle_castclass (cfg, klass, *sp, 0);
7705 CHECK_CFG_EXCEPTION;
7713 if (mono_class_is_nullable (klass)) {
7714 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7721 ins = handle_unbox (cfg, klass, sp, context_used);
7727 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7740 token = read32 (ip + 1);
7741 klass = mini_get_class (method, token, generic_context);
7742 CHECK_TYPELOAD (klass);
7744 mono_save_token_info (cfg, image, token, klass);
7746 if (cfg->generic_sharing_context)
7747 context_used = mono_class_check_context_used (klass);
7749 if (generic_class_is_reference_type (cfg, klass)) {
7755 if (klass == mono_defaults.void_class)
7757 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7759 /* frequent check in generic code: box (struct), brtrue */
7760 if (!mono_class_is_nullable (klass) &&
7761 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7762 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7764 MONO_INST_NEW (cfg, ins, OP_BR);
7765 if (*ip == CEE_BRTRUE_S) {
7768 target = ip + 1 + (signed char)(*ip);
7773 target = ip + 4 + (gint)(read32 (ip));
7776 GET_BBLOCK (cfg, tblock, target);
7777 link_bblock (cfg, bblock, tblock);
7778 ins->inst_target_bb = tblock;
7779 GET_BBLOCK (cfg, tblock, ip);
7781 * This leads to some inconsistency, since the two bblocks are
7782 * not really connected, but it is needed for handling stack
7783 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7784 * FIXME: This should only be needed if sp != stack_start, but that
7785 * doesn't work for some reason (test failure in mcs/tests on x86).
7787 link_bblock (cfg, bblock, tblock);
7788 if (sp != stack_start) {
7789 handle_stack_args (cfg, stack_start, sp - stack_start);
7791 CHECK_UNVERIFIABLE (cfg);
7793 MONO_ADD_INS (bblock, ins);
7794 start_new_bblock = 1;
7798 *sp++ = handle_box (cfg, val, klass, context_used);
7800 CHECK_CFG_EXCEPTION;
7809 token = read32 (ip + 1);
7810 klass = mini_get_class (method, token, generic_context);
7811 CHECK_TYPELOAD (klass);
7813 mono_save_token_info (cfg, image, token, klass);
7815 if (cfg->generic_sharing_context)
7816 context_used = mono_class_check_context_used (klass);
7818 if (mono_class_is_nullable (klass)) {
7821 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7822 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7826 ins = handle_unbox (cfg, klass, sp, context_used);
7836 MonoClassField *field;
7840 if (*ip == CEE_STFLD) {
7847 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7849 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7852 token = read32 (ip + 1);
7853 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7854 field = mono_method_get_wrapper_data (method, token);
7855 klass = field->parent;
7858 field = mono_field_from_token (image, token, &klass, generic_context);
7862 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7863 FIELD_ACCESS_FAILURE;
7864 mono_class_init (klass);
7866 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7867 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7868 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7869 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7872 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7873 if (*ip == CEE_STFLD) {
7874 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7876 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7877 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7878 MonoInst *iargs [5];
7881 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7882 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7883 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7887 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7888 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7889 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7890 g_assert (costs > 0);
7892 cfg->real_offset += 5;
7895 inline_costs += costs;
7897 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7902 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7904 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7905 store->flags |= MONO_INST_FAULT;
7907 #if HAVE_WRITE_BARRIERS
7908 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7909 /* insert call to write barrier */
7910 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7911 MonoInst *iargs [2], *dummy_use;
7914 dreg = alloc_preg (cfg);
7915 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7917 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7919 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7923 store->flags |= ins_flag;
7930 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7931 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7932 MonoInst *iargs [4];
7935 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7936 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7937 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7938 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7939 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7940 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7942 g_assert (costs > 0);
7944 cfg->real_offset += 5;
7948 inline_costs += costs;
7950 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7954 if (sp [0]->type == STACK_VTYPE) {
7957 /* Have to compute the address of the variable */
7959 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7961 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7963 g_assert (var->klass == klass);
7965 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7969 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7971 if (*ip == CEE_LDFLDA) {
7972 dreg = alloc_preg (cfg);
7974 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7975 ins->klass = mono_class_from_mono_type (field->type);
7976 ins->type = STACK_MP;
7981 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7982 load->flags |= ins_flag;
7983 load->flags |= MONO_INST_FAULT;
7994 MonoClassField *field;
7995 gpointer addr = NULL;
7996 gboolean is_special_static;
7999 token = read32 (ip + 1);
8001 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8002 field = mono_method_get_wrapper_data (method, token);
8003 klass = field->parent;
8006 field = mono_field_from_token (image, token, &klass, generic_context);
8009 mono_class_init (klass);
8010 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8011 FIELD_ACCESS_FAILURE;
8013 /* if the class is Critical then transparent code cannot access its fields */
8014 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8015 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8018 * We can only support shared generic static
8019 * field access on architectures where the
8020 * trampoline code has been extended to handle
8021 * the generic class init.
8023 #ifndef MONO_ARCH_VTABLE_REG
8024 GENERIC_SHARING_FAILURE (*ip);
8027 if (cfg->generic_sharing_context)
8028 context_used = mono_class_check_context_used (klass);
8030 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8032 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8033 * to be called here.
8035 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8036 mono_class_vtable (cfg->domain, klass);
8037 CHECK_TYPELOAD (klass);
8039 mono_domain_lock (cfg->domain);
8040 if (cfg->domain->special_static_fields)
8041 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8042 mono_domain_unlock (cfg->domain);
8044 is_special_static = mono_class_field_is_special_static (field);
8046 /* Generate IR to compute the field address */
8047 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8049 * Fast access to TLS data
8050 * Inline version of get_thread_static_data () in
8054 int idx, static_data_reg, array_reg, dreg;
8055 MonoInst *thread_ins;
8057 // offset &= 0x7fffffff;
8058 // idx = (offset >> 24) - 1;
8059 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8061 thread_ins = mono_get_thread_intrinsic (cfg);
8062 MONO_ADD_INS (cfg->cbb, thread_ins);
8063 static_data_reg = alloc_ireg (cfg);
8064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8066 if (cfg->compile_aot) {
8067 int offset_reg, offset2_reg, idx_reg;
8069 /* For TLS variables, this will return the TLS offset */
8070 EMIT_NEW_SFLDACONST (cfg, ins, field);
8071 offset_reg = ins->dreg;
8072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8073 idx_reg = alloc_ireg (cfg);
8074 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8077 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8078 array_reg = alloc_ireg (cfg);
8079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8080 offset2_reg = alloc_ireg (cfg);
8081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8082 dreg = alloc_ireg (cfg);
8083 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8085 offset = (gsize)addr & 0x7fffffff;
8086 idx = (offset >> 24) - 1;
8088 array_reg = alloc_ireg (cfg);
8089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8090 dreg = alloc_ireg (cfg);
8091 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8093 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8094 (cfg->compile_aot && is_special_static) ||
8095 (context_used && is_special_static)) {
8096 MonoInst *iargs [2];
8098 g_assert (field->parent);
8099 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8101 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8102 field, MONO_RGCTX_INFO_CLASS_FIELD);
8104 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8106 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8107 } else if (context_used) {
8108 MonoInst *static_data;
8111 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8112 method->klass->name_space, method->klass->name, method->name,
8113 depth, field->offset);
8116 if (mono_class_needs_cctor_run (klass, method))
8117 emit_generic_class_init (cfg, klass);
8120 * The pointer we're computing here is
8122 * super_info.static_data + field->offset
8124 static_data = emit_get_rgctx_klass (cfg, context_used,
8125 klass, MONO_RGCTX_INFO_STATIC_DATA);
8127 if (field->offset == 0) {
8130 int addr_reg = mono_alloc_preg (cfg);
8131 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8133 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8134 MonoInst *iargs [2];
8136 g_assert (field->parent);
8137 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8138 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8139 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8141 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8143 CHECK_TYPELOAD (klass);
8145 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8146 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8147 if (cfg->verbose_level > 2)
8148 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8149 class_inits = g_slist_prepend (class_inits, vtable);
8151 if (cfg->run_cctors) {
8153 /* This makes it so that inlining cannot trigger */
8154 /* .cctors: too many apps depend on them */
8155 /* running with a specific order... */
8156 if (! vtable->initialized)
8158 ex = mono_runtime_class_init_full (vtable, FALSE);
8160 set_exception_object (cfg, ex);
8161 goto exception_exit;
8165 addr = (char*)vtable->data + field->offset;
8167 if (cfg->compile_aot)
8168 EMIT_NEW_SFLDACONST (cfg, ins, field);
8170 EMIT_NEW_PCONST (cfg, ins, addr);
8172 MonoInst *iargs [1];
8173 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8174 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8178 /* Generate IR to do the actual load/store operation */
8180 if (*ip == CEE_LDSFLDA) {
8181 ins->klass = mono_class_from_mono_type (field->type);
8182 ins->type = STACK_PTR;
8184 } else if (*ip == CEE_STSFLD) {
8189 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8190 store->flags |= ins_flag;
8192 gboolean is_const = FALSE;
8193 MonoVTable *vtable = NULL;
8195 if (!context_used) {
8196 vtable = mono_class_vtable (cfg->domain, klass);
8197 CHECK_TYPELOAD (klass);
8199 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8200 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8201 gpointer addr = (char*)vtable->data + field->offset;
8202 int ro_type = field->type->type;
8203 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8204 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8206 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8209 case MONO_TYPE_BOOLEAN:
8211 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8215 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8218 case MONO_TYPE_CHAR:
8220 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8224 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8229 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8233 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8239 case MONO_TYPE_FNPTR:
8240 #ifndef HAVE_MOVING_COLLECTOR
8241 case MONO_TYPE_STRING:
8242 case MONO_TYPE_OBJECT:
8243 case MONO_TYPE_CLASS:
8244 case MONO_TYPE_SZARRAY:
8245 case MONO_TYPE_ARRAY:
8247 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8248 type_to_eval_stack_type ((cfg), field->type, *sp);
8253 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8258 case MONO_TYPE_VALUETYPE:
8268 CHECK_STACK_OVF (1);
8270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8271 load->flags |= ins_flag;
8284 token = read32 (ip + 1);
8285 klass = mini_get_class (method, token, generic_context);
8286 CHECK_TYPELOAD (klass);
8287 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8288 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8289 #if HAVE_WRITE_BARRIERS
8290 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8291 generic_class_is_reference_type (cfg, klass)) {
8292 MonoInst *dummy_use;
8293 /* insert call to write barrier */
8294 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8295 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8296 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8309 const char *data_ptr;
8311 guint32 field_token;
8317 token = read32 (ip + 1);
8319 klass = mini_get_class (method, token, generic_context);
8320 CHECK_TYPELOAD (klass);
8322 if (cfg->generic_sharing_context)
8323 context_used = mono_class_check_context_used (klass);
8325 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8326 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8327 ins->sreg1 = sp [0]->dreg;
8328 ins->type = STACK_I4;
8329 ins->dreg = alloc_ireg (cfg);
8330 MONO_ADD_INS (cfg->cbb, ins);
8331 *sp = mono_decompose_opcode (cfg, ins);
8336 MonoClass *array_class = mono_array_class_get (klass, 1);
8337 /* FIXME: we cannot get a managed
8338 allocator because we can't get the
8339 open generic class's vtable. We
8340 have the same problem in
8341 handle_alloc(). This
8342 needs to be solved so that we can
8343 have managed allocs of shared
8346 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8347 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8349 MonoMethod *managed_alloc = NULL;
8351 /* FIXME: Decompose later to help abcrem */
8354 args [0] = emit_get_rgctx_klass (cfg, context_used,
8355 array_class, MONO_RGCTX_INFO_VTABLE);
8360 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8362 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8364 if (cfg->opt & MONO_OPT_SHARED) {
8365 /* Decompose now to avoid problems with references to the domainvar */
8366 MonoInst *iargs [3];
8368 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8369 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8372 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8374 /* Decompose later since it is needed by abcrem */
8375 MonoClass *array_type = mono_array_class_get (klass, 1);
8376 mono_class_vtable (cfg->domain, array_type);
8377 CHECK_TYPELOAD (array_type);
8379 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8380 ins->dreg = alloc_preg (cfg);
8381 ins->sreg1 = sp [0]->dreg;
8382 ins->inst_newa_class = klass;
8383 ins->type = STACK_OBJ;
8385 MONO_ADD_INS (cfg->cbb, ins);
8386 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8387 cfg->cbb->has_array_access = TRUE;
8389 /* Needed so mono_emit_load_get_addr () gets called */
8390 mono_get_got_var (cfg);
8400 * we inline/optimize the initialization sequence if possible.
8401 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8402 * for small sizes open code the memcpy
8403 * ensure the rva field is big enough
8405 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8406 MonoMethod *memcpy_method = get_memcpy_method ();
8407 MonoInst *iargs [3];
8408 int add_reg = alloc_preg (cfg);
8410 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8411 if (cfg->compile_aot) {
8412 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8414 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8416 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8417 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8426 if (sp [0]->type != STACK_OBJ)
8429 dreg = alloc_preg (cfg);
8430 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8431 ins->dreg = alloc_preg (cfg);
8432 ins->sreg1 = sp [0]->dreg;
8433 ins->type = STACK_I4;
8434 MONO_ADD_INS (cfg->cbb, ins);
8435 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8436 cfg->cbb->has_array_access = TRUE;
8444 if (sp [0]->type != STACK_OBJ)
8447 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8449 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8450 CHECK_TYPELOAD (klass);
8451 /* we need to make sure that this array is exactly the type it needs
8452 * to be for correctness. the wrappers are lax with their usage
8453 * so we need to ignore them here
8455 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8456 MonoClass *array_class = mono_array_class_get (klass, 1);
8457 mini_emit_check_array_type (cfg, sp [0], array_class);
8458 CHECK_TYPELOAD (array_class);
8462 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8477 case CEE_LDELEM_REF: {
8483 if (*ip == CEE_LDELEM) {
8485 token = read32 (ip + 1);
8486 klass = mini_get_class (method, token, generic_context);
8487 CHECK_TYPELOAD (klass);
8488 mono_class_init (klass);
8491 klass = array_access_to_klass (*ip);
8493 if (sp [0]->type != STACK_OBJ)
8496 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8498 if (sp [1]->opcode == OP_ICONST) {
8499 int array_reg = sp [0]->dreg;
8500 int index_reg = sp [1]->dreg;
8501 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8503 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8504 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8506 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8507 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8510 if (*ip == CEE_LDELEM)
8523 case CEE_STELEM_REF:
8530 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8532 if (*ip == CEE_STELEM) {
8534 token = read32 (ip + 1);
8535 klass = mini_get_class (method, token, generic_context);
8536 CHECK_TYPELOAD (klass);
8537 mono_class_init (klass);
8540 klass = array_access_to_klass (*ip);
8542 if (sp [0]->type != STACK_OBJ)
8545 /* storing a NULL doesn't need any of the complex checks in stelemref */
8546 if (generic_class_is_reference_type (cfg, klass) &&
8547 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8548 MonoMethod* helper = mono_marshal_get_stelemref ();
8549 MonoInst *iargs [3];
8551 if (sp [0]->type != STACK_OBJ)
8553 if (sp [2]->type != STACK_OBJ)
8560 mono_emit_method_call (cfg, helper, iargs, NULL);
8562 if (sp [1]->opcode == OP_ICONST) {
8563 int array_reg = sp [0]->dreg;
8564 int index_reg = sp [1]->dreg;
8565 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8567 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8568 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8570 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8571 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8575 if (*ip == CEE_STELEM)
8582 case CEE_CKFINITE: {
8586 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8587 ins->sreg1 = sp [0]->dreg;
8588 ins->dreg = alloc_freg (cfg);
8589 ins->type = STACK_R8;
8590 MONO_ADD_INS (bblock, ins);
8592 *sp++ = mono_decompose_opcode (cfg, ins);
8597 case CEE_REFANYVAL: {
8598 MonoInst *src_var, *src;
8600 int klass_reg = alloc_preg (cfg);
8601 int dreg = alloc_preg (cfg);
8604 MONO_INST_NEW (cfg, ins, *ip);
8607 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8608 CHECK_TYPELOAD (klass);
8609 mono_class_init (klass);
8611 if (cfg->generic_sharing_context)
8612 context_used = mono_class_check_context_used (klass);
8615 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8617 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8618 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8622 MonoInst *klass_ins;
8624 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8625 klass, MONO_RGCTX_INFO_KLASS);
8628 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8629 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8631 mini_emit_class_check (cfg, klass_reg, klass);
8633 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8634 ins->type = STACK_MP;
8639 case CEE_MKREFANY: {
8640 MonoInst *loc, *addr;
8643 MONO_INST_NEW (cfg, ins, *ip);
8646 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8647 CHECK_TYPELOAD (klass);
8648 mono_class_init (klass);
8650 if (cfg->generic_sharing_context)
8651 context_used = mono_class_check_context_used (klass);
8653 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8654 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8657 MonoInst *const_ins;
8658 int type_reg = alloc_preg (cfg);
8660 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8664 } else if (cfg->compile_aot) {
8665 int const_reg = alloc_preg (cfg);
8666 int type_reg = alloc_preg (cfg);
8668 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8673 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8674 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8678 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8679 ins->type = STACK_VTYPE;
8680 ins->klass = mono_defaults.typed_reference_class;
8687 MonoClass *handle_class;
8689 CHECK_STACK_OVF (1);
8692 n = read32 (ip + 1);
8694 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8695 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8696 handle = mono_method_get_wrapper_data (method, n);
8697 handle_class = mono_method_get_wrapper_data (method, n + 1);
8698 if (handle_class == mono_defaults.typehandle_class)
8699 handle = &((MonoClass*)handle)->byval_arg;
8702 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8706 mono_class_init (handle_class);
8707 if (cfg->generic_sharing_context) {
8708 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8709 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8710 /* This case handles ldtoken
8711 of an open type, like for
8714 } else if (handle_class == mono_defaults.typehandle_class) {
8715 /* If we get a MONO_TYPE_CLASS
8716 then we need to provide the
8718 instantiation of it. */
8719 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8722 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8723 } else if (handle_class == mono_defaults.fieldhandle_class)
8724 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8725 else if (handle_class == mono_defaults.methodhandle_class)
8726 context_used = mono_method_check_context_used (handle);
8728 g_assert_not_reached ();
8731 if ((cfg->opt & MONO_OPT_SHARED) &&
8732 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8733 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8734 MonoInst *addr, *vtvar, *iargs [3];
8735 int method_context_used;
8737 if (cfg->generic_sharing_context)
8738 method_context_used = mono_method_check_context_used (method);
8740 method_context_used = 0;
8742 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8744 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8745 EMIT_NEW_ICONST (cfg, iargs [1], n);
8746 if (method_context_used) {
8747 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8748 method, MONO_RGCTX_INFO_METHOD);
8749 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8751 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8752 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8754 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8758 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8760 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8761 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8762 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8763 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8764 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8765 MonoClass *tclass = mono_class_from_mono_type (handle);
8767 mono_class_init (tclass);
8769 ins = emit_get_rgctx_klass (cfg, context_used,
8770 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8771 } else if (cfg->compile_aot) {
8772 if (method->wrapper_type) {
8773 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8774 /* Special case for static synchronized wrappers */
8775 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8777 /* FIXME: n is not a normal token */
8778 cfg->disable_aot = TRUE;
8779 EMIT_NEW_PCONST (cfg, ins, NULL);
8782 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8785 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8787 ins->type = STACK_OBJ;
8788 ins->klass = cmethod->klass;
8791 MonoInst *addr, *vtvar;
8793 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8796 if (handle_class == mono_defaults.typehandle_class) {
8797 ins = emit_get_rgctx_klass (cfg, context_used,
8798 mono_class_from_mono_type (handle),
8799 MONO_RGCTX_INFO_TYPE);
8800 } else if (handle_class == mono_defaults.methodhandle_class) {
8801 ins = emit_get_rgctx_method (cfg, context_used,
8802 handle, MONO_RGCTX_INFO_METHOD);
8803 } else if (handle_class == mono_defaults.fieldhandle_class) {
8804 ins = emit_get_rgctx_field (cfg, context_used,
8805 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8807 g_assert_not_reached ();
8809 } else if (cfg->compile_aot) {
8810 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8812 EMIT_NEW_PCONST (cfg, ins, handle);
8814 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8815 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8816 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8826 MONO_INST_NEW (cfg, ins, OP_THROW);
8828 ins->sreg1 = sp [0]->dreg;
8830 bblock->out_of_line = TRUE;
8831 MONO_ADD_INS (bblock, ins);
8832 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8833 MONO_ADD_INS (bblock, ins);
8836 link_bblock (cfg, bblock, end_bblock);
8837 start_new_bblock = 1;
8839 case CEE_ENDFINALLY:
8840 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8841 MONO_ADD_INS (bblock, ins);
8843 start_new_bblock = 1;
8846 * Control will leave the method so empty the stack, otherwise
8847 * the next basic block will start with a nonempty stack.
8849 while (sp != stack_start) {
8857 if (*ip == CEE_LEAVE) {
8859 target = ip + 5 + (gint32)read32(ip + 1);
8862 target = ip + 2 + (signed char)(ip [1]);
8865 /* empty the stack */
8866 while (sp != stack_start) {
8871 * If this leave statement is in a catch block, check for a
8872 * pending exception, and rethrow it if necessary.
8873 * We avoid doing this in runtime invoke wrappers, since those are called
8874 * by native code which expects the wrapper to catch all exceptions.
8876 for (i = 0; i < header->num_clauses; ++i) {
8877 MonoExceptionClause *clause = &header->clauses [i];
8880 * Use <= in the final comparison to handle clauses with multiple
8881 * leave statements, like in bug #78024.
8882 * The ordering of the exception clauses guarantees that we find the
8885 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8887 MonoBasicBlock *dont_throw;
8892 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8895 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8897 NEW_BBLOCK (cfg, dont_throw);
8900 * Currently, we always rethrow the abort exception, despite the
8901 * fact that this is not correct. See thread6.cs for an example.
8902 * But propagating the abort exception is more important than
8903 * getting the semantics right.
8905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8907 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8909 MONO_START_BB (cfg, dont_throw);
8914 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8916 MonoExceptionClause *clause;
8918 for (tmp = handlers; tmp; tmp = tmp->next) {
8920 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8922 link_bblock (cfg, bblock, tblock);
8923 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8924 ins->inst_target_bb = tblock;
8925 ins->inst_eh_block = clause;
8926 MONO_ADD_INS (bblock, ins);
8927 bblock->has_call_handler = 1;
8928 if (COMPILE_LLVM (cfg)) {
8929 MonoBasicBlock *target_bb;
8932 * Link the finally bblock with the target, since it will
8933 * conceptually branch there.
8934 * FIXME: Have to link the bblock containing the endfinally.
8936 GET_BBLOCK (cfg, target_bb, target);
8937 link_bblock (cfg, tblock, target_bb);
8940 g_list_free (handlers);
8943 MONO_INST_NEW (cfg, ins, OP_BR);
8944 MONO_ADD_INS (bblock, ins);
8945 GET_BBLOCK (cfg, tblock, target);
8946 link_bblock (cfg, bblock, tblock);
8947 ins->inst_target_bb = tblock;
8948 start_new_bblock = 1;
8950 if (*ip == CEE_LEAVE)
8959 * Mono specific opcodes
8961 case MONO_CUSTOM_PREFIX: {
8963 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8967 case CEE_MONO_ICALL: {
8969 MonoJitICallInfo *info;
8971 token = read32 (ip + 2);
8972 func = mono_method_get_wrapper_data (method, token);
8973 info = mono_find_jit_icall_by_addr (func);
8976 CHECK_STACK (info->sig->param_count);
8977 sp -= info->sig->param_count;
8979 ins = mono_emit_jit_icall (cfg, info->func, sp);
8980 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8984 inline_costs += 10 * num_calls++;
8988 case CEE_MONO_LDPTR: {
8991 CHECK_STACK_OVF (1);
8993 token = read32 (ip + 2);
8995 ptr = mono_method_get_wrapper_data (method, token);
8996 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8997 MonoJitICallInfo *callinfo;
8998 const char *icall_name;
9000 icall_name = method->name + strlen ("__icall_wrapper_");
9001 g_assert (icall_name);
9002 callinfo = mono_find_jit_icall_by_name (icall_name);
9003 g_assert (callinfo);
9005 if (ptr == callinfo->func) {
9006 /* Will be transformed into an AOTCONST later */
9007 EMIT_NEW_PCONST (cfg, ins, ptr);
9013 /* FIXME: Generalize this */
9014 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9015 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9020 EMIT_NEW_PCONST (cfg, ins, ptr);
9023 inline_costs += 10 * num_calls++;
9024 /* Can't embed random pointers into AOT code */
9025 cfg->disable_aot = 1;
9028 case CEE_MONO_ICALL_ADDR: {
9029 MonoMethod *cmethod;
9032 CHECK_STACK_OVF (1);
9034 token = read32 (ip + 2);
9036 cmethod = mono_method_get_wrapper_data (method, token);
9038 if (cfg->compile_aot) {
9039 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9041 ptr = mono_lookup_internal_call (cmethod);
9043 EMIT_NEW_PCONST (cfg, ins, ptr);
9049 case CEE_MONO_VTADDR: {
9050 MonoInst *src_var, *src;
9056 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9057 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9062 case CEE_MONO_NEWOBJ: {
9063 MonoInst *iargs [2];
9065 CHECK_STACK_OVF (1);
9067 token = read32 (ip + 2);
9068 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9069 mono_class_init (klass);
9070 NEW_DOMAINCONST (cfg, iargs [0]);
9071 MONO_ADD_INS (cfg->cbb, iargs [0]);
9072 NEW_CLASSCONST (cfg, iargs [1], klass);
9073 MONO_ADD_INS (cfg->cbb, iargs [1]);
9074 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9076 inline_costs += 10 * num_calls++;
9079 case CEE_MONO_OBJADDR:
9082 MONO_INST_NEW (cfg, ins, OP_MOVE);
9083 ins->dreg = alloc_preg (cfg);
9084 ins->sreg1 = sp [0]->dreg;
9085 ins->type = STACK_MP;
9086 MONO_ADD_INS (cfg->cbb, ins);
9090 case CEE_MONO_LDNATIVEOBJ:
9092 * Similar to LDOBJ, but instead load the unmanaged
9093 * representation of the vtype to the stack.
9098 token = read32 (ip + 2);
9099 klass = mono_method_get_wrapper_data (method, token);
9100 g_assert (klass->valuetype);
9101 mono_class_init (klass);
9104 MonoInst *src, *dest, *temp;
9107 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9108 temp->backend.is_pinvoke = 1;
9109 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9110 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9112 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9113 dest->type = STACK_VTYPE;
9114 dest->klass = klass;
9120 case CEE_MONO_RETOBJ: {
9122 * Same as RET, but return the native representation of a vtype
9125 g_assert (cfg->ret);
9126 g_assert (mono_method_signature (method)->pinvoke);
9131 token = read32 (ip + 2);
9132 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9134 if (!cfg->vret_addr) {
9135 g_assert (cfg->ret_var_is_local);
9137 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9139 EMIT_NEW_RETLOADA (cfg, ins);
9141 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9143 if (sp != stack_start)
9146 MONO_INST_NEW (cfg, ins, OP_BR);
9147 ins->inst_target_bb = end_bblock;
9148 MONO_ADD_INS (bblock, ins);
9149 link_bblock (cfg, bblock, end_bblock);
9150 start_new_bblock = 1;
9154 case CEE_MONO_CISINST:
9155 case CEE_MONO_CCASTCLASS: {
9160 token = read32 (ip + 2);
9161 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9162 if (ip [1] == CEE_MONO_CISINST)
9163 ins = handle_cisinst (cfg, klass, sp [0]);
9165 ins = handle_ccastclass (cfg, klass, sp [0]);
9171 case CEE_MONO_SAVE_LMF:
9172 case CEE_MONO_RESTORE_LMF:
9173 #ifdef MONO_ARCH_HAVE_LMF_OPS
9174 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9175 MONO_ADD_INS (bblock, ins);
9176 cfg->need_lmf_area = TRUE;
9180 case CEE_MONO_CLASSCONST:
9181 CHECK_STACK_OVF (1);
9183 token = read32 (ip + 2);
9184 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9187 inline_costs += 10 * num_calls++;
9189 case CEE_MONO_NOT_TAKEN:
9190 bblock->out_of_line = TRUE;
9194 CHECK_STACK_OVF (1);
9196 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9197 ins->dreg = alloc_preg (cfg);
9198 ins->inst_offset = (gint32)read32 (ip + 2);
9199 ins->type = STACK_PTR;
9200 MONO_ADD_INS (bblock, ins);
9204 case CEE_MONO_DYN_CALL: {
9207 /* It would be easier to call a trampoline, but that would put an
9208 * extra frame on the stack, confusing exception handling. So
9209 * implement it inline using an opcode for now.
9212 if (!cfg->dyn_call_var) {
9213 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9214 /* prevent it from being register allocated */
9215 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9218 /* Has to use a call inst since the local register allocator expects it */
9219 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9220 ins = (MonoInst*)call;
9222 ins->sreg1 = sp [0]->dreg;
9223 ins->sreg2 = sp [1]->dreg;
9224 MONO_ADD_INS (bblock, ins);
9226 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9227 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9231 inline_costs += 10 * num_calls++;
9236 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9246 /* somewhat similar to LDTOKEN */
9247 MonoInst *addr, *vtvar;
9248 CHECK_STACK_OVF (1);
9249 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9251 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9252 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9254 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9255 ins->type = STACK_VTYPE;
9256 ins->klass = mono_defaults.argumenthandle_class;
9269 * The following transforms:
9270 * CEE_CEQ into OP_CEQ
9271 * CEE_CGT into OP_CGT
9272 * CEE_CGT_UN into OP_CGT_UN
9273 * CEE_CLT into OP_CLT
9274 * CEE_CLT_UN into OP_CLT_UN
9276 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9278 MONO_INST_NEW (cfg, ins, cmp->opcode);
9280 cmp->sreg1 = sp [0]->dreg;
9281 cmp->sreg2 = sp [1]->dreg;
9282 type_from_op (cmp, sp [0], sp [1]);
9284 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9285 cmp->opcode = OP_LCOMPARE;
9286 else if (sp [0]->type == STACK_R8)
9287 cmp->opcode = OP_FCOMPARE;
9289 cmp->opcode = OP_ICOMPARE;
9290 MONO_ADD_INS (bblock, cmp);
9291 ins->type = STACK_I4;
9292 ins->dreg = alloc_dreg (cfg, ins->type);
9293 type_from_op (ins, sp [0], sp [1]);
9295 if (cmp->opcode == OP_FCOMPARE) {
9297 * The backends expect the fceq opcodes to do the
9300 cmp->opcode = OP_NOP;
9301 ins->sreg1 = cmp->sreg1;
9302 ins->sreg2 = cmp->sreg2;
9304 MONO_ADD_INS (bblock, ins);
9311 MonoMethod *cil_method;
9312 gboolean needs_static_rgctx_invoke;
9314 CHECK_STACK_OVF (1);
9316 n = read32 (ip + 2);
9317 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9320 mono_class_init (cmethod->klass);
9322 mono_save_token_info (cfg, image, n, cmethod);
9324 if (cfg->generic_sharing_context)
9325 context_used = mono_method_check_context_used (cmethod);
9327 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9329 cil_method = cmethod;
9330 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9331 METHOD_ACCESS_FAILURE;
9333 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9334 if (check_linkdemand (cfg, method, cmethod))
9336 CHECK_CFG_EXCEPTION;
9337 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9338 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9342 * Optimize the common case of ldftn+delegate creation
9344 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9345 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9346 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9348 int invoke_context_used = 0;
9350 invoke = mono_get_delegate_invoke (ctor_method->klass);
9351 if (!invoke || !mono_method_signature (invoke))
9354 if (cfg->generic_sharing_context)
9355 invoke_context_used = mono_method_check_context_used (invoke);
9357 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9358 /* FIXME: SGEN support */
9359 if (invoke_context_used == 0) {
9360 MonoInst *target_ins;
9363 if (cfg->verbose_level > 3)
9364 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9365 target_ins = sp [-1];
9367 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9368 CHECK_CFG_EXCEPTION;
9377 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9378 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9382 inline_costs += 10 * num_calls++;
9385 case CEE_LDVIRTFTN: {
9390 n = read32 (ip + 2);
9391 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9394 mono_class_init (cmethod->klass);
9396 if (cfg->generic_sharing_context)
9397 context_used = mono_method_check_context_used (cmethod);
9399 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9400 if (check_linkdemand (cfg, method, cmethod))
9402 CHECK_CFG_EXCEPTION;
9403 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9404 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9410 args [1] = emit_get_rgctx_method (cfg, context_used,
9411 cmethod, MONO_RGCTX_INFO_METHOD);
9414 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9416 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9419 inline_costs += 10 * num_calls++;
9423 CHECK_STACK_OVF (1);
9425 n = read16 (ip + 2);
9427 EMIT_NEW_ARGLOAD (cfg, ins, n);
9432 CHECK_STACK_OVF (1);
9434 n = read16 (ip + 2);
9436 NEW_ARGLOADA (cfg, ins, n);
9437 MONO_ADD_INS (cfg->cbb, ins);
9445 n = read16 (ip + 2);
9447 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9449 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9453 CHECK_STACK_OVF (1);
9455 n = read16 (ip + 2);
9457 EMIT_NEW_LOCLOAD (cfg, ins, n);
9462 unsigned char *tmp_ip;
9463 CHECK_STACK_OVF (1);
9465 n = read16 (ip + 2);
9468 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9474 EMIT_NEW_LOCLOADA (cfg, ins, n);
9483 n = read16 (ip + 2);
9485 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9487 emit_stloc_ir (cfg, sp, header, n);
9494 if (sp != stack_start)
9496 if (cfg->method != method)
9498 * Inlining this into a loop in a parent could lead to
9499 * stack overflows which is different behavior than the
9500 * non-inlined case, thus disable inlining in this case.
9502 goto inline_failure;
9504 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9505 ins->dreg = alloc_preg (cfg);
9506 ins->sreg1 = sp [0]->dreg;
9507 ins->type = STACK_PTR;
9508 MONO_ADD_INS (cfg->cbb, ins);
9510 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9512 ins->flags |= MONO_INST_INIT;
9517 case CEE_ENDFILTER: {
9518 MonoExceptionClause *clause, *nearest;
9519 int cc, nearest_num;
9523 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9525 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9526 ins->sreg1 = (*sp)->dreg;
9527 MONO_ADD_INS (bblock, ins);
9528 start_new_bblock = 1;
9533 for (cc = 0; cc < header->num_clauses; ++cc) {
9534 clause = &header->clauses [cc];
9535 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9536 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9537 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9543 if ((ip - header->code) != nearest->handler_offset)
9548 case CEE_UNALIGNED_:
9549 ins_flag |= MONO_INST_UNALIGNED;
9550 /* FIXME: record alignment? we can assume 1 for now */
9555 ins_flag |= MONO_INST_VOLATILE;
9559 ins_flag |= MONO_INST_TAILCALL;
9560 cfg->flags |= MONO_CFG_HAS_TAIL;
9561 /* Can't inline tail calls at this time */
9562 inline_costs += 100000;
9569 token = read32 (ip + 2);
9570 klass = mini_get_class (method, token, generic_context);
9571 CHECK_TYPELOAD (klass);
9572 if (generic_class_is_reference_type (cfg, klass))
9573 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9575 mini_emit_initobj (cfg, *sp, NULL, klass);
9579 case CEE_CONSTRAINED_:
9581 token = read32 (ip + 2);
9582 if (method->wrapper_type != MONO_WRAPPER_NONE)
9583 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9585 constrained_call = mono_class_get_full (image, token, generic_context);
9586 CHECK_TYPELOAD (constrained_call);
9591 MonoInst *iargs [3];
9595 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9596 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9597 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9598 /* emit_memset only works when val == 0 */
9599 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9604 if (ip [1] == CEE_CPBLK) {
9605 MonoMethod *memcpy_method = get_memcpy_method ();
9606 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9608 MonoMethod *memset_method = get_memset_method ();
9609 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9619 ins_flag |= MONO_INST_NOTYPECHECK;
9621 ins_flag |= MONO_INST_NORANGECHECK;
9622 /* we ignore the no-nullcheck for now since we
9623 * really do it explicitly only when doing callvirt->call
9629 int handler_offset = -1;
9631 for (i = 0; i < header->num_clauses; ++i) {
9632 MonoExceptionClause *clause = &header->clauses [i];
9633 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9634 handler_offset = clause->handler_offset;
9639 bblock->flags |= BB_EXCEPTION_UNSAFE;
9641 g_assert (handler_offset != -1);
9643 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9644 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9645 ins->sreg1 = load->dreg;
9646 MONO_ADD_INS (bblock, ins);
9648 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9649 MONO_ADD_INS (bblock, ins);
9652 link_bblock (cfg, bblock, end_bblock);
9653 start_new_bblock = 1;
9661 CHECK_STACK_OVF (1);
9663 token = read32 (ip + 2);
9664 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9665 MonoType *type = mono_type_create_from_typespec (image, token);
9666 token = mono_type_size (type, &ialign);
9668 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9669 CHECK_TYPELOAD (klass);
9670 mono_class_init (klass);
9671 token = mono_class_value_size (klass, &align);
9673 EMIT_NEW_ICONST (cfg, ins, token);
9678 case CEE_REFANYTYPE: {
9679 MonoInst *src_var, *src;
9685 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9687 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9688 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9689 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9707 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9717 g_warning ("opcode 0x%02x not handled", *ip);
9721 if (start_new_bblock != 1)
9724 bblock->cil_length = ip - bblock->cil_code;
9725 bblock->next_bb = end_bblock;
9727 if (cfg->method == method && cfg->domainvar) {
9729 MonoInst *get_domain;
9731 cfg->cbb = init_localsbb;
9733 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9734 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9737 get_domain->dreg = alloc_preg (cfg);
9738 MONO_ADD_INS (cfg->cbb, get_domain);
9740 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9741 MONO_ADD_INS (cfg->cbb, store);
9744 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9745 if (cfg->compile_aot)
9746 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9747 mono_get_got_var (cfg);
9750 if (cfg->method == method && cfg->got_var)
9751 mono_emit_load_got_addr (cfg);
9756 cfg->cbb = init_localsbb;
9758 for (i = 0; i < header->num_locals; ++i) {
9759 MonoType *ptype = header->locals [i];
9760 int t = ptype->type;
9761 dreg = cfg->locals [i]->dreg;
9763 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9764 t = mono_class_enum_basetype (ptype->data.klass)->type;
9766 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9767 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9768 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9769 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9770 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9771 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9772 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9773 ins->type = STACK_R8;
9774 ins->inst_p0 = (void*)&r8_0;
9775 ins->dreg = alloc_dreg (cfg, STACK_R8);
9776 MONO_ADD_INS (init_localsbb, ins);
9777 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9778 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9779 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9780 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9782 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9787 if (cfg->init_ref_vars && cfg->method == method) {
9788 /* Emit initialization for ref vars */
9789 // FIXME: Avoid duplication initialization for IL locals.
9790 for (i = 0; i < cfg->num_varinfo; ++i) {
9791 MonoInst *ins = cfg->varinfo [i];
9793 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9794 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9798 /* Add a sequence point for method entry/exit events */
9800 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9801 MONO_ADD_INS (init_localsbb, ins);
9802 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9803 MONO_ADD_INS (cfg->bb_exit, ins);
9808 if (cfg->method == method) {
9810 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9811 bb->region = mono_find_block_region (cfg, bb->real_offset);
9813 mono_create_spvar_for_region (cfg, bb->region);
9814 if (cfg->verbose_level > 2)
9815 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9819 g_slist_free (class_inits);
9820 dont_inline = g_list_remove (dont_inline, method);
9822 if (inline_costs < 0) {
9825 /* Method is too large */
9826 mname = mono_method_full_name (method, TRUE);
9827 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9828 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9830 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9831 mono_basic_block_free (original_bb);
9835 if ((cfg->verbose_level > 2) && (cfg->method == method))
9836 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9838 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9839 mono_basic_block_free (original_bb);
9840 return inline_costs;
9843 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9850 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9854 set_exception_type_from_invalid_il (cfg, method, ip);
9858 g_slist_free (class_inits);
9859 mono_basic_block_free (original_bb);
9860 dont_inline = g_list_remove (dont_inline, method);
9861 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE<size>_MEMBASE_REG opcode to the matching
 * STORE<size>_MEMBASE_IMM opcode, so that a constant source value can be
 * folded directly into the store instruction.  Any other opcode aborts
 * via g_assert_not_reached ().
 * NOTE(review): this extract is missing lines (return type, switch
 * header, closing braces); comments only, code left byte-identical.
 */
9866 store_membase_reg_to_store_membase_imm (int opcode)
9869 case OP_STORE_MEMBASE_REG:
9870 return OP_STORE_MEMBASE_IMM;
9871 case OP_STOREI1_MEMBASE_REG:
9872 return OP_STOREI1_MEMBASE_IMM;
9873 case OP_STOREI2_MEMBASE_REG:
9874 return OP_STOREI2_MEMBASE_IMM;
9875 case OP_STOREI4_MEMBASE_REG:
9876 return OP_STOREI4_MEMBASE_IMM;
9877 case OP_STOREI8_MEMBASE_REG:
9878 return OP_STOREI8_MEMBASE_IMM;
/* unsupported store opcode for reg->imm conversion */
9880 g_assert_not_reached ();
9886 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to its immediate-operand
 * (_IMM) variant, used to fold constant operands into instructions.
 * NOTE(review): most case labels are elided in this extract; only the
 * return statements survive.  Presumably returns -1 when no immediate
 * form exists (the function tail is not visible here) -- confirm
 * against the full source.
 */
9889 mono_op_to_op_imm (int opcode)
9899 return OP_IDIV_UN_IMM;
9903 return OP_IREM_UN_IMM;
9917 return OP_ISHR_UN_IMM;
9934 return OP_LSHR_UN_IMM;
9937 return OP_COMPARE_IMM;
9939 return OP_ICOMPARE_IMM;
9941 return OP_LCOMPARE_IMM;
/* store opcodes: fold the stored value into the instruction */
9943 case OP_STORE_MEMBASE_REG:
9944 return OP_STORE_MEMBASE_IMM;
9945 case OP_STOREI1_MEMBASE_REG:
9946 return OP_STOREI1_MEMBASE_IMM;
9947 case OP_STOREI2_MEMBASE_REG:
9948 return OP_STOREI2_MEMBASE_IMM;
9949 case OP_STOREI4_MEMBASE_REG:
9950 return OP_STOREI4_MEMBASE_IMM;
/* target-specific opcodes only exist on x86/amd64 */
9952 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9954 return OP_X86_PUSH_IMM;
9955 case OP_X86_COMPARE_MEMBASE_REG:
9956 return OP_X86_COMPARE_MEMBASE_IMM;
9958 #if defined(TARGET_AMD64)
9959 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9960 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9962 case OP_VOIDCALL_REG:
9971 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * JIT OP_LOAD*_MEMBASE opcode.  Aborts on unknown opcodes.
 * NOTE(review): the case labels were dropped from this extract; the
 * two identical OP_LOAD_MEMBASE returns (orig. lines 9994/9996) are
 * presumably the native-pointer-sized cases (LDIND_I and LDIND_REF) --
 * confirm against the full source.
 */
9978 ldind_to_load_membase (int opcode)
9982 return OP_LOADI1_MEMBASE;
9984 return OP_LOADU1_MEMBASE;
9986 return OP_LOADI2_MEMBASE;
9988 return OP_LOADU2_MEMBASE;
9990 return OP_LOADI4_MEMBASE;
9992 return OP_LOADU4_MEMBASE;
9994 return OP_LOAD_MEMBASE;
9996 return OP_LOAD_MEMBASE;
9998 return OP_LOADI8_MEMBASE;
10000 return OP_LOADR4_MEMBASE;
10002 return OP_LOADR8_MEMBASE;
/* unknown LDIND variant */
10004 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * JIT OP_STORE*_MEMBASE_REG opcode.  Aborts on unknown opcodes.
 * NOTE(review): most case labels are missing from this extract; only
 * CEE_STIND_REF (pointer-sized store) is still visible.
 */
10011 stind_to_store_membase (int opcode)
10015 return OP_STOREI1_MEMBASE_REG;
10017 return OP_STOREI2_MEMBASE_REG;
10019 return OP_STOREI4_MEMBASE_REG;
10021 case CEE_STIND_REF:
10022 return OP_STORE_MEMBASE_REG;
10024 return OP_STOREI8_MEMBASE_REG;
10026 return OP_STORER4_MEMBASE_REG;
10028 return OP_STORER8_MEMBASE_REG;
/* unknown STIND variant */
10030 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * form, for targets that support loads from a constant address.  Only
 * implemented for x86/amd64 (see the FIXME below); the 64-bit load is
 * additionally gated on SIZEOF_REGISTER == 8.
 * NOTE(review): the fall-through/default return is not visible in this
 * extract -- presumably -1 when no _MEM form exists; confirm.
 */
10037 mono_load_membase_to_load_mem (int opcode)
10039 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10040 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10042 case OP_LOAD_MEMBASE:
10043 return OP_LOAD_MEM;
10044 case OP_LOADU1_MEMBASE:
10045 return OP_LOADU1_MEM;
10046 case OP_LOADU2_MEMBASE:
10047 return OP_LOADU2_MEM;
10048 case OP_LOADI4_MEMBASE:
10049 return OP_LOADI4_MEM;
10050 case OP_LOADU4_MEMBASE:
10051 return OP_LOADU4_MEM;
10052 #if SIZEOF_REGISTER == 8
10053 case OP_LOADI8_MEMBASE:
10054 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back to a
 * stack slot with STORE_OPCODE, return the fused read-modify-write
 * _MEMBASE opcode that operates directly on memory, or (presumably) -1
 * when no fusion is possible -- the default return is not visible in
 * this extract.  Per-target: x86 fuses 32-bit stores only; amd64 also
 * handles 64-bit stores, reusing the OP_X86_* opcodes for the 32-bit
 * operations and OP_AMD64_* for the 64-bit ones.
 * NOTE(review): the case labels and brace structure were dropped by
 * the extract; comments only, code left byte-identical.
 */
10063 op_to_op_dest_membase (int store_opcode, int opcode)
10065 #if defined(TARGET_X86)
/* only plain pointer-size / 32-bit stores can be fused on x86 */
10066 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10071 return OP_X86_ADD_MEMBASE_REG;
10073 return OP_X86_SUB_MEMBASE_REG;
10075 return OP_X86_AND_MEMBASE_REG;
10077 return OP_X86_OR_MEMBASE_REG;
10079 return OP_X86_XOR_MEMBASE_REG;
10082 return OP_X86_ADD_MEMBASE_IMM;
10085 return OP_X86_SUB_MEMBASE_IMM;
10088 return OP_X86_AND_MEMBASE_IMM;
10091 return OP_X86_OR_MEMBASE_IMM;
10094 return OP_X86_XOR_MEMBASE_IMM;
10100 #if defined(TARGET_AMD64)
/* amd64 can also fuse 64-bit stores */
10101 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10106 return OP_X86_ADD_MEMBASE_REG;
10108 return OP_X86_SUB_MEMBASE_REG;
10110 return OP_X86_AND_MEMBASE_REG;
10112 return OP_X86_OR_MEMBASE_REG;
10114 return OP_X86_XOR_MEMBASE_REG;
10116 return OP_X86_ADD_MEMBASE_IMM;
10118 return OP_X86_SUB_MEMBASE_IMM;
10120 return OP_X86_AND_MEMBASE_IMM;
10122 return OP_X86_OR_MEMBASE_IMM;
10124 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit operations use the AMD64-specific opcodes */
10126 return OP_AMD64_ADD_MEMBASE_REG;
10128 return OP_AMD64_SUB_MEMBASE_REG;
10130 return OP_AMD64_AND_MEMBASE_REG;
10132 return OP_AMD64_OR_MEMBASE_REG;
10134 return OP_AMD64_XOR_MEMBASE_REG;
10137 return OP_AMD64_ADD_MEMBASE_IMM;
10140 return OP_AMD64_SUB_MEMBASE_IMM;
10143 return OP_AMD64_AND_MEMBASE_IMM;
10146 return OP_AMD64_OR_MEMBASE_IMM;
10149 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse an opcode producing a boolean-style result with the byte store
 * that follows it, yielding an x86 SETcc-to-memory opcode.  Only
 * applies when the store is a one-byte store (OP_STOREI1_MEMBASE_REG)
 * on x86/amd64.
 * NOTE(review): the case labels (presumably the SETEQ/SETNE-producing
 * opcodes, given the returns) and the default return were dropped by
 * this extract -- confirm against the full source.
 */
10159 op_to_op_store_membase (int store_opcode, int opcode)
10161 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10164 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10165 return OP_X86_SETEQ_MEMBASE;
10167 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10168 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding an instruction's first source operand into the
 * instruction itself, returning a *_MEMBASE variant that reads the
 * operand directly from memory; -1 (presumably, default not visible in
 * this extract) when no fusion applies.  LOAD_OPCODE is the opcode of
 * the load being folded, OPCODE the consumer.
 */
10176 op_to_op_src1_membase (int load_opcode, int opcode)
10179 /* FIXME: This has sign extension issues */
10181 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10182 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-size / 32-bit loads can be folded */
10185 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10190 return OP_X86_PUSH_MEMBASE;
10191 case OP_COMPARE_IMM:
10192 case OP_ICOMPARE_IMM:
10193 return OP_X86_COMPARE_MEMBASE_IMM;
10196 return OP_X86_COMPARE_MEMBASE_REG;
10200 #ifdef TARGET_AMD64
10201 /* FIXME: This has sign extension issues */
10203 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10204 return OP_X86_COMPARE_MEMBASE8_IMM;
10209 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10210 return OP_X86_PUSH_MEMBASE;
/* NOTE(review): orig. line 10212 opens a block comment that disables
 * the COMPARE_IMM/LCOMPARE_IMM cases below ("only works for 32 bit
 * immediates"); the closing delimiter is outside this extract. */
10212 /* FIXME: This only works for 32 bit immediates
10213 case OP_COMPARE_IMM:
10214 case OP_LCOMPARE_IMM:
10215 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10216 return OP_AMD64_COMPARE_MEMBASE_IMM;
10218 case OP_ICOMPARE_IMM:
10219 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10220 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10224 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10225 return OP_AMD64_COMPARE_MEMBASE_REG;
10228 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10229 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding an instruction's second source operand into the
 * instruction, returning a *_REG_MEMBASE variant; -1 (presumably,
 * default not visible here) when no fusion applies.  On amd64 the
 * 32-bit forms reuse the OP_X86_* opcodes and the 64-bit forms use
 * OP_AMD64_*; each fusion is guarded by the width of the load being
 * folded.
 * NOTE(review): case labels and target #if structure are partially
 * elided by this extract; comments only, code left byte-identical.
 */
10238 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-size / 32-bit loads can be folded */
10241 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10247 return OP_X86_COMPARE_REG_MEMBASE;
10249 return OP_X86_ADD_REG_MEMBASE;
10251 return OP_X86_SUB_REG_MEMBASE;
10253 return OP_X86_AND_REG_MEMBASE;
10255 return OP_X86_OR_REG_MEMBASE;
10257 return OP_X86_XOR_REG_MEMBASE;
10261 #ifdef TARGET_AMD64
10264 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10265 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10269 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10270 return OP_AMD64_COMPARE_REG_MEMBASE;
10273 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10274 return OP_X86_ADD_REG_MEMBASE;
10276 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10277 return OP_X86_SUB_REG_MEMBASE;
10279 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10280 return OP_X86_AND_REG_MEMBASE;
10282 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10283 return OP_X86_OR_REG_MEMBASE;
10285 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10286 return OP_X86_XOR_REG_MEMBASE;
10288 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10289 return OP_AMD64_ADD_REG_MEMBASE;
10291 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10292 return OP_AMD64_SUB_REG_MEMBASE;
10294 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10295 return OP_AMD64_AND_REG_MEMBASE;
10297 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10298 return OP_AMD64_OR_REG_MEMBASE;
10300 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10301 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that are emulated in software on this target (long shifts on 32-bit
 * registers, mul/div when MONO_ARCH_EMULATE_* is set), since the
 * emulation helpers have no immediate form.  Falls through to
 * mono_op_to_op_imm () for everything else.
 * NOTE(review): the case labels inside the two #if regions are elided
 * in this extract.
 */
10309 mono_op_to_op_imm_noemul (int opcode)
10312 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10318 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10326 return mono_op_to_op_imm (opcode);
10330 #ifndef DISABLE_JIT
10333 * mono_handle_global_vregs:
10335 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (as visible in this extract):
 *   1) walk every instruction of every bblock, recording for each vreg
 *      the bblock it was seen in (vreg_to_bb); a vreg seen in two
 *      different bblocks is promoted to a MonoInst variable and marked
 *      -1 ("used in more than one bb");
 *   2) variables used in only one bblock are demoted back to local
 *      vregs (flagged MONO_INST_IS_DEAD);
 *   3) the varinfo/vars tables are compacted to drop the dead entries.
 * NOTE(review): the extract is missing interior lines (braces, switch
 * bodies, some declarations); comments only, code left byte-identical.
 */
10339 mono_handle_global_vregs (MonoCompile *cfg)
10341 gint32 *vreg_to_bb;
10342 MonoBasicBlock *bb;
/* NOTE(review): element size is sizeof (gint32*) though the array holds
 * gint32 (harmless over-allocation on 64-bit), and the "+ 1" binds
 * outside the multiplication, adding one byte rather than one element.
 * Likely intended: sizeof (gint32) * (cfg->next_vreg + 1). */
10345 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10347 #ifdef MONO_ARCH_SIMD_INTRINSICS
10348 if (cfg->uses_simd_intrinsics)
10349 mono_simd_simplify_indirection (cfg);
10352 /* Find local vregs used in more than one bb */
10353 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10354 MonoInst *ins = bb->code;
10355 int block_num = bb->block_num;
10357 if (cfg->verbose_level > 2)
10358 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10361 for (; ins; ins = ins->next) {
10362 const char *spec = INS_INFO (ins->opcode);
10363 int regtype = 0, regindex;
10366 if (G_UNLIKELY (cfg->verbose_level > 2))
10367 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine ops */
10369 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
10371 for (regindex = 0; regindex < 4; regindex ++) {
10374 if (regindex == 0) {
10375 regtype = spec [MONO_INST_DEST];
10376 if (regtype == ' ')
10379 } else if (regindex == 1) {
10380 regtype = spec [MONO_INST_SRC1];
10381 if (regtype == ' ')
10384 } else if (regindex == 2) {
10385 regtype = spec [MONO_INST_SRC2];
10386 if (regtype == ' ')
10389 } else if (regindex == 3) {
10390 regtype = spec [MONO_INST_SRC3];
10391 if (regtype == ' ')
10396 #if SIZEOF_REGISTER == 4
10397 /* In the LLVM case, the long opcodes are not decomposed */
10398 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10400 * Since some instructions reference the original long vreg,
10401 * and some reference the two component vregs, it is quite hard
10402 * to determine when it needs to be global. So be conservative.
10404 if (!get_vreg_to_inst (cfg, vreg)) {
10405 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10407 if (cfg->verbose_level > 2)
10408 printf ("LONG VREG R%d made global.\n", vreg);
10412 * Make the component vregs volatile since the optimizations can
10413 * get confused otherwise.
10415 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10416 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10420 g_assert (vreg != -1);
10422 prev_bb = vreg_to_bb [vreg];
10423 if (prev_bb == 0) {
10424 /* 0 is a valid block num */
10425 vreg_to_bb [vreg] = block_num + 1;
10426 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never made global */
10427 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10430 if (!get_vreg_to_inst (cfg, vreg)) {
10431 if (G_UNLIKELY (cfg->verbose_level > 2))
10432 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* variable type chosen by regtype: int / long / double / vtype */
10436 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10439 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10442 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10445 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10448 g_assert_not_reached ();
10452 /* Flag as having been used in more than one bb */
10453 vreg_to_bb [vreg] = -1;
10459 /* If a variable is used in only one bblock, convert it into a local vreg */
10460 for (i = 0; i < cfg->num_varinfo; i++) {
10461 MonoInst *var = cfg->varinfo [i];
10462 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10464 switch (var->type) {
10470 #if SIZEOF_REGISTER == 8
10473 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10474 /* Enabling this screws up the fp stack on x86 */
10477 /* Arguments are implicitly global */
10478 /* Putting R4 vars into registers doesn't work currently */
10479 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10481 * Make that the variable's liveness interval doesn't contain a call, since
10482 * that would cause the lvreg to be spilled, making the whole optimization
10485 /* This is too slow for JIT compilation */
/* NOTE(review): vreg_to_bb holds gint32 block numbers, yet below it is
 * dereferenced as a bblock pointer (->code).  In the full source this
 * region appears to be compiled out (#if 0 / disabled) -- the guard
 * lines are missing from this extract; confirm before relying on it. */
10487 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10489 int def_index, call_index, ins_index;
10490 gboolean spilled = FALSE;
10495 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10496 const char *spec = INS_INFO (ins->opcode);
10498 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10499 def_index = ins_index;
/* NOTE(review): copy-paste bug -- both clauses of this || test
 * SRC1/sreg1; the second clause almost certainly should test
 * SRC2/sreg2, otherwise uses through sreg2 are never seen here. */
10501 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10502 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10503 if (call_index > def_index) {
10509 if (MONO_IS_CALL (ins))
10510 call_index = ins_index;
10520 if (G_UNLIKELY (cfg->verbose_level > 2))
10521 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and drop the vreg->var mapping */
10522 var->flags |= MONO_INST_IS_DEAD;
10523 cfg->vreg_to_inst [var->dreg] = NULL;
10530 * Compress the varinfo and vars tables so the liveness computation is faster and
10531 * takes up less space.
10534 for (i = 0; i < cfg->num_varinfo; ++i) {
10535 MonoInst *var = cfg->varinfo [i];
10536 if (pos < i && cfg->locals_start == i)
10537 cfg->locals_start = pos;
10538 if (!(var->flags & MONO_INST_IS_DEAD)) {
10540 cfg->varinfo [pos] = cfg->varinfo [i];
10541 cfg->varinfo [pos]->inst_c0 = pos;
10542 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10543 cfg->vars [pos].idx = pos;
10544 #if SIZEOF_REGISTER == 4
10545 if (cfg->varinfo [pos]->type == STACK_I8) {
10546 /* Modify the two component vars too */
10549 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10550 var1->inst_c0 = pos;
10551 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10552 var1->inst_c0 = pos;
10559 cfg->num_varinfo = pos;
10560 if (cfg->locals_start > cfg->num_varinfo)
10561 cfg->locals_start = cfg->num_varinfo;
10565 * mono_spill_global_vars:
10567 * Generate spill code for variables which are not allocated to registers,
10568 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10569 * code is generated which could be optimized by the local optimization passes.
10572 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10574 MonoBasicBlock *bb;
10576 int orig_next_vreg;
10577 guint32 *vreg_to_lvreg;
10579 guint32 i, lvregs_len;
10580 gboolean dest_has_lvreg = FALSE;
10581 guint32 stacktypes [128];
10582 MonoInst **live_range_start, **live_range_end;
10583 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10585 *need_local_opts = FALSE;
10587 memset (spec2, 0, sizeof (spec2));
10589 /* FIXME: Move this function to mini.c */
10590 stacktypes ['i'] = STACK_PTR;
10591 stacktypes ['l'] = STACK_I8;
10592 stacktypes ['f'] = STACK_R8;
10593 #ifdef MONO_ARCH_SIMD_INTRINSICS
10594 stacktypes ['x'] = STACK_VTYPE;
10597 #if SIZEOF_REGISTER == 4
10598 /* Create MonoInsts for longs */
10599 for (i = 0; i < cfg->num_varinfo; i++) {
10600 MonoInst *ins = cfg->varinfo [i];
10602 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10603 switch (ins->type) {
10608 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10611 g_assert (ins->opcode == OP_REGOFFSET);
10613 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10615 tree->opcode = OP_REGOFFSET;
10616 tree->inst_basereg = ins->inst_basereg;
10617 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10619 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10621 tree->opcode = OP_REGOFFSET;
10622 tree->inst_basereg = ins->inst_basereg;
10623 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10633 /* FIXME: widening and truncation */
10636 * As an optimization, when a variable allocated to the stack is first loaded into
10637 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10638 * the variable again.
10640 orig_next_vreg = cfg->next_vreg;
10641 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10642 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10646 * These arrays contain the first and last instructions accessing a given
10648 * Since we emit bblocks in the same order we process them here, and we
10649 * don't split live ranges, these will precisely describe the live range of
10650 * the variable, i.e. the instruction range where a valid value can be found
10651 * in the variables location.
10652 * The live range is computed using the liveness info computed by the liveness pass.
10653 * We can't use vmv->range, since that is an abstract live range, and we need
10654 * one which is instruction precise.
10655 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10657 /* FIXME: Only do this if debugging info is requested */
10658 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10659 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10660 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10661 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10663 /* Add spill loads/stores */
10664 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10667 if (cfg->verbose_level > 2)
10668 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10670 /* Clear vreg_to_lvreg array */
10671 for (i = 0; i < lvregs_len; i++)
10672 vreg_to_lvreg [lvregs [i]] = 0;
10676 MONO_BB_FOR_EACH_INS (bb, ins) {
10677 const char *spec = INS_INFO (ins->opcode);
10678 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10679 gboolean store, no_lvreg;
10680 int sregs [MONO_MAX_SRC_REGS];
10682 if (G_UNLIKELY (cfg->verbose_level > 2))
10683 mono_print_ins (ins);
10685 if (ins->opcode == OP_NOP)
10689 * We handle LDADDR here as well, since it can only be decomposed
10690 * when variable addresses are known.
10692 if (ins->opcode == OP_LDADDR) {
10693 MonoInst *var = ins->inst_p0;
10695 if (var->opcode == OP_VTARG_ADDR) {
10696 /* Happens on SPARC/S390 where vtypes are passed by reference */
10697 MonoInst *vtaddr = var->inst_left;
10698 if (vtaddr->opcode == OP_REGVAR) {
10699 ins->opcode = OP_MOVE;
10700 ins->sreg1 = vtaddr->dreg;
10702 else if (var->inst_left->opcode == OP_REGOFFSET) {
10703 ins->opcode = OP_LOAD_MEMBASE;
10704 ins->inst_basereg = vtaddr->inst_basereg;
10705 ins->inst_offset = vtaddr->inst_offset;
10709 g_assert (var->opcode == OP_REGOFFSET);
10711 ins->opcode = OP_ADD_IMM;
10712 ins->sreg1 = var->inst_basereg;
10713 ins->inst_imm = var->inst_offset;
10716 *need_local_opts = TRUE;
10717 spec = INS_INFO (ins->opcode);
10720 if (ins->opcode < MONO_CEE_LAST) {
10721 mono_print_ins (ins);
10722 g_assert_not_reached ();
10726 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10730 if (MONO_IS_STORE_MEMBASE (ins)) {
10731 tmp_reg = ins->dreg;
10732 ins->dreg = ins->sreg2;
10733 ins->sreg2 = tmp_reg;
10736 spec2 [MONO_INST_DEST] = ' ';
10737 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10738 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10739 spec2 [MONO_INST_SRC3] = ' ';
10741 } else if (MONO_IS_STORE_MEMINDEX (ins))
10742 g_assert_not_reached ();
10747 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10748 printf ("\t %.3s %d", spec, ins->dreg);
10749 num_sregs = mono_inst_get_src_registers (ins, sregs);
10750 for (srcindex = 0; srcindex < 3; ++srcindex)
10751 printf (" %d", sregs [srcindex]);
10758 regtype = spec [MONO_INST_DEST];
10759 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10762 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10763 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10764 MonoInst *store_ins;
10766 MonoInst *def_ins = ins;
10767 int dreg = ins->dreg; /* The original vreg */
10769 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10771 if (var->opcode == OP_REGVAR) {
10772 ins->dreg = var->dreg;
10773 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10775 * Instead of emitting a load+store, use a _membase opcode.
10777 g_assert (var->opcode == OP_REGOFFSET);
10778 if (ins->opcode == OP_MOVE) {
10782 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10783 ins->inst_basereg = var->inst_basereg;
10784 ins->inst_offset = var->inst_offset;
10787 spec = INS_INFO (ins->opcode);
10791 g_assert (var->opcode == OP_REGOFFSET);
10793 prev_dreg = ins->dreg;
10795 /* Invalidate any previous lvreg for this vreg */
10796 vreg_to_lvreg [ins->dreg] = 0;
10800 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10802 store_opcode = OP_STOREI8_MEMBASE_REG;
10805 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10807 if (regtype == 'l') {
10808 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10809 mono_bblock_insert_after_ins (bb, ins, store_ins);
10810 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10811 mono_bblock_insert_after_ins (bb, ins, store_ins);
10812 def_ins = store_ins;
10815 g_assert (store_opcode != OP_STOREV_MEMBASE);
10817 /* Try to fuse the store into the instruction itself */
10818 /* FIXME: Add more instructions */
10819 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10820 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10821 ins->inst_imm = ins->inst_c0;
10822 ins->inst_destbasereg = var->inst_basereg;
10823 ins->inst_offset = var->inst_offset;
10824 spec = INS_INFO (ins->opcode);
10825 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10826 ins->opcode = store_opcode;
10827 ins->inst_destbasereg = var->inst_basereg;
10828 ins->inst_offset = var->inst_offset;
10832 tmp_reg = ins->dreg;
10833 ins->dreg = ins->sreg2;
10834 ins->sreg2 = tmp_reg;
10837 spec2 [MONO_INST_DEST] = ' ';
10838 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10839 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10840 spec2 [MONO_INST_SRC3] = ' ';
10842 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10843 // FIXME: The backends expect the base reg to be in inst_basereg
10844 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10846 ins->inst_basereg = var->inst_basereg;
10847 ins->inst_offset = var->inst_offset;
10848 spec = INS_INFO (ins->opcode);
10850 /* printf ("INS: "); mono_print_ins (ins); */
10851 /* Create a store instruction */
10852 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10854 /* Insert it after the instruction */
10855 mono_bblock_insert_after_ins (bb, ins, store_ins);
10857 def_ins = store_ins;
10860 * We can't assign ins->dreg to var->dreg here, since the
10861 * sregs could use it. So set a flag, and do it after
10864 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10865 dest_has_lvreg = TRUE;
10870 if (def_ins && !live_range_start [dreg]) {
10871 live_range_start [dreg] = def_ins;
10872 live_range_start_bb [dreg] = bb;
10879 num_sregs = mono_inst_get_src_registers (ins, sregs);
10880 for (srcindex = 0; srcindex < 3; ++srcindex) {
10881 regtype = spec [MONO_INST_SRC1 + srcindex];
10882 sreg = sregs [srcindex];
10884 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10885 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10886 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10887 MonoInst *use_ins = ins;
10888 MonoInst *load_ins;
10889 guint32 load_opcode;
10891 if (var->opcode == OP_REGVAR) {
10892 sregs [srcindex] = var->dreg;
10893 //mono_inst_set_src_registers (ins, sregs);
10894 live_range_end [sreg] = use_ins;
10895 live_range_end_bb [sreg] = bb;
10899 g_assert (var->opcode == OP_REGOFFSET);
10901 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10903 g_assert (load_opcode != OP_LOADV_MEMBASE);
10905 if (vreg_to_lvreg [sreg]) {
10906 g_assert (vreg_to_lvreg [sreg] != -1);
10908 /* The variable is already loaded to an lvreg */
10909 if (G_UNLIKELY (cfg->verbose_level > 2))
10910 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10911 sregs [srcindex] = vreg_to_lvreg [sreg];
10912 //mono_inst_set_src_registers (ins, sregs);
10916 /* Try to fuse the load into the instruction */
10917 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10918 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10919 sregs [0] = var->inst_basereg;
10920 //mono_inst_set_src_registers (ins, sregs);
10921 ins->inst_offset = var->inst_offset;
10922 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10923 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10924 sregs [1] = var->inst_basereg;
10925 //mono_inst_set_src_registers (ins, sregs);
10926 ins->inst_offset = var->inst_offset;
10928 if (MONO_IS_REAL_MOVE (ins)) {
10929 ins->opcode = OP_NOP;
10932 //printf ("%d ", srcindex); mono_print_ins (ins);
10934 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10936 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10937 if (var->dreg == prev_dreg) {
10939 * sreg refers to the value loaded by the load
10940 * emitted below, but we need to use ins->dreg
10941 * since it refers to the store emitted earlier.
10945 g_assert (sreg != -1);
10946 vreg_to_lvreg [var->dreg] = sreg;
10947 g_assert (lvregs_len < 1024);
10948 lvregs [lvregs_len ++] = var->dreg;
10952 sregs [srcindex] = sreg;
10953 //mono_inst_set_src_registers (ins, sregs);
10955 if (regtype == 'l') {
10956 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10957 mono_bblock_insert_before_ins (bb, ins, load_ins);
10958 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10959 mono_bblock_insert_before_ins (bb, ins, load_ins);
10960 use_ins = load_ins;
10963 #if SIZEOF_REGISTER == 4
10964 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10966 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10967 mono_bblock_insert_before_ins (bb, ins, load_ins);
10968 use_ins = load_ins;
10972 if (var->dreg < orig_next_vreg) {
10973 live_range_end [var->dreg] = use_ins;
10974 live_range_end_bb [var->dreg] = bb;
10978 mono_inst_set_src_registers (ins, sregs);
10980 if (dest_has_lvreg) {
10981 g_assert (ins->dreg != -1);
10982 vreg_to_lvreg [prev_dreg] = ins->dreg;
10983 g_assert (lvregs_len < 1024);
10984 lvregs [lvregs_len ++] = prev_dreg;
10985 dest_has_lvreg = FALSE;
10989 tmp_reg = ins->dreg;
10990 ins->dreg = ins->sreg2;
10991 ins->sreg2 = tmp_reg;
10994 if (MONO_IS_CALL (ins)) {
10995 /* Clear vreg_to_lvreg array */
10996 for (i = 0; i < lvregs_len; i++)
10997 vreg_to_lvreg [lvregs [i]] = 0;
10999 } else if (ins->opcode == OP_NOP) {
11001 MONO_INST_NULLIFY_SREGS (ins);
11004 if (cfg->verbose_level > 2)
11005 mono_print_ins_index (1, ins);
11008 /* Extend the live range based on the liveness info */
11009 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11010 for (i = 0; i < cfg->num_varinfo; i ++) {
11011 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11013 if (vreg_is_volatile (cfg, vi->vreg))
11014 /* The liveness info is incomplete */
11017 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11018 /* Live from at least the first ins of this bb */
11019 live_range_start [vi->vreg] = bb->code;
11020 live_range_start_bb [vi->vreg] = bb;
11023 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11024 /* Live at least until the last ins of this bb */
11025 live_range_end [vi->vreg] = bb->last_ins;
11026 live_range_end_bb [vi->vreg] = bb;
11032 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11034 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11035 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11037 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11038 for (i = 0; i < cfg->num_varinfo; ++i) {
11039 int vreg = MONO_VARINFO (cfg, i)->vreg;
11042 if (live_range_start [vreg]) {
11043 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11045 ins->inst_c1 = vreg;
11046 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11048 if (live_range_end [vreg]) {
11049 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11051 ins->inst_c1 = vreg;
11052 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11053 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11055 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11061 g_free (live_range_start);
11062 g_free (live_range_end);
11063 g_free (live_range_start_bb);
11064 g_free (live_range_end_bb);
11069 * - use 'iadd' instead of 'int_add'
11070 * - handling ovf opcodes: decompose in method_to_ir.
11071 * - unify iregs/fregs
11072 * -> partly done, the missing parts are:
11073 * - a more complete unification would involve unifying the hregs as well, so
11074 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11075 * would no longer map to the machine hregs, so the code generators would need to
11076 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11077 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11078 * fp/non-fp branches speeds it up by about 15%.
11079 * - use sext/zext opcodes instead of shifts
11081 * - get rid of TEMPLOADs if possible and use vregs instead
11082 * - clean up usage of OP_P/OP_ opcodes
11083 * - cleanup usage of DUMMY_USE
11084 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11086 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11087 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11088 * - make sure handle_stack_args () is called before the branch is emitted
11089 * - when the new IR is done, get rid of all unused stuff
11090 * - COMPARE/BEQ as separate instructions or unify them ?
11091 * - keeping them separate allows specialized compare instructions like
11092 * compare_imm, compare_membase
11093 * - most back ends unify fp compare+branch, fp compare+ceq
11094 * - integrate mono_save_args into inline_method
11095 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11096 * - handle long shift opts on 32 bit platforms somehow: they require
11097 * 3 sregs (2 for arg1 and 1 for arg2)
11098 * - make byref a 'normal' type.
11099 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11100 * variable if needed.
11101 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11102 * like inline_method.
11103 * - remove inlining restrictions
11104 * - fix LNEG and enable cfold of INEG
11105 * - generalize x86 optimizations like ldelema as a peephole optimization
11106 * - add store_mem_imm for amd64
11107 * - optimize the loading of the interruption flag in the managed->native wrappers
11108 * - avoid special handling of OP_NOP in passes
11109 * - move code inserting instructions into one function/macro.
11110 * - try a coalescing phase after liveness analysis
11111 * - add float -> vreg conversion + local optimizations on !x86
11112 * - figure out how to handle decomposed branches during optimizations, ie.
11113 * compare+branch, op_jump_table+op_br etc.
11114 * - promote RuntimeXHandles to vregs
11115 * - vtype cleanups:
11116 * - add a NEW_VARLOADA_VREG macro
11117 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11118 * accessing vtype fields.
11119 * - get rid of I8CONST on 64 bit platforms
11120 * - dealing with the increase in code size due to branches created during opcode
11122 * - use extended basic blocks
11123 * - all parts of the JIT
11124 * - handle_global_vregs () && local regalloc
11125 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11126 * - sources of increase in code size:
11129 * - isinst and castclass
11130 * - lvregs not allocated to global registers even if used multiple times
11131 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11133 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11134 * - add all micro optimizations from the old JIT
11135 * - put tree optimizations into the deadce pass
11136 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11137 * specific function.
11138 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11139 * fcompare + branchCC.
11140 * - create a helper function for allocating a stack slot, taking into account
11141 * MONO_CFG_HAS_SPILLUP.
11143 * - merge the ia64 switch changes.
11144 * - optimize mono_regstate2_alloc_int/float.
11145 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11146 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11147 * parts of the tree could be separated by other instructions, killing the tree
11148 * arguments, or stores killing loads etc. Also, should we fold loads into other
11149 * instructions if the result of the load is used multiple times ?
11150 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11151 * - LAST MERGE: 108395.
11152 * - when returning vtypes in registers, generate IR and append it to the end of the
11153 * last bb instead of doing it in the epilog.
11154 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11162 - When to decompose opcodes:
11163 - earlier: this makes some optimizations hard to implement, since the low level IR
10864 no longer contains the necessary information. But it is easier to do.
11165 - later: harder to implement, enables more optimizations.
11166 - Branches inside bblocks:
11167 - created when decomposing complex opcodes.
11168 - branches to another bblock: harmless, but not tracked by the branch
11169 optimizations, so need to branch to a label at the start of the bblock.
11170 - branches to inside the same bblock: very problematic, trips up the local
10871 reg allocator. Can be fixed by splitting the current bblock, but that is a
11172 complex operation, since some local vregs can become global vregs etc.
11173 - Local/global vregs:
11174 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11175 local register allocator.
11176 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11177 structure, created by mono_create_var (). Assigned to hregs or the stack by
11178 the global register allocator.
11179 - When to do optimizations like alu->alu_imm:
11180 - earlier -> saves work later on since the IR will be smaller/simpler
11181 - later -> can work on more instructions
11182 - Handling of valuetypes:
11183 - When a vtype is pushed on the stack, a new temporary is created, an
11184 instruction computing its address (LDADDR) is emitted and pushed on
11185 the stack. Need to optimize cases when the vtype is used immediately as in
11186 argument passing, stloc etc.
11187 - Instead of the to_end stuff in the old JIT, simply call the function handling
11188 the values on the stack before emitting the last instruction of the bb.
11191 #endif /* DISABLE_JIT */