2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE: record a METHOD_ACCESS exception on the current cfg,
 * with a message naming the inaccessible callee (`cil_method`) and the caller
 * (`method`), then bail out to the `exception_exit` label.  Expansion site
 * must have `cfg`, `method`, `cil_method` and `exception_exit` in scope.
 * NOTE(review): the closing `} while (0)` is not visible in this extract --
 * confirm against the full source.
 */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE: like METHOD_ACCESS_FAILURE but for an inaccessible
 * field; sets MONO_EXCEPTION_FIELD_ACCESS on the cfg with a message naming
 * `field` and `method`, then jumps to `exception_exit`.
 * NOTE(review): closing `} while (0)` not visible in this extract.
 */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE: when this compile has a generic sharing context,
 * mark the compilation as failed with MONO_EXCEPTION_GENERIC_SHARING_FAILED
 * (printing a diagnostic that includes the offending opcode and __LINE__ when
 * verbose_level > 2) and jump to `exception_exit`.  Does nothing when no
 * sharing context is active.
 * NOTE(review): closing braces of the macro are not visible in this extract.
 */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #if SIZEOF_REGISTER == 8
144 /* keep in sync with the enum in mini.h */
147 #include "mini-ops.h"
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Copy the three source vregs from regs[] into ins->sreg1..sreg3.
 * Caller must supply an array with at least three entries.
 */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/*
 * Public wrappers over the cfg-local virtual-register allocators:
 * integer (ireg), float (freg), pointer-sized (preg), and a destination
 * register chosen by eval-stack type (dreg).
 * NOTE(review): return types are not visible in this extract.
 */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug dump of one basic block: prints `msg`, the block number, the in/out
 * edge lists (block number and dfn for each), then every instruction in the
 * block via mono_print_ins_index ().
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
/*
 * UNVERIFIED: invoked when unverifiable IL is found.  Break into the debugger
 * when the break_on_unverified debug option is set, otherwise jump to the
 * `unverified` label in the expansion site.
 */
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/*
 * GET_BBLOCK: fetch the basic block starting at IL address `ip` from the
 * cil_offset_to_bb map, lazily creating and registering one when it doesn't
 * exist yet.  `ip` outside [header->code, end) is treated as unverifiable.
 * NOTE(review): the branch structure between lookup and creation is partly
 * elided in this extract.
 */
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/*
 * EMIT_NEW_X86_LEA (x86/amd64 only): emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a freshly allocated pointer register,
 * and append it to the current basic block.
 * NOTE(review): closing `} while (0)` not visible in this extract.
 */
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * ADD_WIDEN_OP: on 64-bit targets, when a binary op mixes a STACK_PTR operand
 * with a STACK_I4 one, emit an OP_SEXT_I4 to sign-extend the 32-bit operand to
 * pointer width and rewire the op's sreg2 to the widened value.  On 32-bit
 * targets the macro expands to nothing.
 * NOTE(review): the `widen` declaration and the #else/#endif structure are
 * partly elided in this extract.
 */
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP: build a binary IR op from the top two eval-stack entries
 * (sp [0], sp [1]), infer its result type via type_from_op (), widen mixed
 * ptr/int32 operands (ADD_WIDEN_OP), allocate a destination register, append
 * the op to the current bblock, and push the (possibly decomposed) result.
 * NOTE(review): the stack-pointer adjustment before reading sp [0]/sp [1] is
 * not visible in this extract -- confirm against the full source.
 */
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/*
 * ADD_UNOP: unary counterpart of ADD_BINOP -- build a unary op over the
 * eval-stack top, type it via type_from_op (src2 == NULL), allocate the
 * destination register, append to the current bblock and push the result.
 */
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit an OP_COMPARE of the top two stack entries followed by a
 * typed conditional branch.  The true edge goes to the bblock at `target`;
 * the false edge goes to `next_block` when given, otherwise to the bblock at
 * the current `ip` (created via GET_BBLOCK).  Any values still on the eval
 * stack are flushed to locals (handle_stack_args) before the branch, since a
 * bblock boundary follows.  Sets start_new_bblock in the expansion site.
 */
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/*
 * Add the CFG edge from -> to, idempotently: returns early when the edge is
 * already present in from->out_bb / to->in_bb; otherwise grows both edge
 * arrays by one (allocated from the cfg mempool, old entries copied over).
 * The printf calls are verbose-mode edge tracing; NULL cil_code stands for
 * the synthetic entry/exit blocks.
 * NOTE(review): the early-return/append statements between the loops are
 * elided in this extract -- do not rely on the visible lines alone.
 */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Exported wrapper around the file-local link_bblock (). */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
/*
 * Scan the method's exception clauses and encode the region owning `offset`
 * as ((clause_index + 1) << 8) | region-kind | clause->flags, distinguishing
 * filter, finally, fault and catch handler ranges from plain try ranges.
 * NOTE(review): the fall-through return value for "no clause matched" is not
 * visible in this extract (per the header comment above, -1 is expected).
 */
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * Collect (as a GList) every exception clause of kind `type` whose protected
 * range contains `ip` but not `target` -- i.e. the handlers that must run
 * when control leaves the clause by branching from ip to target.
 * NOTE(review): declaration/initialization of `res` and the return are not
 * visible in this extract.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
/*
 * Get-or-create the stack-pointer save variable for an exception region:
 * looked up in cfg->spvars keyed by region id; when created, it is an
 * int-typed OP_LOCAL flagged MONO_INST_INDIRECT so the register allocator
 * leaves it on the stack.
 */
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a handler offset. */
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get-or-create the exception-object variable for a handler at `offset`:
 * cached in cfg->exvars; created as an object-typed OP_LOCAL flagged
 * MONO_INST_INDIRECT to keep it stack-allocated.
 */
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
562 inst->klass = klass = mono_class_from_mono_type (type);
564 inst->type = STACK_MP;
569 switch (type->type) {
571 inst->type = STACK_INV;
575 case MONO_TYPE_BOOLEAN:
581 inst->type = STACK_I4;
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
598 inst->type = STACK_I8;
602 inst->type = STACK_R8;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
610 inst->type = STACK_VTYPE;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
784 ins->opcode = OP_ICOMPARE;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
801 ins->opcode += beqops_op_map [src1->type];
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
834 ins->type = STACK_R8;
835 switch (src1->type) {
838 ins->opcode = OP_ICONV_TO_R_UN;
841 ins->opcode = OP_LCONV_TO_R_UN;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
869 ins->type = STACK_PTR;
870 switch (src1->type) {
872 ins->opcode = OP_ICONV_TO_U;
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
879 ins->opcode = OP_MOVE;
883 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_FCONV_TO_U;
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
911 ins->type = STACK_R8;
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
970 param_table [STACK_MAX] [STACK_MAX] = {
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 switch (args->type) {
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
994 if (!sig->params [i]->byref)
998 if (sig->params [i]->byref)
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1012 if (sig->params [i]->byref)
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and return the OP_LOCAL caching the MonoDomain* (see the
 * comment above): created once per compile, int (pointer-sized) typed. */
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Lazily create and return the variable holding the GOT address.  Only
 * meaningful on architectures defining MONO_ARCH_NEED_GOT_VAR and when
 * compiling AOT; the non-AOT early return is elided in this extract.
 * NOTE(review): the #else/#endif branch returning for other arches is not
 * visible here.
 */
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
/*
 * Lazily create and return the rgctx variable used under generic sharing
 * (asserts a sharing context is active).  Flagged MONO_INST_INDIRECT so it
 * stays stack allocated.
 */
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
/*
 * Map an eval-stack type (ins->type) back to a MonoType*: fixed-width stack
 * kinds map to the corresponding mono_defaults class, while managed pointers
 * (this_arg) and value types (byval_arg) are derived from ins->klass.
 * Unhandled stack types abort via g_error.
 */
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * Map a CIL ldelem/stelem opcode to the element MonoClass it accesses.
 * NOTE(review): most case labels are elided in this extract -- only the
 * returned classes and the CEE_*ELEM_REF cases are visible; the pairing of
 * opcodes to return values must be confirmed against the full source.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/*
 * Return a variable to carry an eval-stack value across a bblock boundary,
 * reusing a cached var per (stack slot, stack type) via cfg->intvars when
 * possible.  Slots beyond header->max_stack (possible after inlining) and
 * stack types outside the cached set always get a fresh OP_LOCAL.
 * NOTE(review): the case labels of the switch and the returns at the end are
 * elided in this extract.
 */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * When compiling AOT, remember the (image, token) pair that produced `key`
 * in cfg->token_info_hash so the AOT compiler can re-resolve the reference
 * later.  Skipped when a generic context is set (token alone would not be
 * enough to look the method up) and for wrapper-made references (table 0).
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR that loads the interface-offset slot for @klass into @intf_reg,
 * given a vtable pointer in @vtable_reg.  The offsets array lives at
 * negative offsets from the vtable pointer (see the comment above).
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
/* AOT: interface_id is not known at compile time, so materialize a
 * patchable "adjusted IID" constant (already scaled/negated so a plain
 * add + zero-offset load reaches the slot). */
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a known constant, fold the (negative) slot
 * offset directly into the load. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets @intf_bit_reg to a nonzero value iff the interface
 * bitmap found at @base_reg + @offset has the bit for @klass's
 * interface_id set.  @base_reg may point at either a MonoClass or a
 * MonoVTable; the caller passes the matching bitmap field offset.
 */
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the
 * mono_class_interface_match icall on the bitmap pointer. */
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: test bit interface_id inline. */
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
/* AOT: the IID is a patchable constant, so compute the byte index
 * (iid >> 3) and the bit mask (1 << (iid & 7)) at run time. */
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, fold byte index and
 * mask into immediates. */
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap test against MonoClass.interface_bitmap. */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap test against MonoVTable.interface_bitmap. */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Range-check klass->interface_id against max_iid_reg before any bitmap
 * lookup.  On failure: branch to @false_target when one is supplied,
 * otherwise raise InvalidCastException (the NULL-target path is used by
 * castclass-style emission).
 */
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
/* AOT: IID is a patchable constant, compare via a register. */
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then delegate. */
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then delegate. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test using the supertypes table of the
 * class in @klass_reg: branch to @true_target when the class at depth
 * (klass->idepth - 1) equals @klass (or @klass_ins when non-NULL, for
 * shared-generics), and to @false_target when the idepth range check
 * fails.  Falls through when the depth is OK but the supertype differs.
 */
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need the explicit
 * idepth range check; shallow ones always have the slot populated. */
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against, in order of preference: a run-time class inst
 * (shared generics), an AOT class constant, or an immediate pointer. */
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no run-time class instruction. */
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface membership test for the vtable in @vtable_reg:
 * max-IID range check, then interface-bitmap bit test.  With targets it
 * behaves like isinst (branches); with NULL targets it behaves like
 * castclass (raises InvalidCastException on failure).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/* Same contract as mini_emit_iface_cast, but @klass_reg holds a
 * MonoClass* rather than a MonoVTable*. */
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check: compare the class in @klass_reg
 * against @klass_inst (run-time class, shared generics), an AOT class
 * constant, or an immediate pointer, and raise InvalidCastException on
 * mismatch.
 */
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with no run-time class inst. */
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the class in @klass_reg with @klass and branch to @target
 * using @branch_op (e.g. OP_PBEQ / OP_PBNE_UN); no exception is raised.
 */
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit the failing-cast (castclass) sequence for the object in @obj_reg
 * whose class is in @klass_reg.  Array targets check rank and element
 * class (with special cases around enums and System.Object elements);
 * non-array targets walk the supertypes table.  @klass_inst, when
 * non-NULL, supplies the target class at run time (shared generics).
 * @object_is_null is the block to branch to when a check is satisfied
 * early.  Raises InvalidCastException on failure.
 */
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
/* NOTE(review): klass_inst is asserted NULL on this (array) path —
 * run-time target classes are apparently not supported for arrays. */
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: enums and their base types need special
 * handling because of array covariance rules between enums and their
 * underlying integral types. */
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets must also reject multi-dimensional arrays of rank 1:
 * a vector has a NULL bounds pointer. */
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, raising on failure (contrast
 * with mini_emit_isninst_cast_inst which branches). */
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no run-time class instruction. */
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit an unrolled inline memset of @size bytes at @destreg + @offset.
 * Only @val == 0 is supported (asserted).  Sizes that fit the alignment
 * are done with a single immediate store; larger regions are filled
 * with descending-width register stores (8/4/2/1 bytes).
 */
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1644 g_assert (val == 0);
/* Small, well-aligned case: one immediate store of the exact width. */
1649 if ((size <= 4) && (size <= align)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: put the fill value in a register once, then store. */
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned region: fall back to byte stores. */
1676 /* This could be optimized further if neccesary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned region: widest stores first, narrowing for the tail.  The
 * wide-store path is compiled out on targets that fault on unaligned
 * access. */
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit an unrolled inline copy of @size bytes from @srcreg + @soffset
 * to @destreg + @doffset, honouring @align.  Each step loads into a
 * fresh vreg and stores it out, using the widest width the alignment
 * and target allow (8/4/2/1 bytes, wide ops compiled out when
 * NO_UNALIGNED_ACCESS is set).
 */
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
/* Unaligned region: byte-by-byte copy. */
1729 /* This could be optimized further if neccesary */
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned region: widest loads/stores first, narrowing for the tail. */
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method's return type to the matching call IR opcode, selected
 * by the (calli, virt) flags: indirect (*_REG), virtual (*_VIRT) or
 * direct.  Byref returns, references and small ints use the plain CALL
 * family; I8 uses LCALL, R4/R8 use FCALL, and value types/typedbyref
 * use VCALL.  Enums and generic instances are reduced to their
 * underlying type and re-dispatched.
 */
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type variables to a concrete basic type. */
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1792 case MONO_TYPE_BOOLEAN:
1795 case MONO_TYPE_CHAR:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying integral type. */
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1825 type = &type->data.generic_class->container_class->byval_arg;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
/* Byref targets accept managed pointers to the same class, or native
 * pointers. */
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
/* Strip enum/Nullable wrappers, then match the target's basic type
 * against the stack type of @arg. */
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1865 case MONO_TYPE_BOOLEAN:
1868 case MONO_TYPE_CHAR:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1892 /* FIXME: check type compatibility */
1896 if (arg->type != STACK_I8)
1901 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1927 if (arg->type != STACK_OBJ)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
/* The implicit "this" argument must be a reference or pointer. */
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Validate each declared parameter against the stack type of the
 * corresponding argument. */
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1983 case MONO_TYPE_BOOLEAN:
1986 case MONO_TYPE_CHAR:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2009 if (args [i]->type != STACK_I8)
2014 if (args [i]->type != STACK_R8)
2017 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying type and are re-checked. */
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2022 if (args [i]->type != STACK_VTYPE)
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2029 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the matching direct *CALL opcode (used when
 * a virtual call can be statically devirtualized). */
2042 callvirt_to_call (int opcode)
2047 case OP_VOIDCALLVIRT:
2056 g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the matching *CALL_MEMBASE opcode (call
 * through a slot loaded from memory, e.g. a vtable or IMT entry). */
2063 callvirt_to_call_membase (int opcode)
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2071 return OP_FCALL_MEMBASE;
2073 return OP_LCALL_MEMBASE;
2075 return OP_VCALL_MEMBASE;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 * Emit IR that passes the IMT "hidden" argument for an interface call:
 * either @imt_arg (already computed) or a constant describing
 * call->method.  On ports with a dedicated MONO_ARCH_IMT_REG the value
 * is pinned to that register; otherwise the port-specific hook is used.
 */
2085 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg = alloc_preg (cfg);
2091 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2092 } else if (cfg->compile_aot) {
2093 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: the MonoMethod pointer is embedded directly as a constant. */
2096 MONO_INST_NEW (cfg, ins, OP_PCONST);
2097 ins->inst_p0 = call->method;
2098 ins->dreg = method_reg;
2099 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM tracks the IMT value by vreg instead of a fixed hw register. */
2103 if (COMPILE_LLVM (cfg))
2104 call->imt_arg_reg = method_reg;
2106 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2108 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2113 static MonoJumpInfo *
/* Allocate a MonoJumpInfo patch record from @mp, filling in the patch
 * type and the (untyped) target.  The record's lifetime follows the
 * mempool's. */
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2120 ji->data.target = target;
2125 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 * Build (but do not add to a bblock) a MonoCallInst for @sig/@args.
 * @calli selects an indirect call, @virtual a virtual one, @tail a
 * tail call.  Handles valuetype-return plumbing (vret_var /
 * OP_OUTARG_VTRETADDR), soft-float r8->r4 argument conversion, and
 * lets the architecture/LLVM backend lower the out-arguments.
 * Callers add the returned instruction to the current bblock.
 */
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype returns: either reuse the caller-provided vret buffer... */
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
/* ...or allocate a local and pass its address via VTRETADDR. */
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2185 MonoInst *in = call->args [i];
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
/* Lower out-args through the LLVM or native backend. */
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2211 mono_arch_emit_call (cfg, call);
2213 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area and flag the method. */
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
2222 inline static MonoInst*
/* Emit an indirect call through the address in @addr, appending it to
 * the current bblock; returns the call instruction. */
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/* Attach the RGCTX (runtime generic context) hidden argument to @call:
 * pinned to MONO_ARCH_RGCTX_REG where the port defines one, tracked by
 * vreg otherwise. */
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2242 call->rgctx_arg_reg = rgctx_reg;
2249 inline static MonoInst*
/* Indirect call variant that also passes an RGCTX argument: copy the
 * rgctx value into a fresh vreg before building the call so it survives
 * argument lowering, then attach it via set_rgctx_arg (). */
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) managed call to @method with signature
 * @sig and arguments @args.  @this non-NULL selects a virtual call;
 * @imt_arg, when set, carries a precomputed IMT argument.  Chooses
 * among: remoting wrappers (transparent proxies), delegate Invoke fast
 * path, static devirtualization (non-virtual or sealed methods), IMT
 * interface dispatch, and plain vtable dispatch.  Returns the emitted
 * call instruction.
 */
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
/* String ctors are declared void but actually return the string. */
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* MarshalByRef (or object) targets may be transparent proxies and
 * need the remoting-invoke-with-check wrapper. */
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
2297 g_assert (cfg->generic_sharing_context);
/* Shared generics: fetch the wrapper address from the RGCTX and do
 * an indirect call. */
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: MulticastDelegate.Invoke calls through the cached
 * delegate->invoke_impl slot. */
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
/* General virtual dispatch: load the vtable (with a fault check on
 * NULL this), then pick the slot. */
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2379 #ifdef MONO_ARCH_HAVE_IMT
/* Interface dispatch via IMT: slots sit at negative offsets before
 * the vtable. */
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT interface dispatch: go through interface_offsets. */
2387 if (slot_reg == -1) {
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Ordinary virtual dispatch: index into the vtable proper. */
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
/* Like mono_emit_method_call_full (), but additionally passes
 * @vtable_arg as the RGCTX hidden argument (copied into a fresh vreg
 * first so it survives argument lowering). */
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call @method with its own signature, no IMT arg. */
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function @func (no this, not
 * virtual), appending it to the current bblock. */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Emit a call to the registered JIT icall identified by its function
 * pointer @func: look up its MonoJitICallInfo and call through the
 * icall wrapper with the icall's own signature. */
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch info so the ABS-address resolver can find it;
 * the hash is lazily created and keyed by the patch record itself. */
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * For pinvoke (or LLVM) calls returning a sub-register-sized integer,
 * emit an explicit sign/zero extension of the call result, since native
 * code might leave the upper bits uninitialized.  Returns the widened
 * instruction (or, presumably, the original when no widening applies —
 * the tail of the function is not shown here).
 */
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
/* Pick the widening conversion that matches the return width and
 * signedness the load opcode implies. */
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the (cached) managed String.memcpy helper with 3 parameters from
 * corlib; aborts if the running corlib is too old to provide it.
 */
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
#if HAVE_WRITE_BARRIERS
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a value of type KLASS from
 * SRCREG+SOFFSET to DESTREG+DOFFSET (SIZE bytes, alignment ALIGN). A bitmap
 * (need_wb) of which pointer-sized slots hold references is computed from the
 * class's fields; each reference slot gets a GC write barrier after the store.
 * Bails out (presumably returning FALSE — the return statements fall outside
 * this extract, TODO confirm) when the type's layout cannot be handled inline.
 */
2546 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, int destreg, int doffset, int srcreg, int soffset, int size, int align)
2549 MonoClassField *field;
2550 gpointer iter = NULL;
2551 int dest_ptr_reg, tmp_reg;
2552 unsigned need_wb = 0;
2557 /*types with references can't have alignment smaller than sizeof(void*) */
2558 if (align < SIZEOF_VOID_P)
2562 * This value cannot be biger than 32 due to the way we calculate the required wb bitmap.
2563 * FIXME tune this value.
2565 if (size > 5 * SIZEOF_VOID_P)
/* Build the bitmap: one bit per pointer-sized slot that holds a reference. */
2568 while ((field = mono_class_get_fields (klass, &iter))) {
2571 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2573 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2574 if (mono_type_is_reference (field->type)) {
2575 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2576 need_wb |= 1 << (foffset / SIZEOF_VOID_P);
2578 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2579 MonoClass *field_class = mono_class_from_mono_type (field->type);
2580 if (field_class->has_references)
2585 dest_ptr_reg = alloc_preg (cfg);
2586 tmp_reg = alloc_preg (cfg);
2588 /*tmp = dreg + doffset*/
2590 NEW_BIALU_IMM (cfg, args [0], OP_PADD_IMM, dest_ptr_reg, destreg, doffset);
2591 MONO_ADD_INS (cfg->cbb, args [0]);
2593 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy one pointer-sized slot per iteration, emitting a write barrier
 * (plus a dummy use to keep the barrier argument alive) for reference slots. */
2596 while (size >= SIZEOF_VOID_P) {
2597 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, soffset);
2598 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2600 if (need_wb & 0x1) {
2601 MonoInst *dummy_use;
2603 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2604 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
2606 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2607 dummy_use->sreg1 = dest_ptr_reg;
2608 MONO_ADD_INS (cfg->cbb, dummy_use);
2612 doffset += SIZEOF_VOID_P;
2613 soffset += SIZEOF_VOID_P;
2614 size -= SIZEOF_VOID_P;
2617 //tmp += sizeof (void*)
2618 if (size >= SIZEOF_VOID_P) {
2619 NEW_BIALU_IMM (cfg, args [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2620 MONO_ADD_INS (cfg->cbb, args [0]);
/* Copy the sub-pointer-sized tail with plain integer loads/stores. */
2624 /* Those cannot be references since size < sizeof (void*) */
2626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, soffset);
2627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, tmp_reg);
2634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, soffset);
2635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, tmp_reg);
2642 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, soffset);
2643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, tmp_reg);
2654 * Emit code to copy a valuetype of type @klass whose address is stored in
2655 * @src->dreg to memory whose address is stored at @dest->dreg.
2658 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2660 MonoInst *iargs [3];
2663 MonoMethod *memcpy_method;
2667 * This check breaks with spilled vars... need to handle it during verification anyway.
2668 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size depends on whether we are copying the native (marshalled) layout. */
2672 n = mono_class_native_size (klass, &align);
2674 n = mono_class_value_size (klass, &align);
2676 #if HAVE_WRITE_BARRIERS
2677 /* if native is true there should be no references in the struct */
2678 if (klass->has_references && !native) {
2679 /* Avoid barriers when storing to the stack */
2680 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2681 (dest->opcode == OP_LDADDR))) {
2682 int context_used = 0;
2687 if (cfg->generic_sharing_context)
2688 context_used = mono_class_check_context_used (klass);
2689 /*FIXME can we use the intrinsics version when context_used == TRUE? */
2691 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2692 } else if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, dest->dreg, 0, src->dreg, 0, n, align)) {
2695 if (cfg->compile_aot) {
2696 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2698 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2699 mono_class_compute_gc_descriptor (klass);
/* Fall back to the runtime helper which performs the barriers itself. */
2703 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or stack dest): small copies are inlined, larger ones call
 * the managed memcpy helper. */
2709 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2710 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2711 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2715 EMIT_NEW_ICONST (cfg, iargs [2], n);
2717 memcpy_method = get_memcpy_method ();
2718 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the (cached) managed String.memset helper with 3 parameters from
 * corlib; aborts if the running corlib is too old to provide it.
 */
2723 get_memset_method (void)
2725 static MonoMethod *memset_method = NULL;
2726 if (!memset_method) {
2727 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2729 g_error ("Old corlib found. Install a new one");
2731 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a value of type KLASS at the address in
 * DEST->dreg (the CIL `initobj` semantics). Small values are zeroed inline;
 * larger ones call the managed memset helper with value 0.
 */
2735 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2737 MonoInst *iargs [3];
2740 MonoMethod *memset_method;
2742 /* FIXME: Optimize this for the case when dest is an LDADDR */
2744 mono_class_init (klass);
2745 n = mono_class_value_size (klass, &align);
2747 if (n <= sizeof (gpointer) * 5) {
2748 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2751 memset_method = get_memset_method ();
2753 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2754 EMIT_NEW_ICONST (cfg, iargs [2], n);
2755 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD. Depending on
 * how the context is shared, it comes from the MRGCTX variable, the VTable
 * variable, or is derived from `this`'s vtable. Only valid when compiling
 * with a generic sharing context (asserted below).
 */
2760 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2762 MonoInst *this = NULL;
2764 g_assert (cfg->generic_sharing_context);
/* Load `this` only for non-static, non-valuetype methods whose context is
 * not carried in an MRGCTX argument. */
2766 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2767 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2768 !method->klass->valuetype)
2769 EMIT_NEW_ARGLOAD (cfg, this, 0);
2771 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2772 MonoInst *mrgctx_loc, *mrgctx_var;
2775 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method-level context: read the MRGCTX from its dedicated variable. */
2777 mrgctx_loc = mono_get_vtable_var (cfg);
2778 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2781 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2782 MonoInst *vtable_loc, *vtable_var;
/* Static or valuetype method: the vtable variable holds the context. */
2786 vtable_loc = mono_get_vtable_var (cfg);
2787 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2789 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2790 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an MRGCTX; dereference to get the vtable. */
2793 vtable_reg = alloc_preg (cfg);
2794 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2795 vtable_var->type = STACK_PTR;
/* Instance method: fetch the vtable directly from `this`. */
2801 int vtable_reg, res_reg;
2803 vtable_reg = alloc_preg (cfg);
2804 res_reg = alloc_preg (cfg);
2805 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) an RGCTX-entry patch descriptor that
 * records which method's context to use (and whether it is an MRGCTX), the
 * inner patch (PATCH_TYPE/PATCH_DATA) identifying the item, and the kind of
 * information to fetch (INFO_TYPE). Used by the RGCTX lazy-fetch trampoline.
 */
static MonoJumpInfoRgctxEntry *
2811 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2813 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2814 res->method = method;
2815 res->in_mrgctx = in_mrgctx;
2816 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2817 res->data->type = patch_type;
2818 res->data->data.target = patch_data;
2819 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the RGCTX lazy-fetch trampoline, passing the runtime
 * generic context RGCTX and identifying the slot to fetch via ENTRY.
 */
static inline MonoInst*
2825 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2827 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE information (klass, vtable, ...) for
 * KLASS from the runtime generic context of the current method.
 */
2831 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2832 MonoClass *klass, int rgctx_type)
2834 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2835 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2837 return emit_rgctx_fetch (cfg, rgctx, entry);
2841 * emit_get_rgctx_method:
2843 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2844 * normal constants, else emit a load from the rgctx.
2847 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2848 MonoMethod *cmethod, int rgctx_type)
2850 if (!context_used) {
/* Non-shared code: the method (or its RGCTX) can be embedded as a constant. */
2853 switch (rgctx_type) {
2854 case MONO_RGCTX_INFO_METHOD:
2855 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2857 case MONO_RGCTX_INFO_METHOD_RGCTX:
2858 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2861 g_assert_not_reached ();
/* Shared code: fetch from the runtime generic context at execution time. */
2864 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2865 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2867 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE information for FIELD from the
 * runtime generic context of the current method.
 */
2872 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2873 MonoClassField *field, int rgctx_type)
2875 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2876 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2878 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the RGCTX (shared code) or as a vtable constant. On
 * architectures with a dedicated vtable register the vtable is passed in it.
 */
2882 * On return the caller must check @klass for load errors.
2885 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2887 MonoInst *vtable_arg;
2889 int context_used = 0;
2891 if (cfg->generic_sharing_context)
2892 context_used = mono_class_check_context_used (klass);
2895 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2896 klass, MONO_RGCTX_INFO_VTABLE);
2898 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2902 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
2905 if (COMPILE_LLVM (cfg))
2906 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2908 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2909 #ifdef MONO_ARCH_VTABLE_REG
2910 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2911 cfg->uses_vtable_reg = TRUE;
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * compilation mode: class pointers under MONO_OPT_SHARED, an RGCTX-fetched
 * vtable in shared generic code, or a vtable constant/immediate otherwise.
 */
2918 * On return the caller must check @array_class for load errors
2921 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2923 int vtable_reg = alloc_preg (cfg);
2924 int context_used = 0;
2926 if (cfg->generic_sharing_context)
2927 context_used = mono_class_check_context_used (array_class);
2929 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2931 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared (domain-neutral) code: compare the MonoClass, not the vtable. */
2932 int class_reg = alloc_preg (cfg);
2933 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2934 if (cfg->compile_aot) {
2935 int klass_reg = alloc_preg (cfg);
2936 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2941 } else if (context_used) {
/* Shared generic code: obtain the expected vtable from the RGCTX. */
2942 MonoInst *vtable_ins;
2944 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2945 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2947 if (cfg->compile_aot) {
2951 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2953 vt_reg = alloc_preg (cfg);
2954 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2955 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2958 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2964 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code that records the source class
 * (from OBJ_REG's vtable) and the target class KLASS into the thread's
 * MonoJitTlsData, so a failed cast can report both types. No-op otherwise.
 */
2968 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2970 if (mini_get_debug_options ()->better_cast_details) {
2971 int to_klass_reg = alloc_preg (cfg);
2972 int vtable_reg = alloc_preg (cfg);
2973 int klass_reg = alloc_preg (cfg);
2974 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* tls_get is NULL when the platform lacks the TLS intrinsic. */
2977 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2981 MONO_ADD_INS (cfg->cbb, tls_get);
2982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2986 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2987 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: clears the recorded cast source class
 * in TLS once a cast has succeeded. Only emitted under --debug=casts.
 */
2992 reset_cast_details (MonoCompile *cfg)
2994 /* Reset the variables holding the cast details */
2995 if (mini_get_debug_options ()->better_cast_details) {
2996 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2998 MONO_ADD_INS (cfg->cbb, tls_get);
2999 /* It is enough to reset the from field */
3000 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3005 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3006 * generic code is generated.
3009 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3011 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3014 MonoInst *rgctx, *addr;
3016 /* FIXME: What if the class is shared? We might not
3017 have to get the address of the method from the
/* Shared generic code: resolve Nullable<T>.Unbox's address through the
 * RGCTX and emit an indirect (calli) call carrying the rgctx argument. */
3019 addr = emit_get_rgctx_method (cfg, context_used, method,
3020 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3022 rgctx = emit_get_rgctx (cfg, method, context_used);
3024 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code can call the concrete Unbox method directly. */
3026 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the CIL `unbox` sequence for KLASS on the object at the top of the
 * stack (SP[0]): verify via the vtable that the object's rank is 0 and its
 * element class matches, throwing InvalidCastException otherwise, then
 * produce the address of the boxed payload (object pointer + MonoObject
 * header size).
 */
3031 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3035 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3036 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3037 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3038 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3040 obj_reg = sp [0]->dreg;
3041 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3042 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3044 /* FIXME: generics */
3045 g_assert (klass->rank == 0);
/* An array object can never unbox to a valuetype: rank must be 0. */
3048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3049 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code compares against an RGCTX-resolved element class. */
3055 MonoInst *element_class;
3057 /* This assertion is from the unboxcast insn */
3058 g_assert (klass->rank == 0);
3060 element_class = emit_get_rgctx_klass (cfg, context_used,
3061 klass->element_class, MONO_RGCTX_INFO_KLASS);
3063 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3064 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3066 save_cast_details (cfg, klass->element_class, obj_reg);
3067 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3068 reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, i.e. the valuetype data. */
3071 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3072 MONO_ADD_INS (cfg->cbb, add);
3073 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit code that allocates an instance of KLASS (FOR_BOX set when boxing).
 * Picks the cheapest available allocation path: a managed GC allocator, a
 * corlib-specialized AOT helper, or one of the mono_object_new* icalls; in
 * shared generic code the klass/vtable is obtained from the RGCTX.
 */
3080 * Returns NULL and set the cfg exception on error.
3083 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3085 MonoInst *iargs [2];
3091 MonoInst *iargs [2];
3094 FIXME: we cannot get managed_alloc here because we can't get
3095 the class's vtable (because it's not a closed class)
3097 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3098 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared generic code: fetch klass (shared runtime) or vtable via RGCTX. */
3101 if (cfg->opt & MONO_OPT_SHARED)
3102 rgctx_info = MONO_RGCTX_INFO_KLASS;
3104 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3105 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3107 if (cfg->opt & MONO_OPT_SHARED) {
3108 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3110 alloc_ftn = mono_object_new;
3113 alloc_ftn = mono_object_new_specific;
3116 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3119 if (cfg->opt & MONO_OPT_SHARED) {
3120 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3121 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3123 alloc_ftn = mono_object_new;
3124 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3125 /* This happens often in argument checking code, eg. throw new FooException... */
3126 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3127 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3128 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3130 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3131 MonoMethod *managed_alloc = NULL;
/* A NULL vtable means the class failed to load: report via cfg exception. */
3135 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3136 cfg->exception_ptr = klass;
3140 #ifndef MONO_CROSS_COMPILE
3141 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3144 if (managed_alloc) {
3145 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3146 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3148 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words ("lw"). */
3150 guint32 lw = vtable->klass->instance_size;
3151 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3152 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3153 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3156 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3160 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the CIL `box` sequence for VAL of type KLASS: Nullable<T> goes
 * through Nullable.Box (directly or via an RGCTX calli in shared code);
 * everything else allocates an object and stores the value past the
 * MonoObject header.
 */
3164 * Returns NULL and set the cfg exception on error.
3167 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3169 MonoInst *alloc, *ins;
3171 if (mono_class_is_nullable (klass)) {
3172 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3175 /* FIXME: What if the class is shared? We might not
3176 have to get the method address from the RGCTX. */
3177 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3178 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3179 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3181 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3183 return mono_emit_method_call (cfg, method, &val, NULL);
3187 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly-allocated box, after the object header. */
3191 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Classes for which isinst/castclass cannot be expanded inline and must go
 * through an icall: interfaces, arrays, nullables, MBR/sealed classes,
 * variant generics and open type variables. The leading TRUE currently
 * forces the icall path for everything — see the FIXME below. */
// FIXME: This doesn't work yet (class libs tests fail?)
3197 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit the CIL `castclass` sequence: check that SRC is null or an instance
 * of KLASS, throwing InvalidCastException otherwise. Shared generic code
 * resolves KLASS via the RGCTX; "complex" classes are handled by the
 * mono_object_castclass icall, simple ones by inline vtable/klass checks.
 */
3200 * Returns NULL and set the cfg exception on error.
3203 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3205 MonoBasicBlock *is_null_bb;
3206 int obj_reg = src->dreg;
3207 int vtable_reg = alloc_preg (cfg);
3208 MonoInst *klass_inst = NULL;
3213 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3214 klass, MONO_RGCTX_INFO_KLASS);
3216 if (is_complex_isinst (klass)) {
3217 /* Complex case, handle by an icall */
3223 args [1] = klass_inst;
3225 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3227 /* Simple case, handled by the code below */
3231 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always passes castclass. */
3233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3234 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3236 save_cast_details (cfg, klass, obj_reg);
3238 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3239 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3240 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3242 int klass_reg = alloc_preg (cfg);
3244 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single direct comparison. */
3246 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3247 /* the remoting code is broken, access the class for now */
3248 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3249 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3251 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3252 cfg->exception_ptr = klass;
3255 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3257 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3260 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3263 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3267 MONO_START_BB (cfg, is_null_bb);
3269 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit the CIL `isinst` sequence: produce SRC if it is null or an instance
 * of KLASS, NULL otherwise. "Complex" classes go through the
 * mono_object_isinst icall; simple ones are expanded inline with per-shape
 * checks (interface, array element class, nullable, sealed class, generic
 * class). Control flow: false_bb yields NULL, is_null_bb passes the input
 * through, both fall into end_bb.
 */
3275 * Returns NULL and set the cfg exception on error.
3278 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3281 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3282 int obj_reg = src->dreg;
3283 int vtable_reg = alloc_preg (cfg);
3284 int res_reg = alloc_preg (cfg);
3285 MonoInst *klass_inst = NULL;
3288 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3290 if (is_complex_isinst (klass)) {
3293 /* Complex case, handle by an icall */
3299 args [1] = klass_inst;
3301 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3303 /* Simple case, the code below can handle it */
3307 NEW_BBLOCK (cfg, is_null_bb);
3308 NEW_BBLOCK (cfg, false_bb);
3309 NEW_BBLOCK (cfg, end_bb);
3311 /* Do the assignment at the beginning, so the other assignment can be if converted */
3312 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3313 ins->type = STACK_OBJ;
/* isinst on null yields the input (null) unchanged. */
3316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3317 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3319 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3321 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3322 g_assert (!context_used);
3323 /* the is_null_bb target simply copies the input register to the output */
3324 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3326 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then compare element (cast) classes. */
3329 int rank_reg = alloc_preg (cfg);
3330 int eclass_reg = alloc_preg (cfg);
3332 g_assert (!context_used);
3333 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3334 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3335 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3338 if (klass->cast_class == mono_defaults.object_class) {
/* object[]: any non-enum reference element type matches. */
3339 int parent_reg = alloc_preg (cfg);
3340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3341 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3342 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3344 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3345 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3346 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3348 } else if (klass->cast_class == mono_defaults.enum_class) {
3349 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3351 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3352 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3354 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3355 /* Check that the object is a vector too */
3356 int bounds_reg = alloc_preg (cfg);
3357 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3362 /* the is_null_bb target simply copies the input register to the output */
3363 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3365 } else if (mono_class_is_nullable (klass)) {
3366 g_assert (!context_used);
3367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3368 /* the is_null_bb target simply copies the input register to the output */
3369 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3371 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3372 g_assert (!context_used);
3373 /* the remoting code is broken, access the class for now */
3374 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3375 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3377 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3378 cfg->exception_ptr = klass;
3381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3387 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3390 /* the is_null_bb target simply copies the input register to the output */
3391 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL; null/match path: result already holds obj. */
3396 MONO_START_BB (cfg, false_bb);
3398 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3401 MONO_START_BB (cfg, is_null_bb);
3403 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst check (CEE_MONO_CISINST). Unlike plain
 * isinst it returns a 3-valued integer (see comment below) so the caller can
 * handle transparent proxies whose type cannot be decided at JIT time.
 */
3409 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3411 /* This opcode takes as input an object reference and a class, and returns:
3412 0) if the object is an instance of the class,
3413 1) if the object is not instance of the class,
3414 2) if the object is a proxy whose type cannot be determined */
3417 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3418 int obj_reg = src->dreg;
3419 int dreg = alloc_ireg (cfg);
3421 int klass_reg = alloc_preg (cfg);
3423 NEW_BBLOCK (cfg, true_bb);
3424 NEW_BBLOCK (cfg, false_bb);
3425 NEW_BBLOCK (cfg, false2_bb);
3426 NEW_BBLOCK (cfg, end_bb);
3427 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3430 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3432 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3433 NEW_BBLOCK (cfg, interface_fail_bb);
3435 tmp_reg = alloc_preg (cfg);
3436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3437 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3438 MONO_START_BB (cfg, interface_fail_bb);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still be undecided. */
3441 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3443 tmp_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect a transparent proxy and test its remote class. */
3448 tmp_reg = alloc_preg (cfg);
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3452 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3453 tmp_reg = alloc_preg (cfg);
3454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3457 tmp_reg = alloc_preg (cfg);
3458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3462 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3463 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3465 MONO_START_BB (cfg, no_proxy_bb);
3467 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the 3-valued result in dreg: 1, 2 or 0. */
3470 MONO_START_BB (cfg, false_bb);
3472 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3475 MONO_START_BB (cfg, false2_bb);
3477 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3480 MONO_START_BB (cfg, true_bb);
3482 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3484 MONO_START_BB (cfg, end_bb);
3487 MONO_INST_NEW (cfg, ins, OP_ICONST);
3489 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR that checks whether the object in SRC can be cast to KLASS, with
 * special handling for transparent proxies (remoting).  The result is left in
 * a freshly allocated integer vreg; see the comment below for the meaning of
 * the result values.  On a failed cast the emitted code throws
 * InvalidCastException at runtime.
 * NOTE(review): the opcode this helper services is not visible in this chunk;
 * confirm against the caller in the IL-to-IR loop.
 */
3495 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3497 /* This opcode takes as input an object reference and a class, and returns:
3498 0) if the object is an instance of the class,
3499 1) if the object is a proxy whose type cannot be determined
3500 an InvalidCastException exception is thrown otherwise*/
3503 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3504 int obj_reg = src->dreg;
3505 int dreg = alloc_ireg (cfg);
3506 int tmp_reg = alloc_preg (cfg);
3507 int klass_reg = alloc_preg (cfg);
3509 NEW_BBLOCK (cfg, end_bb);
3510 NEW_BBLOCK (cfg, ok_result_bb);
/* A NULL object reference always casts successfully (result 0) */
3512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a later failure can produce a detailed message */
3515 save_cast_details (cfg, klass, obj_reg);
3517 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: try the fast interface-table check first */
3518 NEW_BBLOCK (cfg, interface_fail_bb);
3520 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3521 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3522 MONO_START_BB (cfg, interface_fail_bb);
3523 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may still pass after the iface check failed */
3525 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3527 tmp_reg = alloc_preg (cfg);
3528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy without custom type info cannot satisfy the cast: throw */
3530 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3532 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3533 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: distinguish transparent proxies from plain objects */
3536 NEW_BBLOCK (cfg, no_proxy_bb);
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3540 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: check against the remote class the proxy stands for */
3542 tmp_reg = alloc_preg (cfg);
3543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3546 tmp_reg = alloc_preg (cfg);
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3551 NEW_BBLOCK (cfg, fail_1_bb);
3553 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3555 MONO_START_BB (cfg, fail_1_bb);
3557 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3558 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object path: normal castclass semantics (throws on failure) */
3560 MONO_START_BB (cfg, no_proxy_bb);
3562 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3565 MONO_START_BB (cfg, ok_result_bb);
3567 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3569 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value */
3572 MONO_INST_NEW (cfg, ins, OP_ICONST);
3574 ins->type = STACK_I4;
3580 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate object of type KLASS and initialize it inline,
 * mirroring what mono_delegate_ctor () would do at runtime: store the TARGET
 * object, the METHOD, an optional per-domain code slot, and the invoke
 * trampoline.  CONTEXT_USED selects rgctx-based method lookup for generic
 * sharing.  Per the comment above, returns NULL with the cfg exception set
 * on error.
 */
3582 static G_GNUC_UNUSED MonoInst*
3583 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3585 gpointer *trampoline;
3586 MonoInst *obj, *method_ins, *tramp_ins;
3590 obj = handle_alloc (cfg, klass, FALSE, 0);
3594 /* Inline the contents of mono_delegate_ctor */
3596 /* Set target field */
3597 /* Optimize away setting of NULL target */
3598 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3599 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3601 /* Set method field */
3602 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3603 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3606 * To avoid looking up the compiled code belonging to the target method
3607 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3608 * store it, and we fill it after the method has been compiled.
3610 if (!cfg->compile_aot && !method->dynamic) {
3611 MonoInst *code_slot_ins;
3614 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the method -> code-slot mapping under the
 * domain lock; the slot itself is domain-allocated and never freed. */
3616 domain = mono_domain_get ();
3617 mono_domain_lock (domain);
3618 if (!domain_jit_info (domain)->method_code_hash)
3619 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3620 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3622 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3623 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3625 mono_domain_unlock (domain);
3627 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3632 /* Set invoke_impl field */
3633 if (cfg->compile_aot) {
/* AOT: the trampoline address is resolved at load time via a patch */
3634 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3636 trampoline = mono_create_delegate_trampoline (klass);
3637 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3641 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall for a newobj on a
 * multi-dimensional array ctor with RANK dimension arguments taken from SP.
 * The icall uses a vararg calling convention, which disables LLVM for this
 * method.
 */
3647 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3649 MonoJitICallInfo *info;
3651 /* Need to register the icall so it gets an icall wrapper */
3652 info = mono_get_array_new_va_icall (rank);
3654 cfg->flags |= MONO_CFG_HAS_VARARGS;
3656 /* mono_array_new_va () needs a vararg calling convention */
3657 cfg->disable_llvm = TRUE;
3659 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3660 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var, plus a dummy use in the exit block so the variable
 * stays live for the whole method.  No-op if there is no got_var or it was
 * already allocated.
 */
3664 mono_emit_load_got_addr (MonoCompile *cfg)
3666 MonoInst *getaddr, *dummy_use;
3668 if (!cfg->got_var || cfg->got_var_allocated)
3671 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3672 getaddr->dreg = cfg->got_var->dreg;
3674 /* Add it to the start of the first bblock */
3675 if (cfg->bb_entry->code) {
/* Prepend manually: the GOT address must be set before any other ins */
3676 getaddr->next = cfg->bb_entry->code;
3677 cfg->bb_entry->code = getaddr;
3680 MONO_ADD_INS (cfg->bb_entry, getaddr);
3682 cfg->got_var_allocated = TRUE;
3685 * Add a dummy use to keep the got_var alive, since real uses might
3686 * only be generated by the back ends.
3687 * Add it to end_bblock, so the variable's lifetime covers the whole
3689 * It would be better to make the usage of the got var explicit in all
3690 * cases when the backend needs it (i.e. calls, throw etc.), so this
3691 * wouldn't be needed.
3693 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3694 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3697 static int inline_limit;
3698 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing, excessive inline depth, noinline/
 * synchronized/marshalbyref methods, bodies larger than the inline limit
 * (overridable via the MONO_INLINELIMIT env var), classes whose cctor would
 * have to run inside the inlined code, methods with declarative security,
 * and (under soft float) R4 parameters/returns.
 */
3701 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3703 MonoMethodHeaderSummary header;
3705 #ifdef MONO_ARCH_SOFT_FLOAT
3706 MonoMethodSignature *sig = mono_method_signature (method);
3710 if (cfg->generic_sharing_context)
3713 if (cfg->inline_depth > 10)
3716 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke stubs can be inlined directly */
3717 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3718 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3719 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3724 if (!mono_method_get_header_summary (method, &header))
3727 /*runtime, icall and pinvoke are checked by summary call*/
3728 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3729 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3730 (method->klass->marshalbyref) ||
3734 /* also consider num_locals? */
3735 /* Do the size check early to avoid creating vtables */
3736 if (!inline_limit_inited) {
3737 if (getenv ("MONO_INLINELIMIT"))
3738 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3740 inline_limit = INLINE_LENGTH_LIMIT;
3741 inline_limit_inited = TRUE;
3743 if (header.code_size >= inline_limit)
3747 * if we can initialize the class of the method right away, we do,
3748 * otherwise we don't allow inlining if the class needs initialization,
3749 * since it would mean inserting a call to mono_runtime_class_init()
3750 * inside the inlined code
3752 if (!(cfg->opt & MONO_OPT_SHARED)) {
3753 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3754 if (cfg->run_cctors && method->klass->has_cctor) {
3755 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3756 if (!method->klass->runtime_info)
3757 /* No vtable created yet */
3759 vtable = mono_class_vtable (cfg->domain, method->klass);
3762 /* This makes so that inline cannot trigger */
3763 /* .cctors: too many apps depend on them */
3764 /* running with a specific order... */
3765 if (! vtable->initialized)
3767 mono_runtime_class_init (vtable);
3769 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3770 if (!method->klass->runtime_info)
3771 /* No vtable created yet */
3773 vtable = mono_class_vtable (cfg->domain, method->klass);
3776 if (!vtable->initialized)
3781 * If we're compiling for shared code
3782 * the cctor will need to be run at aot method load time, for example,
3783 * or at the end of the compilation of the inlining method.
3785 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3790 * CAS - do not inline methods with declarative security
3791 * Note: this has to be before any possible return TRUE;
3793 if (mono_method_has_declsec (method))
3796 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float cannot inline anything touching R4 values */
3798 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3800 for (i = 0; i < sig->param_count; ++i)
3801 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires the class
 * constructor of VTABLE's class to be triggered at that point.  Already
 * initialized (non-AOT), beforefieldinit, cctor-free, and same-class
 * instance-method cases all need no trigger.
 */
3809 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3811 if (vtable->initialized && !cfg->compile_aot)
3814 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3817 if (!mono_class_needs_cctor_run (vtable->klass, method))
3820 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3821 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS.  Emits a bounds check when BCHECK is
 * set.  Uses an x86 LEA when the element size is a small power of two.
 * Returns the address instruction (type STACK_PTR).
 */
3828 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3832 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3834 mono_class_init (klass);
3835 size = mono_class_array_element_size (klass);
3837 mult_reg = alloc_preg (cfg);
3838 array_reg = arr->dreg;
3839 index_reg = index->dreg;
3841 #if SIZEOF_REGISTER == 8
3842 /* The array reg is 64 bits but the index reg is only 32 */
3843 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself */
3845 index2_reg = index_reg;
3847 index2_reg = alloc_preg (cfg);
3848 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3851 if (index->type == STACK_I8) {
3852 index2_reg = alloc_preg (cfg);
3853 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3855 index2_reg = index_reg;
3860 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3862 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3863 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2 maps the element size to the LEA shift amount */
3864 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3866 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3867 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
3873 add_reg = alloc_preg (cfg);
3875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3876 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3877 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3878 ins->type = STACK_PTR;
3879 MONO_ADD_INS (cfg->cbb, ins);
3884 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i1, i2] of the rank-2 array
 * ARR with element type KLASS, including per-dimension lower-bound
 * adjustment and range checks against MonoArrayBounds.  Address formula:
 * arr + ((i1 - lb1) * len2 + (i2 - lb2)) * size + offsetof (MonoArray, vector).
 * Only compiled when the arch does not emulate mul/div (needs OP_LMUL).
 */
3886 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3888 int bounds_reg = alloc_preg (cfg);
3889 int add_reg = alloc_preg (cfg);
3890 int mult_reg = alloc_preg (cfg);
3891 int mult2_reg = alloc_preg (cfg);
3892 int low1_reg = alloc_preg (cfg);
3893 int low2_reg = alloc_preg (cfg);
3894 int high1_reg = alloc_preg (cfg);
3895 int high2_reg = alloc_preg (cfg);
3896 int realidx1_reg = alloc_preg (cfg);
3897 int realidx2_reg = alloc_preg (cfg);
3898 int sum_reg = alloc_preg (cfg);
3903 mono_class_init (klass);
3904 size = mono_class_array_element_size (klass);
3906 index1 = index_ins1->dreg;
3907 index2 = index_ins2->dreg;
3909 /* range checking */
3910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3911 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1, must be < length1 */
3913 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3914 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3915 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3916 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3917 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3918 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3919 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
3921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3922 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3923 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3925 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3926 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3927 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten the two indexes and scale by the element size */
3929 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3930 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3932 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3933 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3935 ins->type = STACK_MP;
3937 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Get/Set/Address helper call
 * CMETHOD.  Rank 1 and (when OP_LMUL is available) rank 2 arrays get inline
 * address code; higher ranks fall back to a call to the marshalled
 * Address () wrapper.  IS_SET drops the trailing value argument when
 * computing the rank.
 */
3944 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3948 MonoMethod *addr_method;
3951 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3954 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3956 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3957 /* emit_ldelema_2 depends on OP_LMUL */
3958 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3959 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address () wrapper */
3963 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3964 addr_method = mono_marshal_get_array_address (rank, element_size);
3965 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see
 * mono_set_break_policy () below). */
3970 static MonoBreakPolicy
3971 always_insert_breakpoint (MonoMethod *method)
3973 return MONO_BREAK_POLICY_ALWAYS;
3976 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3979 * mono_set_break_policy:
3980 * policy_callback: the new callback function
3982 * Allow embedders to decide whether to actually obey breakpoint instructions
3983 * (both break IL instructions and Debugger.Break () method calls), for example
3984 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3985 * untrusted or semi-trusted code.
3987 * @policy_callback will be called every time a break point instruction needs to
3988 * be inserted with the method argument being the method that calls Debugger.Break()
3989 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3990 * if it wants the breakpoint to not be effective in the given method.
3991 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break-policy hook; a NULL argument
 * restores the default always-break behavior. */
3994 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3996 if (policy_callback)
3997 break_policy_func = policy_callback;
3999 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy for METHOD and return whether a
 * breakpoint instruction should actually be emitted.
 * NOTE(review): the name is misspelled ("brekpoint") but is referenced by
 * callers (e.g. the Debugger.Break intrinsic below), so renaming would have
 * to update every call site.
 */
4003 should_insert_brekpoint (MonoMethod *method) {
4004 switch (break_policy_func (method)) {
4005 case MONO_BREAK_POLICY_ALWAYS:
4007 case MONO_BREAK_POLICY_NEVER:
4009 case MONO_BREAK_POLICY_ON_DBG:
4010 return mono_debug_using_mono_debugger ();
4012 g_warning ("Incorrect value returned from break policy callback");
4017 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the simple Array Get/SetGenericValueImpl icalls: compute the
 * element address (no bounds check — the callers already did it) and copy
 * the value between the element and the by-ref argument.  IS_SET selects
 * the copy direction.
 */
4019 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4021 MonoInst *addr, *store, *load;
4022 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4024 /* the bounds check is already done by the callers */
4025 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *element = *value_ref */
4027 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4028 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get: *value_ref = *element */
4030 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4031 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with intrinsic IR.
 * Currently only SIMD ctor intrinsics are handled (when MONO_OPT_SIMD is
 * enabled); returns NULL when no intrinsic applies.
 */
4037 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4039 MonoInst *ins = NULL;
4040 #ifdef MONO_ARCH_SIMD_INTRINSICS
4041 if (cfg->opt & MONO_OPT_SIMD) {
4042 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with intrinsic IR.  Dispatches on the
 * target class (String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger/Environment, Math, SIMD) and falls through to
 * mono_arch_emit_inst_for_method () for arch-specific intrinsics.  Returns
 * the result instruction, or NULL when the call must be emitted normally.
 */
4052 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4054 MonoInst *ins = NULL;
/* Lazily cached; resolving RuntimeHelpers by name is not free */
4056 static MonoClass *runtime_helpers_class = NULL;
4057 if (! runtime_helpers_class)
4058 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4059 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
4061 if (cmethod->klass == mono_defaults.string_class) {
4062 if (strcmp (cmethod->name, "get_Chars") == 0) {
4063 int dreg = alloc_ireg (cfg);
4064 int index_reg = alloc_preg (cfg);
4065 int mult_reg = alloc_preg (cfg);
4066 int add_reg = alloc_preg (cfg);
4068 #if SIZEOF_REGISTER == 8
4069 /* The array reg is 64 bits but the index reg is only 32 */
4070 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4072 index_reg = args [1]->dreg;
4074 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4076 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4077 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4078 add_reg = ins->dreg;
4079 /* Avoid a warning */
4081 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* chars are 2 bytes wide, hence the shift by 1 */
4084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4085 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4086 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4087 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4089 type_from_op (ins, NULL, NULL);
4091 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4092 int dreg = alloc_ireg (cfg);
4093 /* Decompose later to allow more optimizations */
4094 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4095 ins->type = STACK_I4;
4096 cfg->cbb->has_array_access = TRUE;
4097 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4100 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4101 int mult_reg = alloc_preg (cfg);
4102 int add_reg = alloc_preg (cfg);
4104 /* The corlib functions check for oob already. */
4105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4106 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4108 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
4111 } else if (cmethod->klass == mono_defaults.object_class) {
4113 if (strcmp (cmethod->name, "GetType") == 0) {
4114 int dreg = alloc_preg (cfg);
4115 int vt_reg = alloc_preg (cfg);
4116 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4117 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4118 type_from_op (ins, NULL, NULL);
4121 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Non-moving GC: the object address itself can seed the hash */
4122 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4123 int dreg = alloc_ireg (cfg);
4124 int t1 = alloc_ireg (cfg);
4126 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4127 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4128 ins->type = STACK_I4;
4132 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor does nothing */
4133 MONO_INST_NEW (cfg, ins, OP_NOP);
4134 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
4138 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl */
4139 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4140 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4141 if (cmethod->name [0] != 'g')
4144 if (strcmp (cmethod->name, "get_Rank") == 0) {
4145 int dreg = alloc_ireg (cfg);
4146 int vtable_reg = alloc_preg (cfg);
4147 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4148 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4149 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4150 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4151 type_from_op (ins, NULL, NULL);
4154 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4155 int dreg = alloc_ireg (cfg);
4157 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4158 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4159 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers intrinsics ---- */
4164 } else if (cmethod->klass == runtime_helpers_class) {
4166 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4167 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread intrinsics ---- */
4171 } else if (cmethod->klass == mono_defaults.thread_class) {
4172 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4173 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4174 MONO_ADD_INS (cfg->cbb, ins);
4176 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4177 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4178 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
4181 } else if (cmethod->klass == mono_defaults.monitor_class) {
4182 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4183 if (strcmp (cmethod->name, "Enter") == 0) {
4186 if (COMPILE_LLVM (cfg)) {
4188 * Pass the argument normally, the LLVM backend will handle the
4189 * calling convention problems.
4191 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4193 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4194 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
/* The trampoline takes the object in a fixed register, not on the stack */
4195 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4196 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4199 return (MonoInst*)call;
4200 } else if (strcmp (cmethod->name, "Exit") == 0) {
4203 if (COMPILE_LLVM (cfg)) {
4204 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4206 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4207 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4208 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4209 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4212 return (MonoInst*)call;
4214 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4215 MonoMethod *fast_method = NULL;
4217 /* Avoid infinite recursion */
4218 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4219 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4220 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4223 if (strcmp (cmethod->name, "Enter") == 0 ||
4224 strcmp (cmethod->name, "Exit") == 0)
4225 fast_method = mono_monitor_get_fast_path (cmethod);
4229 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked intrinsics ---- */
4231 } else if (cmethod->klass->image == mono_defaults.corlib &&
4232 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4233 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4236 #if SIZEOF_REGISTER == 8
4237 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4238 /* 64 bit reads are already atomic */
4239 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4240 ins->dreg = mono_alloc_preg (cfg);
4241 ins->inst_basereg = args [0]->dreg;
4242 ins->inst_offset = 0;
4243 MONO_ADD_INS (cfg->cbb, ins);
4247 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement become atomic-add with a constant +/-1 operand */
4248 if (strcmp (cmethod->name, "Increment") == 0) {
4249 MonoInst *ins_iconst;
4252 if (fsig->params [0]->type == MONO_TYPE_I4)
4253 opcode = OP_ATOMIC_ADD_NEW_I4;
4254 #if SIZEOF_REGISTER == 8
4255 else if (fsig->params [0]->type == MONO_TYPE_I8)
4256 opcode = OP_ATOMIC_ADD_NEW_I8;
4259 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4260 ins_iconst->inst_c0 = 1;
4261 ins_iconst->dreg = mono_alloc_ireg (cfg);
4262 MONO_ADD_INS (cfg->cbb, ins_iconst);
4264 MONO_INST_NEW (cfg, ins, opcode);
4265 ins->dreg = mono_alloc_ireg (cfg);
4266 ins->inst_basereg = args [0]->dreg;
4267 ins->inst_offset = 0;
4268 ins->sreg2 = ins_iconst->dreg;
4269 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4270 MONO_ADD_INS (cfg->cbb, ins);
4272 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4273 MonoInst *ins_iconst;
4276 if (fsig->params [0]->type == MONO_TYPE_I4)
4277 opcode = OP_ATOMIC_ADD_NEW_I4;
4278 #if SIZEOF_REGISTER == 8
4279 else if (fsig->params [0]->type == MONO_TYPE_I8)
4280 opcode = OP_ATOMIC_ADD_NEW_I8;
4283 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4284 ins_iconst->inst_c0 = -1;
4285 ins_iconst->dreg = mono_alloc_ireg (cfg);
4286 MONO_ADD_INS (cfg->cbb, ins_iconst);
4288 MONO_INST_NEW (cfg, ins, opcode);
4289 ins->dreg = mono_alloc_ireg (cfg);
4290 ins->inst_basereg = args [0]->dreg;
4291 ins->inst_offset = 0;
4292 ins->sreg2 = ins_iconst->dreg;
4293 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4294 MONO_ADD_INS (cfg->cbb, ins);
4296 } else if (strcmp (cmethod->name, "Add") == 0) {
4299 if (fsig->params [0]->type == MONO_TYPE_I4)
4300 opcode = OP_ATOMIC_ADD_NEW_I4;
4301 #if SIZEOF_REGISTER == 8
4302 else if (fsig->params [0]->type == MONO_TYPE_I8)
4303 opcode = OP_ATOMIC_ADD_NEW_I8;
4307 MONO_INST_NEW (cfg, ins, opcode);
4308 ins->dreg = mono_alloc_ireg (cfg);
4309 ins->inst_basereg = args [0]->dreg;
4310 ins->inst_offset = 0;
4311 ins->sreg2 = args [1]->dreg;
4312 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4313 MONO_ADD_INS (cfg->cbb, ins);
4316 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4318 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4319 if (strcmp (cmethod->name, "Exchange") == 0) {
4321 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4323 if (fsig->params [0]->type == MONO_TYPE_I4)
4324 opcode = OP_ATOMIC_EXCHANGE_I4;
4325 #if SIZEOF_REGISTER == 8
4326 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4327 (fsig->params [0]->type == MONO_TYPE_I))
4328 opcode = OP_ATOMIC_EXCHANGE_I8;
4330 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4331 opcode = OP_ATOMIC_EXCHANGE_I4;
4336 MONO_INST_NEW (cfg, ins, opcode);
4337 ins->dreg = mono_alloc_ireg (cfg);
4338 ins->inst_basereg = args [0]->dreg;
4339 ins->inst_offset = 0;
4340 ins->sreg2 = args [1]->dreg;
4341 MONO_ADD_INS (cfg->cbb, ins);
4343 switch (fsig->params [0]->type) {
4345 ins->type = STACK_I4;
4349 ins->type = STACK_I8;
4351 case MONO_TYPE_OBJECT:
4352 ins->type = STACK_OBJ;
4355 g_assert_not_reached ();
4358 #if HAVE_WRITE_BARRIERS
/* Storing a reference needs a GC write barrier; keep the stored value
 * alive with a dummy use */
4360 MonoInst *dummy_use;
4361 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4362 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4363 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4367 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4369 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4370 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4372 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4373 if (fsig->params [1]->type == MONO_TYPE_I4)
4375 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4376 size = sizeof (gpointer);
4377 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4380 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4381 ins->dreg = alloc_ireg (cfg);
4382 ins->sreg1 = args [0]->dreg;
4383 ins->sreg2 = args [1]->dreg;
4384 ins->sreg3 = args [2]->dreg;
4385 ins->type = STACK_I4;
4386 MONO_ADD_INS (cfg->cbb, ins);
4387 } else if (size == 8) {
4388 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4389 ins->dreg = alloc_ireg (cfg);
4390 ins->sreg1 = args [0]->dreg;
4391 ins->sreg2 = args [1]->dreg;
4392 ins->sreg3 = args [2]->dreg;
4393 ins->type = STACK_I8;
4394 MONO_ADD_INS (cfg->cbb, ins);
4396 /* g_assert_not_reached (); */
4398 #if HAVE_WRITE_BARRIERS
4400 MonoInst *dummy_use;
4401 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4402 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4403 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4407 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib intrinsics (Debugger.Break, Environment) ---- */
4411 } else if (cmethod->klass->image == mono_defaults.corlib) {
4412 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4413 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4414 if (should_insert_brekpoint (cfg->method))
4415 MONO_INST_NEW (cfg, ins, OP_BREAK);
4417 MONO_INST_NEW (cfg, ins, OP_NOP);
4418 MONO_ADD_INS (cfg->cbb, ins);
4421 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4422 && strcmp (cmethod->klass->name, "Environment") == 0) {
4424 EMIT_NEW_ICONST (cfg, ins, 1);
4426 EMIT_NEW_ICONST (cfg, ins, 0);
4430 } else if (cmethod->klass == mono_defaults.math_class) {
4432 * There is general branches code for Min/Max, but it does not work for
4434 * http://everything2.com/?node_id=1051618
4438 #ifdef MONO_ARCH_SIMD_INTRINSICS
4439 if (cfg->opt & MONO_OPT_SIMD) {
4440 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally give the backend a chance at arch-specific intrinsics */
4446 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4450 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations.  Currently
 * only String.InternalAllocateStr is handled: when allocation profiling is
 * off and a managed allocator exists, the call is rewritten to the managed
 * allocator with the String vtable as the first argument.  Returns NULL when
 * no redirection applies.
 */
4453 inline static MonoInst*
4454 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4455 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4457 if (method->klass == mono_defaults.string_class) {
4458 /* managed string allocation support */
4459 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4460 MonoInst *iargs [2];
4461 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4462 MonoMethod *managed_alloc = NULL;
4464 g_assert (vtable); /* Should not fail since it is System.String */
4465 #ifndef MONO_CROSS_COMPILE
4466 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4470 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4471 iargs [1] = args [0];
4472 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create a local variable for each argument (including the
 * implicit 'this') of signature SIG and store the incoming values SP into
 * them, so the inlined body can address its arguments like a normal method.
 */
4479 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4481 MonoInst *store, *temp;
4484 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this', the static type is not in SIG; derive it from the stack */
4485 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4488 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4489 * would be different than the MonoInst's used to represent arguments, and
4490 * the ldelema implementation can't deal with that.
4491 * Solution: When ldelema is used on an inline argument, create a var for
4492 * it, emit ldelema on that var, and emit the saving code below in
4493 * inline_method () if needed.
4495 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4496 cfg->args [i] = temp;
4497 /* This uses cfg->args [i] which is set by the preceding line */
4498 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4499 store->cil_code = sp [0]->cil_code;
4504 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4505 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4507 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD if its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The limit is cached on first use; an empty limit
 * allows everything.
 */
4509 check_inline_called_method_name_limit (MonoMethod *called_method)
4512 static char *limit = NULL;
4514 if (limit == NULL) {
4515 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4517 if (limit_string != NULL)
4518 limit = limit_string;
4520 limit = (char *) "";
4523 if (limit [0] != '\0') {
4524 char *called_method_name = mono_method_full_name (called_method, TRUE);
4526 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4527 g_free (called_method_name);
4529 //return (strncmp_result <= 0);
4530 return (strncmp_result == 0);
4537 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit (): only
 * allow inlining into CALLER_METHOD if its full name starts with the prefix
 * from the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
4539 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4542 static char *limit = NULL;
4544 if (limit == NULL) {
4545 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4546 if (limit_string != NULL) {
4547 limit = limit_string;
4549 limit = (char *) "";
4553 if (limit [0] != '\0') {
4554 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4556 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4557 g_free (caller_method_name);
4559 //return (strncmp_result <= 0);
4560 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline a call to CMETHOD whose arguments sit on the stack at SP.
 * The callee's IL is converted to IR by a recursive call to
 * mono_method_to_ir (); if the returned cost is acceptable (or
 * inline_allways is set) the generated bblocks are linked into the caller's
 * graph, otherwise they are discarded. A number of cfg-wide fields are
 * saved before the recursion and restored afterwards, since
 * mono_method_to_ir () overwrites them for the callee.
 */
4568 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4569 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4571 MonoInst *ins, *rvar = NULL;
4572 MonoMethodHeader *cheader;
4573 MonoBasicBlock *ebblock, *sbblock;
4575 MonoMethod *prev_inlined_method;
4576 MonoInst **prev_locals, **prev_args;
4577 MonoType **prev_arg_types;
4578 guint prev_real_offset;
4579 GHashTable *prev_cbb_hash;
4580 MonoBasicBlock **prev_cil_offset_to_bb;
4581 MonoBasicBlock *prev_cbb;
4582 unsigned char* prev_cil_start;
4583 guint32 prev_cil_offset_to_bb_len;
4584 MonoMethod *prev_current_method;
4585 MonoGenericContext *prev_generic_context;
4586 gboolean ret_var_set, prev_ret_var_set;
4588 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional name-based inlining filters, see the check_inline_*_limit () helpers */
4590 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4591 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4594 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4595 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4599 if (cfg->verbose_level > 2)
4600 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4602 if (!cmethod->inline_info) {
4603 mono_jit_stats.inlineable_methods++;
4604 cmethod->inline_info = 1;
4607 /* allocate local variables */
4608 cheader = mono_method_get_header (cmethod);
4610 if (cheader == NULL || mono_loader_get_last_error ()) {
4612 mono_metadata_free_mh (cheader);
4613 mono_loader_clear_error ();
4617 /* allocate space to store the return value */
4618 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4619 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* switch cfg->locals over to the callee's locals for the duration of the recursion */
4623 prev_locals = cfg->locals;
4624 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4625 for (i = 0; i < cheader->num_locals; ++i)
4626 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4628 /* allocate start and end blocks */
4629 /* This is needed so if the inline is aborted, we can clean up */
4630 NEW_BBLOCK (cfg, sbblock);
4631 sbblock->real_offset = real_offset;
4633 NEW_BBLOCK (cfg, ebblock);
4634 ebblock->block_num = cfg->num_bblocks++;
4635 ebblock->real_offset = real_offset;
/* save the cfg state that mono_method_to_ir () is going to clobber */
4637 prev_args = cfg->args;
4638 prev_arg_types = cfg->arg_types;
4639 prev_inlined_method = cfg->inlined_method;
4640 cfg->inlined_method = cmethod;
4641 cfg->ret_var_set = FALSE;
4642 cfg->inline_depth ++;
4643 prev_real_offset = cfg->real_offset;
4644 prev_cbb_hash = cfg->cbb_hash;
4645 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4646 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4647 prev_cil_start = cfg->cil_start;
4648 prev_cbb = cfg->cbb;
4649 prev_current_method = cfg->current_method;
4650 prev_generic_context = cfg->generic_context;
4651 prev_ret_var_set = cfg->ret_var_set;
/* recursively convert the callee's IL; yields the inline cost, negative on failure */
4653 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4655 ret_var_set = cfg->ret_var_set;
/* restore the saved cfg state */
4657 cfg->inlined_method = prev_inlined_method;
4658 cfg->real_offset = prev_real_offset;
4659 cfg->cbb_hash = prev_cbb_hash;
4660 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4661 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4662 cfg->cil_start = prev_cil_start;
4663 cfg->locals = prev_locals;
4664 cfg->args = prev_args;
4665 cfg->arg_types = prev_arg_types;
4666 cfg->current_method = prev_current_method;
4667 cfg->generic_context = prev_generic_context;
4668 cfg->ret_var_set = prev_ret_var_set;
4669 cfg->inline_depth --;
/* accept the inline when the cost is below the hard-coded threshold of 60 */
4671 if ((costs >= 0 && costs < 60) || inline_allways) {
4672 if (cfg->verbose_level > 2)
4673 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4675 mono_jit_stats.inlined_methods++;
4677 /* always add some code to avoid block split failures */
4678 MONO_INST_NEW (cfg, ins, OP_NOP);
4679 MONO_ADD_INS (prev_cbb, ins);
4681 prev_cbb->next_bb = sbblock;
4682 link_bblock (cfg, prev_cbb, sbblock);
4685 * Get rid of the begin and end bblocks if possible to aid local
4688 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4690 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4691 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4693 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4694 MonoBasicBlock *prev = ebblock->in_bb [0];
4695 mono_merge_basic_blocks (cfg, prev, ebblock);
4697 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4698 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4699 cfg->cbb = prev_cbb;
4707 * If the inlined method contains only a throw, then the ret var is not
4708 * set, so set it to a dummy value.
/* a zero constant of the matching stack kind keeps later passes happy */
4711 static double r8_0 = 0.0;
4713 switch (rvar->type) {
4715 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4718 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4723 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4726 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4727 ins->type = STACK_R8;
4728 ins->inst_p0 = (void*)&r8_0;
4729 ins->dreg = rvar->dreg;
4730 MONO_ADD_INS (cfg->cbb, ins);
4733 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4736 g_assert_not_reached ();
/* push the callee's return value onto the caller's evaluation stack */
4740 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4743 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inlining failed: clear any recorded exception state and drop the new bblocks */
4746 if (cfg->verbose_level > 2)
4747 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4748 cfg->exception_type = MONO_EXCEPTION_NONE;
4749 mono_loader_clear_error ();
4751 /* This gets rid of the newly added bblocks */
4752 cfg->cbb = prev_cbb;
4754 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4759 * Some of these comments may well be out-of-date.
4760 * Design decisions: we do a single pass over the IL code (and we do bblock
4761 * splitting/merging in the few cases when it's required: a back jump to an IL
4762 * address that was not already seen as bblock starting point).
4763 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4764 * Complex operations are decomposed into simpler ones right away. We need to let the
4765 * arch-specific code peek and poke inside this process somehow (except when the
4766 * optimizations can take advantage of the full semantic info of coarse opcodes).
4767 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4768 * MonoInst->opcode initially is the IL opcode or some simplification of that
4769 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4770 * opcode with value bigger than OP_LAST.
4771 * At this point the IR can be handed over to an interpreter, a dumb code generator
4772 * or to the optimizing code generator that will translate it to SSA form.
4774 * Profiling directed optimizations.
4775 * We may compile by default with few or no optimizations and instrument the code
4776 * or the user may indicate what methods to optimize the most either in a config file
4777 * or through repeated runs where the compiler applies offline the optimizations to
4778 * each method and then decides if it was worth it.
/*
 * Convenience checks used while parsing IL in mono_method_to_ir ().
 * On failure they branch to the UNVERIFIED / load_error labels of that
 * function, so they are only usable inside it.
 */
4781 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4782 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4783 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4784 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4785 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4786 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4787 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4788 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4790 /* offset from br.s -> br like opcodes */
4791 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP either has no bblock assigned yet or
 * belongs to BB, i.e. looking ahead to IP does not cross into another bblock.
 */
4794 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4796 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4798 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL between START and END: create a bblock at every
 * branch target (and at the fall-through successor of each branch) so the
 * main conversion pass knows where bblocks begin. Also marks the bblock
 * containing a CEE_THROW as out-of-line (cold) code.
 */
4802 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4804 unsigned char *ip = start;
4805 unsigned char *target;
4808 MonoBasicBlock *bblock;
4809 const MonoOpcode *opcode;
4812 cli_addr = ip - start;
4813 i = mono_opcode_value ((const guint8 **)&ip, end);
4816 opcode = &mono_opcodes [i];
/* advance past the opcode's inline argument; only branch arguments create bblocks */
4817 switch (opcode->argument) {
4818 case MonoInlineNone:
4821 case MonoInlineString:
4822 case MonoInlineType:
4823 case MonoInlineField:
4824 case MonoInlineMethod:
4827 case MonoShortInlineR:
4834 case MonoShortInlineVar:
4835 case MonoShortInlineI:
4838 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
4839 target = start + cli_addr + 2 + (signed char)ip [1];
4840 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor starts a bblock too */
4843 GET_BBLOCK (cfg, bblock, ip);
4845 case MonoInlineBrTarget:
/* 4-byte displacement, relative to the end of the 5-byte instruction */
4846 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4847 GET_BBLOCK (cfg, bblock, target);
4850 GET_BBLOCK (cfg, bblock, ip);
4852 case MonoInlineSwitch: {
4853 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
4856 cli_addr += 5 + 4 * n;
4857 target = start + cli_addr;
4858 GET_BBLOCK (cfg, bblock, target);
4860 for (j = 0; j < n; ++j) {
4861 target = start + cli_addr + (gint32)read32 (ip);
4862 GET_BBLOCK (cfg, bblock, target);
4872 g_assert_not_reached ();
4875 if (i == CEE_THROW) {
4876 unsigned char *bb_start = ip - 1;
4878 /* Find the start of the bblock containing the throw */
4880 while ((bb_start >= start) && !bblock) {
4881 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throw-only blocks are cold paths: move them out of line */
4885 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods
 * the token indexes the wrapper's own data instead of image metadata.
 * Unlike mini_get_method (), open constructed types are allowed in the result.
 */
4894 static inline MonoMethod *
4895 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4899 if (m->wrapper_type != MONO_WRAPPER_NONE)
4900 return mono_method_get_wrapper_data (m, token);
4902 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when no generic sharing context
 * is active, a method whose class is still an open constructed type is
 * detected and rejected (NOTE(review): presumably by returning NULL /
 * flagging an error — confirm against the full definition).
 */
4907 static inline MonoMethod *
4908 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4910 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4912 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass; for wrapper methods the token indexes the
 * wrapper's own data instead of image metadata. The class is initialized
 * via mono_class_init () before use.
 */
4918 static inline MonoClass*
4919 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4923 if (method->wrapper_type != MONO_WRAPPER_NONE)
4924 klass = mono_method_get_wrapper_data (method, token);
4926 klass = mono_class_get_full (method->klass->image, token, context);
4928 mono_class_init (klass);
4933 * Returns TRUE if the JIT should abort inlining because "callee"
4934 * is influenced by security attributes.
4937 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only evaluated while inlining (caller differs from the method being compiled)
 * and only if the callee carries declarative security */
4941 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4945 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4946 if (result == MONO_JIT_SECURITY_OK)
4949 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4950 /* Generate code to throw a SecurityException before the actual call/link */
4951 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4954 NEW_ICONST (cfg, args [0], 4);
4955 NEW_METHODCONST (cfg, args [1], caller);
4956 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4957 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4958 /* don't hide previous results */
4959 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4960 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (Exception) method,
 * looked up once and cached in a static.
 */
4968 throw_exception (void)
4970 static MonoMethod *method = NULL;
4973 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4974 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * insertion point, so the exception is raised at runtime.
 */
4981 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4983 MonoMethod *thrower = throw_exception ();
4986 EMIT_NEW_PCONST (cfg, args [0], ex);
4987 mono_emit_method_call (cfg, thrower, args, NULL);
4991 * Return the original method if a wrapper is specified. We can only access
4992 * the custom attributes from the original method.
4995 get_original_method (MonoMethod *method)
4997 if (method->wrapper_type == MONO_WRAPPER_NONE)
5000 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5001 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5004 /* in other cases we need to find the original method */
5005 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the exception returned by the security layer. The caller is
 * unwrapped first since only the original method carries the attributes.
 */
5009 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5010 MonoBasicBlock *bblock, unsigned char *ip)
5012 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5013 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5015 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the exception returned by the security layer. The caller is
 * unwrapped first since only the original method carries the attributes.
 */
5019 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5020 MonoBasicBlock *bblock, unsigned char *ip)
5022 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5023 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5025 emit_throw_exception (cfg, ex);
5029 * Check that the IL instructions at ip are the array initialization
5030 * sequence and return the pointer to the data and the size.
5033 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5036 * newarr[System.Int32]
5038 * ldtoken field valuetype ...
5039 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* match the exact dup / ldtoken (field) / call encoding of the pattern above */
5041 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5042 guint32 token = read32 (ip + 7);
5043 guint32 field_token = read32 (ip + 2);
5044 guint32 field_index = field_token & 0xffffff;
5046 const char *data_ptr;
5048 MonoMethod *cmethod;
5049 MonoClass *dummy_class;
5050 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5056 *out_field_token = field_token;
5058 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must really be corlib's RuntimeHelpers::InitializeArray */
5061 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types with a fixed, endian-independent layout are accepted */
5063 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5064 case MONO_TYPE_BOOLEAN:
5068 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5069 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5070 case MONO_TYPE_CHAR:
5080 return NULL; /* stupid ARM FP swapped format */
/* the field's declared size must cover the data we are about to copy */
5090 if (size > mono_type_size (field->type, &dummy_align))
5093 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5094 if (!method->klass->image->dynamic) {
/* non-dynamic image: map the field's RVA to get at the raw blob */
5095 field_index = read32 (ip + 2) & 0xffffff;
5096 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5097 data_ptr = mono_image_rva_map (method->klass->image, rva);
5098 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5099 /* for aot code we do the lookup on load */
5100 if (aot && data_ptr)
5101 return GUINT_TO_POINTER (rva);
5103 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) image: read the data directly from the field */
5105 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, including a disassembly of the
 * offending IL instruction at IP (or a note that the method body is empty).
 */
5113 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5115 char *method_fname = mono_method_full_name (method, TRUE);
5117 MonoMethodHeader *header = mono_method_get_header (method);
5119 if (header->code_size == 0)
5120 method_code = g_strdup ("method body is empty.");
5122 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5123 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5124 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5125 g_free (method_fname);
5126 g_free (method_code);
/* defer freeing the header until the cfg itself is destroyed */
5127 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-constructed managed exception on CFG. cfg->exception_ptr
 * is registered as a GC root so the object survives collections while the
 * compilation is being torn down.
 */
5131 set_exception_object (MonoCompile *cfg, MonoException *exception)
5133 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5134 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5135 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Like MONO_TYPE_IS_REFERENCE, but under generic sharing KLASS is first
 * resolved to the basic type used by the shared code.
 */
5139 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5143 if (cfg->generic_sharing_context)
5144 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5146 type = &klass->byval_arg;
5147 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit IR storing *SP into local N. When the value on top of the stack is
 * a constant that was just emitted into the current bblock, simply retarget
 * its destination register instead of emitting a separate move.
 */
5151 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5154 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5155 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5156 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5157 /* Optimize reg-reg moves away */
5159 * Can't optimize other opcodes, since sp[0] might point to
5160 * the last ins of a decomposed opcode.
5162 sp [0]->dreg = (cfg)->locals [n]->dreg;
5164 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5169 * ldloca inhibits many optimizations so try to get rid of it in common
5172 static inline unsigned char *
5173 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* NOTE(review): 'size' appears to be the length of the ldloca encoding,
 * with the local index read right after it — confirm against the callers */
5182 local = read16 (ip + 2);
/* ldloca <local> immediately followed by initobj <token> in the same bblock:
 * initialize the local in place and skip the ldloca entirely */
5186 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5187 gboolean skip = FALSE;
5189 /* From the INITOBJ case */
5190 token = read32 (ip + 2);
5191 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5192 CHECK_TYPELOAD (klass);
/* reference types are zeroed with a NULL store, value types with VZERO */
5193 if (generic_class_is_reference_type (cfg, klass)) {
5194 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5195 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5196 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5197 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5198 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* Return TRUE if CLASS is System.Exception or derives from it */
5211 is_exception_class (MonoClass *class)
5214 if (class == mono_defaults.exception_class)
5216 class = class->parent;
5222 * mono_method_to_ir:
5224 * Translate the .net IL into linear IR.
5227 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5228 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5229 guint inline_offset, gboolean is_virtual_call)
5232 MonoInst *ins, **sp, **stack_start;
5233 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5234 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5235 MonoMethod *cmethod, *method_definition;
5236 MonoInst **arg_array;
5237 MonoMethodHeader *header;
5239 guint32 token, ins_flag;
5241 MonoClass *constrained_call = NULL;
5242 unsigned char *ip, *end, *target, *err_pos;
5243 static double r8_0 = 0.0;
5244 MonoMethodSignature *sig;
5245 MonoGenericContext *generic_context = NULL;
5246 MonoGenericContainer *generic_container = NULL;
5247 MonoType **param_types;
5248 int i, n, start_new_bblock, dreg;
5249 int num_calls = 0, inline_costs = 0;
5250 int breakpoint_id = 0;
5252 MonoBoolean security, pinvoke;
5253 MonoSecurityManager* secman = NULL;
5254 MonoDeclSecurityActions actions;
5255 GSList *class_inits = NULL;
5256 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5258 gboolean init_locals, seq_points, skip_dead_blocks;
5260 /* serialization and xdomain stuff may need access to private fields and methods */
5261 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5262 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5263 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5264 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5265 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5266 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5268 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5270 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5271 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5272 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5273 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5275 image = method->klass->image;
5276 header = mono_method_get_header (method);
5277 generic_container = mono_method_get_generic_container (method);
5278 sig = mono_method_signature (method);
5279 num_args = sig->hasthis + sig->param_count;
5280 ip = (unsigned char*)header->code;
5281 cfg->cil_start = ip;
5282 end = ip + header->code_size;
5283 mono_jit_stats.cil_code_size += header->code_size;
5284 init_locals = header->init_locals;
5286 seq_points = cfg->gen_seq_points && cfg->method == method;
5289 * Methods without init_locals set could cause asserts in various passes
5294 method_definition = method;
5295 while (method_definition->is_inflated) {
5296 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5297 method_definition = imethod->declaring;
5300 /* SkipVerification is not allowed if core-clr is enabled */
5301 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5303 dont_verify_stloc = TRUE;
5306 if (!dont_verify && mini_method_verify (cfg, method_definition))
5307 goto exception_exit;
5309 if (mono_debug_using_mono_debugger ())
5310 cfg->keep_cil_nops = TRUE;
5312 if (sig->is_inflated)
5313 generic_context = mono_method_get_context (method);
5314 else if (generic_container)
5315 generic_context = &generic_container->context;
5316 cfg->generic_context = generic_context;
5318 if (!cfg->generic_sharing_context)
5319 g_assert (!sig->has_type_parameters);
5321 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5322 g_assert (method->is_inflated);
5323 g_assert (mono_method_get_context (method)->method_inst);
5325 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5326 g_assert (sig->generic_param_count);
5328 if (cfg->method == method) {
5329 cfg->real_offset = 0;
5331 cfg->real_offset = inline_offset;
5334 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5335 cfg->cil_offset_to_bb_len = header->code_size;
5337 cfg->current_method = method;
5339 if (cfg->verbose_level > 2)
5340 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5342 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5344 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5345 for (n = 0; n < sig->param_count; ++n)
5346 param_types [n + sig->hasthis] = sig->params [n];
5347 cfg->arg_types = param_types;
5349 dont_inline = g_list_prepend (dont_inline, method);
5350 if (cfg->method == method) {
5352 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5353 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5356 NEW_BBLOCK (cfg, start_bblock);
5357 cfg->bb_entry = start_bblock;
5358 start_bblock->cil_code = NULL;
5359 start_bblock->cil_length = 0;
5362 NEW_BBLOCK (cfg, end_bblock);
5363 cfg->bb_exit = end_bblock;
5364 end_bblock->cil_code = NULL;
5365 end_bblock->cil_length = 0;
5366 g_assert (cfg->num_bblocks == 2);
5368 arg_array = cfg->args;
5370 if (header->num_clauses) {
5371 cfg->spvars = g_hash_table_new (NULL, NULL);
5372 cfg->exvars = g_hash_table_new (NULL, NULL);
5374 /* handle exception clauses */
5375 for (i = 0; i < header->num_clauses; ++i) {
5376 MonoBasicBlock *try_bb;
5377 MonoExceptionClause *clause = &header->clauses [i];
5378 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5379 try_bb->real_offset = clause->try_offset;
5380 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5381 tblock->real_offset = clause->handler_offset;
5382 tblock->flags |= BB_EXCEPTION_HANDLER;
5384 link_bblock (cfg, try_bb, tblock);
5386 if (*(ip + clause->handler_offset) == CEE_POP)
5387 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5389 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5390 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5391 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5392 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5393 MONO_ADD_INS (tblock, ins);
5395 /* todo: is a fault block unsafe to optimize? */
5396 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5397 tblock->flags |= BB_EXCEPTION_UNSAFE;
5401 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5403 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5405 /* catch and filter blocks get the exception object on the stack */
5406 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5407 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5408 MonoInst *dummy_use;
5410 /* mostly like handle_stack_args (), but just sets the input args */
5411 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5412 tblock->in_scount = 1;
5413 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5414 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5417 * Add a dummy use for the exvar so its liveness info will be
5421 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5423 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5424 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5425 tblock->flags |= BB_EXCEPTION_HANDLER;
5426 tblock->real_offset = clause->data.filter_offset;
5427 tblock->in_scount = 1;
5428 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5429 /* The filter block shares the exvar with the handler block */
5430 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5431 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5432 MONO_ADD_INS (tblock, ins);
5436 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5437 clause->data.catch_class &&
5438 cfg->generic_sharing_context &&
5439 mono_class_check_context_used (clause->data.catch_class)) {
5441 * In shared generic code with catch
5442 * clauses containing type variables
5443 * the exception handling code has to
5444 * be able to get to the rgctx.
5445 * Therefore we have to make sure that
5446 * the vtable/mrgctx argument (for
5447 * static or generic methods) or the
5448 * "this" argument (for non-static
5449 * methods) are live.
5451 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5452 mini_method_get_context (method)->method_inst ||
5453 method->klass->valuetype) {
5454 mono_get_vtable_var (cfg);
5456 MonoInst *dummy_use;
5458 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5463 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5464 cfg->cbb = start_bblock;
5465 cfg->args = arg_array;
5466 mono_save_args (cfg, sig, inline_args);
5469 /* FIRST CODE BLOCK */
5470 NEW_BBLOCK (cfg, bblock);
5471 bblock->cil_code = ip;
5475 ADD_BBLOCK (cfg, bblock);
5477 if (cfg->method == method) {
5478 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5479 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5480 MONO_INST_NEW (cfg, ins, OP_BREAK);
5481 MONO_ADD_INS (bblock, ins);
5485 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5486 secman = mono_security_manager_get_methods ();
5488 security = (secman && mono_method_has_declsec (method));
5489 /* at this point having security doesn't mean we have any code to generate */
5490 if (security && (cfg->method == method)) {
5491 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5492 * And we do not want to enter the next section (with allocation) if we
5493 * have nothing to generate */
5494 security = mono_declsec_get_demands (method, &actions);
5497 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5498 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5500 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5501 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5502 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5504 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5505 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5509 mono_custom_attrs_free (custom);
5512 custom = mono_custom_attrs_from_class (wrapped->klass);
5513 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5517 mono_custom_attrs_free (custom);
5520 /* not a P/Invoke after all */
5525 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5526 /* we use a separate basic block for the initialization code */
5527 NEW_BBLOCK (cfg, init_localsbb);
5528 cfg->bb_init = init_localsbb;
5529 init_localsbb->real_offset = cfg->real_offset;
5530 start_bblock->next_bb = init_localsbb;
5531 init_localsbb->next_bb = bblock;
5532 link_bblock (cfg, start_bblock, init_localsbb);
5533 link_bblock (cfg, init_localsbb, bblock);
5535 cfg->cbb = init_localsbb;
5537 start_bblock->next_bb = bblock;
5538 link_bblock (cfg, start_bblock, bblock);
5541 /* at this point we know, if security is TRUE, that some code needs to be generated */
5542 if (security && (cfg->method == method)) {
5545 mono_jit_stats.cas_demand_generation++;
5547 if (actions.demand.blob) {
5548 /* Add code for SecurityAction.Demand */
5549 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5550 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5551 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5552 mono_emit_method_call (cfg, secman->demand, args, NULL);
5554 if (actions.noncasdemand.blob) {
5555 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5556 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5557 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5558 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5559 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5560 mono_emit_method_call (cfg, secman->demand, args, NULL);
5562 if (actions.demandchoice.blob) {
5563 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5564 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5565 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5566 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5567 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5571 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5573 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5576 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5577 /* check if this is native code, e.g. an icall or a p/invoke */
5578 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5579 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5581 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5582 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5584 /* if this is a native call then it can only be JITted from platform code */
5585 if ((icall || pinvk) && method->klass && method->klass->image) {
5586 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5587 MonoException *ex = icall ? mono_get_exception_security () :
5588 mono_get_exception_method_access ();
5589 emit_throw_exception (cfg, ex);
5596 if (header->code_size == 0)
5599 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5604 if (cfg->method == method)
5605 mono_debug_init_method (cfg, bblock, breakpoint_id);
5607 for (n = 0; n < header->num_locals; ++n) {
5608 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5613 /* We force the vtable variable here for all shared methods
5614 for the possibility that they might show up in a stack
5615 trace where their exact instantiation is needed. */
5616 if (cfg->generic_sharing_context && method == cfg->method) {
5617 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5618 mini_method_get_context (method)->method_inst ||
5619 method->klass->valuetype) {
5620 mono_get_vtable_var (cfg);
5622 /* FIXME: Is there a better way to do this?
5623 We need the variable live for the duration
5624 of the whole method. */
5625 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5629 /* add a check for this != NULL to inlined methods */
5630 if (is_virtual_call) {
5633 NEW_ARGLOAD (cfg, arg_ins, 0);
5634 MONO_ADD_INS (cfg->cbb, arg_ins);
5635 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5638 skip_dead_blocks = !dont_verify;
5639 if (skip_dead_blocks) {
5640 original_bb = bb = mono_basic_block_split (method, &error);
5641 if (!mono_error_ok (&error)) {
5642 mono_error_cleanup (&error);
5648 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5649 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5652 start_new_bblock = 0;
5655 if (cfg->method == method)
5656 cfg->real_offset = ip - header->code;
5658 cfg->real_offset = inline_offset;
5663 if (start_new_bblock) {
5664 bblock->cil_length = ip - bblock->cil_code;
5665 if (start_new_bblock == 2) {
5666 g_assert (ip == tblock->cil_code);
5668 GET_BBLOCK (cfg, tblock, ip);
5670 bblock->next_bb = tblock;
5673 start_new_bblock = 0;
5674 for (i = 0; i < bblock->in_scount; ++i) {
5675 if (cfg->verbose_level > 3)
5676 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5677 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5681 g_slist_free (class_inits);
5684 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5685 link_bblock (cfg, bblock, tblock);
5686 if (sp != stack_start) {
5687 handle_stack_args (cfg, stack_start, sp - stack_start);
5689 CHECK_UNVERIFIABLE (cfg);
5691 bblock->next_bb = tblock;
5694 for (i = 0; i < bblock->in_scount; ++i) {
5695 if (cfg->verbose_level > 3)
5696 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5697 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5700 g_slist_free (class_inits);
5705 if (skip_dead_blocks) {
5706 int ip_offset = ip - header->code;
5708 if (ip_offset == bb->end)
5712 int op_size = mono_opcode_size (ip, end);
5713 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5715 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5717 if (ip_offset + op_size == bb->end) {
5718 MONO_INST_NEW (cfg, ins, OP_NOP);
5719 MONO_ADD_INS (bblock, ins);
5720 start_new_bblock = 1;
5728 * Sequence points are points where the debugger can place a breakpoint.
5729 * Currently, we generate these automatically at points where the IL
5732 if (seq_points && sp == stack_start) {
5733 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5734 MONO_ADD_INS (cfg->cbb, ins);
5737 bblock->real_offset = cfg->real_offset;
5739 if ((cfg->method == method) && cfg->coverage_info) {
5740 guint32 cil_offset = ip - header->code;
5741 cfg->coverage_info->data [cil_offset].cil_code = ip;
5743 /* TODO: Use an increment here */
5744 #if defined(TARGET_X86)
5745 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5746 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5748 MONO_ADD_INS (cfg->cbb, ins);
5750 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5751 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5755 if (cfg->verbose_level > 3)
5756 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5760 if (cfg->keep_cil_nops)
5761 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5763 MONO_INST_NEW (cfg, ins, OP_NOP);
5765 MONO_ADD_INS (bblock, ins);
5768 if (should_insert_brekpoint (cfg->method))
5769 MONO_INST_NEW (cfg, ins, OP_BREAK);
5771 MONO_INST_NEW (cfg, ins, OP_NOP);
5773 MONO_ADD_INS (bblock, ins);
5779 CHECK_STACK_OVF (1);
5780 n = (*ip)-CEE_LDARG_0;
5782 EMIT_NEW_ARGLOAD (cfg, ins, n);
5790 CHECK_STACK_OVF (1);
5791 n = (*ip)-CEE_LDLOC_0;
5793 EMIT_NEW_LOCLOAD (cfg, ins, n);
5802 n = (*ip)-CEE_STLOC_0;
5805 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5807 emit_stloc_ir (cfg, sp, header, n);
5814 CHECK_STACK_OVF (1);
5817 EMIT_NEW_ARGLOAD (cfg, ins, n);
5823 CHECK_STACK_OVF (1);
5826 NEW_ARGLOADA (cfg, ins, n);
5827 MONO_ADD_INS (cfg->cbb, ins);
5837 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5839 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5844 CHECK_STACK_OVF (1);
5847 EMIT_NEW_LOCLOAD (cfg, ins, n);
5851 case CEE_LDLOCA_S: {
5852 unsigned char *tmp_ip;
5854 CHECK_STACK_OVF (1);
5855 CHECK_LOCAL (ip [1]);
5857 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5863 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5872 CHECK_LOCAL (ip [1]);
5873 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5875 emit_stloc_ir (cfg, sp, header, ip [1]);
5880 CHECK_STACK_OVF (1);
5881 EMIT_NEW_PCONST (cfg, ins, NULL);
5882 ins->type = STACK_OBJ;
5887 CHECK_STACK_OVF (1);
5888 EMIT_NEW_ICONST (cfg, ins, -1);
5901 CHECK_STACK_OVF (1);
5902 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5908 CHECK_STACK_OVF (1);
5910 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5916 CHECK_STACK_OVF (1);
5917 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5923 CHECK_STACK_OVF (1);
5924 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5925 ins->type = STACK_I8;
5926 ins->dreg = alloc_dreg (cfg, STACK_I8);
5928 ins->inst_l = (gint64)read64 (ip);
5929 MONO_ADD_INS (bblock, ins);
5935 gboolean use_aotconst = FALSE;
5937 #ifdef TARGET_POWERPC
5938 /* FIXME: Clean this up */
5939 if (cfg->compile_aot)
5940 use_aotconst = TRUE;
5943 /* FIXME: we should really allocate this only late in the compilation process */
5944 f = mono_domain_alloc (cfg->domain, sizeof (float));
5946 CHECK_STACK_OVF (1);
5952 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5954 dreg = alloc_freg (cfg);
5955 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5956 ins->type = STACK_R8;
5958 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5959 ins->type = STACK_R8;
5960 ins->dreg = alloc_dreg (cfg, STACK_R8);
5962 MONO_ADD_INS (bblock, ins);
5972 gboolean use_aotconst = FALSE;
5974 #ifdef TARGET_POWERPC
5975 /* FIXME: Clean this up */
5976 if (cfg->compile_aot)
5977 use_aotconst = TRUE;
5980 /* FIXME: we should really allocate this only late in the compilation process */
5981 d = mono_domain_alloc (cfg->domain, sizeof (double));
5983 CHECK_STACK_OVF (1);
5989 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5991 dreg = alloc_freg (cfg);
5992 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5993 ins->type = STACK_R8;
5995 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5996 ins->type = STACK_R8;
5997 ins->dreg = alloc_dreg (cfg, STACK_R8);
5999 MONO_ADD_INS (bblock, ins);
6008 MonoInst *temp, *store;
6010 CHECK_STACK_OVF (1);
6014 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6015 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6017 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6020 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6033 if (sp [0]->type == STACK_R8)
6034 /* we need to pop the value from the x86 FP stack */
6035 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6044 if (stack_start != sp)
6046 token = read32 (ip + 1);
6047 /* FIXME: check the signature matches */
6048 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6053 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6054 GENERIC_SHARING_FAILURE (CEE_JMP);
6056 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6057 CHECK_CFG_EXCEPTION;
6059 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6061 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6064 /* Handle tail calls similarly to calls */
6065 n = fsig->param_count + fsig->hasthis;
6067 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6068 call->method = cmethod;
6069 call->tail_call = TRUE;
6070 call->signature = mono_method_signature (cmethod);
6071 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6072 call->inst.inst_p0 = cmethod;
6073 for (i = 0; i < n; ++i)
6074 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6076 mono_arch_emit_call (cfg, call);
6077 MONO_ADD_INS (bblock, (MonoInst*)call);
6080 for (i = 0; i < num_args; ++i)
6081 /* Prevent arguments from being optimized away */
6082 arg_array [i]->flags |= MONO_INST_VOLATILE;
6084 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6085 ins = (MonoInst*)call;
6086 ins->inst_p0 = cmethod;
6087 MONO_ADD_INS (bblock, ins);
6091 start_new_bblock = 1;
6096 case CEE_CALLVIRT: {
6097 MonoInst *addr = NULL;
6098 MonoMethodSignature *fsig = NULL;
6100 int virtual = *ip == CEE_CALLVIRT;
6101 int calli = *ip == CEE_CALLI;
6102 gboolean pass_imt_from_rgctx = FALSE;
6103 MonoInst *imt_arg = NULL;
6104 gboolean pass_vtable = FALSE;
6105 gboolean pass_mrgctx = FALSE;
6106 MonoInst *vtable_arg = NULL;
6107 gboolean check_this = FALSE;
6108 gboolean supported_tail_call = FALSE;
6111 token = read32 (ip + 1);
6118 if (method->wrapper_type != MONO_WRAPPER_NONE)
6119 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6121 fsig = mono_metadata_parse_signature (image, token);
6123 n = fsig->param_count + fsig->hasthis;
6125 if (method->dynamic && fsig->pinvoke) {
6129 * This is a call through a function pointer using a pinvoke
6130 * signature. Have to create a wrapper and call that instead.
6131 * FIXME: This is very slow, need to create a wrapper at JIT time
6132 * instead based on the signature.
6134 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6135 EMIT_NEW_PCONST (cfg, args [1], fsig);
6137 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6140 MonoMethod *cil_method;
6142 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6143 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6144 cil_method = cmethod;
6145 } else if (constrained_call) {
6146 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6148 * This is needed since get_method_constrained can't find
6149 * the method in klass representing a type var.
6150 * The type var is guaranteed to be a reference type in this
6153 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6154 cil_method = cmethod;
6155 g_assert (!cmethod->klass->valuetype);
6157 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6160 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6161 cil_method = cmethod;
6166 if (!dont_verify && !cfg->skip_visibility) {
6167 MonoMethod *target_method = cil_method;
6168 if (method->is_inflated) {
6169 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6171 if (!mono_method_can_access_method (method_definition, target_method) &&
6172 !mono_method_can_access_method (method, cil_method))
6173 METHOD_ACCESS_FAILURE;
6176 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6177 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6179 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6180 /* MS.NET seems to silently convert this to a callvirt */
6185 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6186 * converts to a callvirt.
6188 * tests/bug-515884.il is an example of this behavior
6190 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6191 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6192 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6196 if (!cmethod->klass->inited)
6197 if (!mono_class_init (cmethod->klass))
6200 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6201 mini_class_is_system_array (cmethod->klass)) {
6202 array_rank = cmethod->klass->rank;
6203 fsig = mono_method_signature (cmethod);
6205 fsig = mono_method_signature (cmethod);
6210 if (fsig->pinvoke) {
6211 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6212 check_for_pending_exc, FALSE);
6213 fsig = mono_method_signature (wrapper);
6214 } else if (constrained_call) {
6215 fsig = mono_method_signature (cmethod);
6217 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6221 mono_save_token_info (cfg, image, token, cil_method);
6223 n = fsig->param_count + fsig->hasthis;
6225 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6226 if (check_linkdemand (cfg, method, cmethod))
6228 CHECK_CFG_EXCEPTION;
6231 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6232 g_assert_not_reached ();
6235 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6238 if (!cfg->generic_sharing_context && cmethod)
6239 g_assert (!mono_method_check_context_used (cmethod));
6243 //g_assert (!virtual || fsig->hasthis);
6247 if (constrained_call) {
6249 * We have the `constrained.' prefix opcode.
6251 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6253 * The type parameter is instantiated as a valuetype,
6254 * but that type doesn't override the method we're
6255 * calling, so we need to box `this'.
6257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6258 ins->klass = constrained_call;
6259 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6260 CHECK_CFG_EXCEPTION;
6261 } else if (!constrained_call->valuetype) {
6262 int dreg = alloc_preg (cfg);
6265 * The type parameter is instantiated as a reference
6266 * type. We have a managed pointer on the stack, so
6267 * we need to dereference it here.
6269 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6270 ins->type = STACK_OBJ;
6272 } else if (cmethod->klass->valuetype)
6274 constrained_call = NULL;
6277 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6281 * If the callee is a shared method, then its static cctor
6282 * might not get called after the call was patched.
6284 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6285 emit_generic_class_init (cfg, cmethod->klass);
6286 CHECK_TYPELOAD (cmethod->klass);
6289 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6290 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6291 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6292 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6293 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6296 * Pass vtable iff target method might
6297 * be shared, which means that sharing
6298 * is enabled for its class and its
6299 * context is sharable (and it's not a
6302 if (sharing_enabled && context_sharable &&
6303 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6307 if (cmethod && mini_method_get_context (cmethod) &&
6308 mini_method_get_context (cmethod)->method_inst) {
6309 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6310 MonoGenericContext *context = mini_method_get_context (cmethod);
6311 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6313 g_assert (!pass_vtable);
6315 if (sharing_enabled && context_sharable)
6319 if (cfg->generic_sharing_context && cmethod) {
6320 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6322 context_used = mono_method_check_context_used (cmethod);
6324 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6325 /* Generic method interface
6326 calls are resolved via a
6327 helper function and don't
6329 if (!cmethod_context || !cmethod_context->method_inst)
6330 pass_imt_from_rgctx = TRUE;
6334 * If a shared method calls another
6335 * shared method then the caller must
6336 * have a generic sharing context
6337 * because the magic trampoline
6338 * requires it. FIXME: We shouldn't
6339 * have to force the vtable/mrgctx
6340 * variable here. Instead there
6341 * should be a flag in the cfg to
6342 * request a generic sharing context.
6345 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6346 mono_get_vtable_var (cfg);
6351 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6353 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6355 CHECK_TYPELOAD (cmethod->klass);
6356 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6361 g_assert (!vtable_arg);
6363 if (!cfg->compile_aot) {
6365 * emit_get_rgctx_method () calls mono_class_vtable () so check
6366 * for type load errors before.
6368 mono_class_setup_vtable (cmethod->klass);
6369 CHECK_TYPELOAD (cmethod->klass);
6372 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6374 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6375 MONO_METHOD_IS_FINAL (cmethod)) {
6382 if (pass_imt_from_rgctx) {
6383 g_assert (!pass_vtable);
6386 imt_arg = emit_get_rgctx_method (cfg, context_used,
6387 cmethod, MONO_RGCTX_INFO_METHOD);
6391 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6393 /* Calling virtual generic methods */
6394 if (cmethod && virtual &&
6395 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6396 !(MONO_METHOD_IS_FINAL (cmethod) &&
6397 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6398 mono_method_signature (cmethod)->generic_param_count) {
6399 MonoInst *this_temp, *this_arg_temp, *store;
6400 MonoInst *iargs [4];
6402 g_assert (mono_method_signature (cmethod)->is_inflated);
6404 /* Prevent inlining of methods that contain indirect calls */
6407 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6408 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6409 g_assert (!imt_arg);
6411 g_assert (cmethod->is_inflated);
6412 imt_arg = emit_get_rgctx_method (cfg, context_used,
6413 cmethod, MONO_RGCTX_INFO_METHOD);
6414 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6418 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6419 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6420 MONO_ADD_INS (bblock, store);
6422 /* FIXME: This should be a managed pointer */
6423 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6425 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6426 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6427 cmethod, MONO_RGCTX_INFO_METHOD);
6428 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6429 addr = mono_emit_jit_icall (cfg,
6430 mono_helper_compile_generic_method, iargs);
6432 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6434 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6437 if (!MONO_TYPE_IS_VOID (fsig->ret))
6438 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6440 CHECK_CFG_EXCEPTION;
6447 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6448 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6450 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6454 /* FIXME: runtime generic context pointer for jumps? */
6455 /* FIXME: handle this for generic sharing eventually */
6456 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6459 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6462 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6463 /* Handle tail calls similarly to calls */
6464 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6466 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6467 call->tail_call = TRUE;
6468 call->method = cmethod;
6469 call->signature = mono_method_signature (cmethod);
6472 * We implement tail calls by storing the actual arguments into the
6473 * argument variables, then emitting a CEE_JMP.
6475 for (i = 0; i < n; ++i) {
6476 /* Prevent argument from being register allocated */
6477 arg_array [i]->flags |= MONO_INST_VOLATILE;
6478 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6482 ins = (MonoInst*)call;
6483 ins->inst_p0 = cmethod;
6484 ins->inst_p1 = arg_array [0];
6485 MONO_ADD_INS (bblock, ins);
6486 link_bblock (cfg, bblock, end_bblock);
6487 start_new_bblock = 1;
6489 CHECK_CFG_EXCEPTION;
6491 /* skip CEE_RET as well */
6497 /* Conversion to a JIT intrinsic */
6498 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6499 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6500 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6505 CHECK_CFG_EXCEPTION;
6513 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6514 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6515 mono_method_check_inlining (cfg, cmethod) &&
6516 !g_list_find (dont_inline, cmethod)) {
6518 gboolean allways = FALSE;
6520 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6521 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6522 /* Prevent inlining of methods that call wrappers */
6524 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6528 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6530 cfg->real_offset += 5;
6533 if (!MONO_TYPE_IS_VOID (fsig->ret))
6534 /* *sp is already set by inline_method */
6537 inline_costs += costs;
6543 inline_costs += 10 * num_calls++;
6545 /* Tail recursion elimination */
6546 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6547 gboolean has_vtargs = FALSE;
6550 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6553 /* keep it simple */
6554 for (i = fsig->param_count - 1; i >= 0; i--) {
6555 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6560 for (i = 0; i < n; ++i)
6561 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6562 MONO_INST_NEW (cfg, ins, OP_BR);
6563 MONO_ADD_INS (bblock, ins);
6564 tblock = start_bblock->out_bb [0];
6565 link_bblock (cfg, bblock, tblock);
6566 ins->inst_target_bb = tblock;
6567 start_new_bblock = 1;
6569 /* skip the CEE_RET, too */
6570 if (ip_in_bb (cfg, bblock, ip + 5))
6580 /* Generic sharing */
6581 /* FIXME: only do this for generic methods if
6582 they are not shared! */
6583 if (context_used && !imt_arg && !array_rank &&
6584 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6585 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6586 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6587 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6590 g_assert (cfg->generic_sharing_context && cmethod);
6594 * We are compiling a call to a
6595 * generic method from shared code,
6596 * which means that we have to look up
6597 * the method in the rgctx and do an
6600 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6603 /* Indirect calls */
6605 g_assert (!imt_arg);
6607 if (*ip == CEE_CALL)
6608 g_assert (context_used);
6609 else if (*ip == CEE_CALLI)
6610 g_assert (!vtable_arg);
6612 /* FIXME: what the hell is this??? */
6613 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6614 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6616 /* Prevent inlining of methods with indirect calls */
6621 int rgctx_reg = mono_alloc_preg (cfg);
6623 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6624 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6625 call = (MonoCallInst*)ins;
6626 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6628 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6630 * Instead of emitting an indirect call, emit a direct call
6631 * with the contents of the aotconst as the patch info.
6633 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6635 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6636 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6639 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6642 if (!MONO_TYPE_IS_VOID (fsig->ret))
6643 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6645 CHECK_CFG_EXCEPTION;
6656 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6657 if (sp [fsig->param_count]->type == STACK_OBJ) {
6658 MonoInst *iargs [2];
6661 iargs [1] = sp [fsig->param_count];
6663 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6666 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6667 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6668 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6669 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6671 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6674 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6675 if (!cmethod->klass->element_class->valuetype && !readonly)
6676 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6677 CHECK_TYPELOAD (cmethod->klass);
6680 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6683 g_assert_not_reached ();
6686 CHECK_CFG_EXCEPTION;
6693 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6695 if (!MONO_TYPE_IS_VOID (fsig->ret))
6696 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6698 CHECK_CFG_EXCEPTION;
6708 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6710 } else if (imt_arg) {
6711 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6713 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6716 if (!MONO_TYPE_IS_VOID (fsig->ret))
6717 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6719 CHECK_CFG_EXCEPTION;
6726 if (cfg->method != method) {
6727 /* return from inlined method */
6729 * If in_count == 0, that means the ret is unreachable due to
6730 * being preceded by a throw. In that case, inline_method () will
6731 * handle setting the return value
6732 * (test case: test_0_inline_throw ()).
6734 if (return_var && cfg->cbb->in_count) {
6738 //g_assert (returnvar != -1);
6739 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6740 cfg->ret_var_set = TRUE;
6744 MonoType *ret_type = mono_method_signature (method)->ret;
6748 * Place a seq point here too even though the IL stack is not
6749 * empty, so a step over on
6752 * will work correctly.
6754 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6755 MONO_ADD_INS (cfg->cbb, ins);
6758 g_assert (!return_var);
6761 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6764 if (!cfg->vret_addr) {
6767 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6769 EMIT_NEW_RETLOADA (cfg, ret_addr);
6771 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6772 ins->klass = mono_class_from_mono_type (ret_type);
6775 #ifdef MONO_ARCH_SOFT_FLOAT
6776 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6777 MonoInst *iargs [1];
6781 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6782 mono_arch_emit_setret (cfg, method, conv);
6784 mono_arch_emit_setret (cfg, method, *sp);
6787 mono_arch_emit_setret (cfg, method, *sp);
6792 if (sp != stack_start)
6794 MONO_INST_NEW (cfg, ins, OP_BR);
6796 ins->inst_target_bb = end_bblock;
6797 MONO_ADD_INS (bblock, ins);
6798 link_bblock (cfg, bblock, end_bblock);
6799 start_new_bblock = 1;
6803 MONO_INST_NEW (cfg, ins, OP_BR);
6805 target = ip + 1 + (signed char)(*ip);
6807 GET_BBLOCK (cfg, tblock, target);
6808 link_bblock (cfg, bblock, tblock);
6809 ins->inst_target_bb = tblock;
6810 if (sp != stack_start) {
6811 handle_stack_args (cfg, stack_start, sp - stack_start);
6813 CHECK_UNVERIFIABLE (cfg);
6815 MONO_ADD_INS (bblock, ins);
6816 start_new_bblock = 1;
6817 inline_costs += BRANCH_COST;
6831 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6833 target = ip + 1 + *(signed char*)ip;
6839 inline_costs += BRANCH_COST;
6843 MONO_INST_NEW (cfg, ins, OP_BR);
6846 target = ip + 4 + (gint32)read32(ip);
6848 GET_BBLOCK (cfg, tblock, target);
6849 link_bblock (cfg, bblock, tblock);
6850 ins->inst_target_bb = tblock;
6851 if (sp != stack_start) {
6852 handle_stack_args (cfg, stack_start, sp - stack_start);
6854 CHECK_UNVERIFIABLE (cfg);
6857 MONO_ADD_INS (bblock, ins);
6859 start_new_bblock = 1;
6860 inline_costs += BRANCH_COST;
6867 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6868 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6869 guint32 opsize = is_short ? 1 : 4;
6871 CHECK_OPSIZE (opsize);
6873 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6876 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6881 GET_BBLOCK (cfg, tblock, target);
6882 link_bblock (cfg, bblock, tblock);
6883 GET_BBLOCK (cfg, tblock, ip);
6884 link_bblock (cfg, bblock, tblock);
6886 if (sp != stack_start) {
6887 handle_stack_args (cfg, stack_start, sp - stack_start);
6888 CHECK_UNVERIFIABLE (cfg);
6891 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6892 cmp->sreg1 = sp [0]->dreg;
6893 type_from_op (cmp, sp [0], NULL);
6896 #if SIZEOF_REGISTER == 4
6897 if (cmp->opcode == OP_LCOMPARE_IMM) {
6898 /* Convert it to OP_LCOMPARE */
6899 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6900 ins->type = STACK_I8;
6901 ins->dreg = alloc_dreg (cfg, STACK_I8);
6903 MONO_ADD_INS (bblock, ins);
6904 cmp->opcode = OP_LCOMPARE;
6905 cmp->sreg2 = ins->dreg;
6908 MONO_ADD_INS (bblock, cmp);
6910 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6911 type_from_op (ins, sp [0], NULL);
6912 MONO_ADD_INS (bblock, ins);
6913 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6914 GET_BBLOCK (cfg, tblock, target);
6915 ins->inst_true_bb = tblock;
6916 GET_BBLOCK (cfg, tblock, ip);
6917 ins->inst_false_bb = tblock;
6918 start_new_bblock = 2;
6921 inline_costs += BRANCH_COST;
6936 MONO_INST_NEW (cfg, ins, *ip);
6938 target = ip + 4 + (gint32)read32(ip);
6944 inline_costs += BRANCH_COST;
6948 MonoBasicBlock **targets;
6949 MonoBasicBlock *default_bblock;
6950 MonoJumpInfoBBTable *table;
6951 int offset_reg = alloc_preg (cfg);
6952 int target_reg = alloc_preg (cfg);
6953 int table_reg = alloc_preg (cfg);
6954 int sum_reg = alloc_preg (cfg);
6955 gboolean use_op_switch;
6959 n = read32 (ip + 1);
6962 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6966 CHECK_OPSIZE (n * sizeof (guint32));
6967 target = ip + n * sizeof (guint32);
6969 GET_BBLOCK (cfg, default_bblock, target);
6971 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6972 for (i = 0; i < n; ++i) {
6973 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6974 targets [i] = tblock;
6978 if (sp != stack_start) {
6980 * Link the current bb with the targets as well, so handle_stack_args
6981 * will set their in_stack correctly.
6983 link_bblock (cfg, bblock, default_bblock);
6984 for (i = 0; i < n; ++i)
6985 link_bblock (cfg, bblock, targets [i]);
6987 handle_stack_args (cfg, stack_start, sp - stack_start);
6989 CHECK_UNVERIFIABLE (cfg);
6992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6996 for (i = 0; i < n; ++i)
6997 link_bblock (cfg, bblock, targets [i]);
6999 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7000 table->table = targets;
7001 table->table_size = n;
7003 use_op_switch = FALSE;
7005 /* ARM implements SWITCH statements differently */
7006 /* FIXME: Make it use the generic implementation */
7007 if (!cfg->compile_aot)
7008 use_op_switch = TRUE;
7011 if (COMPILE_LLVM (cfg))
7012 use_op_switch = TRUE;
7014 cfg->cbb->has_jump_table = 1;
7016 if (use_op_switch) {
7017 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7018 ins->sreg1 = src1->dreg;
7019 ins->inst_p0 = table;
7020 ins->inst_many_bb = targets;
7021 ins->klass = GUINT_TO_POINTER (n);
7022 MONO_ADD_INS (cfg->cbb, ins);
7024 if (sizeof (gpointer) == 8)
7025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7029 #if SIZEOF_REGISTER == 8
7030 /* The upper word might not be zero, and we add it to a 64 bit address later */
7031 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7034 if (cfg->compile_aot) {
7035 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7037 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7038 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7039 ins->inst_p0 = table;
7040 ins->dreg = table_reg;
7041 MONO_ADD_INS (cfg->cbb, ins);
7044 /* FIXME: Use load_memindex */
7045 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7047 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7049 start_new_bblock = 1;
7050 inline_costs += (BRANCH_COST * 2);
7070 dreg = alloc_freg (cfg);
7073 dreg = alloc_lreg (cfg);
7076 dreg = alloc_preg (cfg);
7079 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7080 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7081 ins->flags |= ins_flag;
7083 MONO_ADD_INS (bblock, ins);
7098 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7099 ins->flags |= ins_flag;
7101 MONO_ADD_INS (bblock, ins);
7103 #if HAVE_WRITE_BARRIERS
7104 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7105 MonoInst *dummy_use;
7106 /* insert call to write barrier */
7107 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7108 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7109 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7120 MONO_INST_NEW (cfg, ins, (*ip));
7122 ins->sreg1 = sp [0]->dreg;
7123 ins->sreg2 = sp [1]->dreg;
7124 type_from_op (ins, sp [0], sp [1]);
7126 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7128 /* Use the immediate opcodes if possible */
7129 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7130 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7131 if (imm_opcode != -1) {
7132 ins->opcode = imm_opcode;
7133 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7136 sp [1]->opcode = OP_NOP;
7140 MONO_ADD_INS ((cfg)->cbb, (ins));
7142 *sp++ = mono_decompose_opcode (cfg, ins);
7159 MONO_INST_NEW (cfg, ins, (*ip));
7161 ins->sreg1 = sp [0]->dreg;
7162 ins->sreg2 = sp [1]->dreg;
7163 type_from_op (ins, sp [0], sp [1]);
7165 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7166 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7168 /* FIXME: Pass opcode to is_inst_imm */
7170 /* Use the immediate opcodes if possible */
7171 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7174 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7175 if (imm_opcode != -1) {
7176 ins->opcode = imm_opcode;
7177 if (sp [1]->opcode == OP_I8CONST) {
7178 #if SIZEOF_REGISTER == 8
7179 ins->inst_imm = sp [1]->inst_l;
7181 ins->inst_ls_word = sp [1]->inst_ls_word;
7182 ins->inst_ms_word = sp [1]->inst_ms_word;
7186 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7189 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7190 if (sp [1]->next == NULL)
7191 sp [1]->opcode = OP_NOP;
7194 MONO_ADD_INS ((cfg)->cbb, (ins));
7196 *sp++ = mono_decompose_opcode (cfg, ins);
7209 case CEE_CONV_OVF_I8:
7210 case CEE_CONV_OVF_U8:
7214 /* Special case this earlier so we have long constants in the IR */
7215 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7216 int data = sp [-1]->inst_c0;
7217 sp [-1]->opcode = OP_I8CONST;
7218 sp [-1]->type = STACK_I8;
7219 #if SIZEOF_REGISTER == 8
7220 if ((*ip) == CEE_CONV_U8)
7221 sp [-1]->inst_c0 = (guint32)data;
7223 sp [-1]->inst_c0 = data;
7225 sp [-1]->inst_ls_word = data;
7226 if ((*ip) == CEE_CONV_U8)
7227 sp [-1]->inst_ms_word = 0;
7229 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7231 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7238 case CEE_CONV_OVF_I4:
7239 case CEE_CONV_OVF_I1:
7240 case CEE_CONV_OVF_I2:
7241 case CEE_CONV_OVF_I:
7242 case CEE_CONV_OVF_U:
7245 if (sp [-1]->type == STACK_R8) {
7246 ADD_UNOP (CEE_CONV_OVF_I8);
7253 case CEE_CONV_OVF_U1:
7254 case CEE_CONV_OVF_U2:
7255 case CEE_CONV_OVF_U4:
7258 if (sp [-1]->type == STACK_R8) {
7259 ADD_UNOP (CEE_CONV_OVF_U8);
7266 case CEE_CONV_OVF_I1_UN:
7267 case CEE_CONV_OVF_I2_UN:
7268 case CEE_CONV_OVF_I4_UN:
7269 case CEE_CONV_OVF_I8_UN:
7270 case CEE_CONV_OVF_U1_UN:
7271 case CEE_CONV_OVF_U2_UN:
7272 case CEE_CONV_OVF_U4_UN:
7273 case CEE_CONV_OVF_U8_UN:
7274 case CEE_CONV_OVF_I_UN:
7275 case CEE_CONV_OVF_U_UN:
7282 CHECK_CFG_EXCEPTION;
7286 case CEE_ADD_OVF_UN:
7288 case CEE_MUL_OVF_UN:
7290 case CEE_SUB_OVF_UN:
7298 token = read32 (ip + 1);
7299 klass = mini_get_class (method, token, generic_context);
7300 CHECK_TYPELOAD (klass);
7302 if (generic_class_is_reference_type (cfg, klass)) {
7303 MonoInst *store, *load;
7304 int dreg = alloc_preg (cfg);
7306 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7307 load->flags |= ins_flag;
7308 MONO_ADD_INS (cfg->cbb, load);
7310 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7311 store->flags |= ins_flag;
7312 MONO_ADD_INS (cfg->cbb, store);
7314 #if HAVE_WRITE_BARRIERS
7315 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7316 MonoInst *dummy_use;
7317 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7318 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7319 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7323 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7335 token = read32 (ip + 1);
7336 klass = mini_get_class (method, token, generic_context);
7337 CHECK_TYPELOAD (klass);
7339 /* Optimize the common ldobj+stloc combination */
7349 loc_index = ip [5] - CEE_STLOC_0;
7356 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7357 CHECK_LOCAL (loc_index);
7359 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7360 ins->dreg = cfg->locals [loc_index]->dreg;
7366 /* Optimize the ldobj+stobj combination */
7367 /* The reference case ends up being a load+store anyway */
7368 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7373 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7389 CHECK_STACK_OVF (1);
7391 n = read32 (ip + 1);
7393 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7394 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7395 ins->type = STACK_OBJ;
7398 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7399 MonoInst *iargs [1];
7401 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7402 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7404 if (cfg->opt & MONO_OPT_SHARED) {
7405 MonoInst *iargs [3];
7407 if (cfg->compile_aot) {
7408 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7410 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7411 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7412 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7413 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7414 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7416 if (bblock->out_of_line) {
7417 MonoInst *iargs [2];
7419 if (image == mono_defaults.corlib) {
7421 * Avoid relocations in AOT and save some space by using a
7422 * version of helper_ldstr specialized to mscorlib.
7424 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7425 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7427 /* Avoid creating the string object */
7428 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7429 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7430 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7434 if (cfg->compile_aot) {
7435 NEW_LDSTRCONST (cfg, ins, image, n);
7437 MONO_ADD_INS (bblock, ins);
7440 NEW_PCONST (cfg, ins, NULL);
7441 ins->type = STACK_OBJ;
7442 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7444 MONO_ADD_INS (bblock, ins);
7453 MonoInst *iargs [2];
7454 MonoMethodSignature *fsig;
7457 MonoInst *vtable_arg = NULL;
7460 token = read32 (ip + 1);
7461 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7464 fsig = mono_method_get_signature (cmethod, image, token);
7468 mono_save_token_info (cfg, image, token, cmethod);
7470 if (!mono_class_init (cmethod->klass))
7473 if (cfg->generic_sharing_context)
7474 context_used = mono_method_check_context_used (cmethod);
7476 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7477 if (check_linkdemand (cfg, method, cmethod))
7479 CHECK_CFG_EXCEPTION;
7480 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7481 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7484 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7485 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7486 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7487 mono_class_vtable (cfg->domain, cmethod->klass);
7488 CHECK_TYPELOAD (cmethod->klass);
7490 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7491 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7494 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7495 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7497 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7499 CHECK_TYPELOAD (cmethod->klass);
7500 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7505 n = fsig->param_count;
7509 * Generate smaller code for the common newobj <exception> instruction in
7510 * argument checking code.
7512 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7513 is_exception_class (cmethod->klass) && n <= 2 &&
7514 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7515 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7516 MonoInst *iargs [3];
7518 g_assert (!vtable_arg);
7522 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7525 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7529 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7534 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7537 g_assert_not_reached ();
7545 /* move the args to allow room for 'this' in the first position */
7551 /* check_call_signature () requires sp[0] to be set */
7552 this_ins.type = STACK_OBJ;
7554 if (check_call_signature (cfg, fsig, sp))
7559 if (mini_class_is_system_array (cmethod->klass)) {
7560 g_assert (!vtable_arg);
7562 *sp = emit_get_rgctx_method (cfg, context_used,
7563 cmethod, MONO_RGCTX_INFO_METHOD);
7565 /* Avoid varargs in the common case */
7566 if (fsig->param_count == 1)
7567 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7568 else if (fsig->param_count == 2)
7569 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7570 else if (fsig->param_count == 3)
7571 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7573 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7574 } else if (cmethod->string_ctor) {
7575 g_assert (!context_used);
7576 g_assert (!vtable_arg);
7577 /* we simply pass a null pointer */
7578 EMIT_NEW_PCONST (cfg, *sp, NULL);
7579 /* now call the string ctor */
7580 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7582 MonoInst* callvirt_this_arg = NULL;
7584 if (cmethod->klass->valuetype) {
7585 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7586 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7587 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7592 * The code generated by mini_emit_virtual_call () expects
7593 * iargs [0] to be a boxed instance, but luckily the vcall
7594 * will be transformed into a normal call there.
7596 } else if (context_used) {
7597 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7600 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7602 CHECK_TYPELOAD (cmethod->klass);
7605 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7606 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7607 * As a workaround, we call class cctors before allocating objects.
7609 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7610 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7611 if (cfg->verbose_level > 2)
7612 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7613 class_inits = g_slist_prepend (class_inits, vtable);
7616 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7619 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7622 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7624 /* Now call the actual ctor */
7625 /* Avoid virtual calls to ctors if possible */
7626 if (cmethod->klass->marshalbyref)
7627 callvirt_this_arg = sp [0];
7630 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7631 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7632 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7637 CHECK_CFG_EXCEPTION;
7642 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7643 mono_method_check_inlining (cfg, cmethod) &&
7644 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7645 !g_list_find (dont_inline, cmethod)) {
7648 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7649 cfg->real_offset += 5;
7652 inline_costs += costs - 5;
7655 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7657 } else if (context_used &&
7658 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7659 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7660 MonoInst *cmethod_addr;
7662 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7663 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7665 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7668 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7669 callvirt_this_arg, NULL, vtable_arg);
7673 if (alloc == NULL) {
7675 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7676 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7690 token = read32 (ip + 1);
7691 klass = mini_get_class (method, token, generic_context);
7692 CHECK_TYPELOAD (klass);
7693 if (sp [0]->type != STACK_OBJ)
7696 if (cfg->generic_sharing_context)
7697 context_used = mono_class_check_context_used (klass);
7699 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7706 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7708 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7712 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7713 MonoMethod *mono_castclass;
7714 MonoInst *iargs [1];
7717 mono_castclass = mono_marshal_get_castclass (klass);
7720 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7721 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7722 g_assert (costs > 0);
7725 cfg->real_offset += 5;
7730 inline_costs += costs;
7733 ins = handle_castclass (cfg, klass, *sp, context_used);
7734 CHECK_CFG_EXCEPTION;
7744 token = read32 (ip + 1);
7745 klass = mini_get_class (method, token, generic_context);
7746 CHECK_TYPELOAD (klass);
7747 if (sp [0]->type != STACK_OBJ)
7750 if (cfg->generic_sharing_context)
7751 context_used = mono_class_check_context_used (klass);
7753 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7760 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7762 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7766 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7767 MonoMethod *mono_isinst;
7768 MonoInst *iargs [1];
7771 mono_isinst = mono_marshal_get_isinst (klass);
7774 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7775 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7776 g_assert (costs > 0);
7779 cfg->real_offset += 5;
7784 inline_costs += costs;
7787 ins = handle_isinst (cfg, klass, *sp, context_used);
7788 CHECK_CFG_EXCEPTION;
7795 case CEE_UNBOX_ANY: {
7799 token = read32 (ip + 1);
7800 klass = mini_get_class (method, token, generic_context);
7801 CHECK_TYPELOAD (klass);
7803 mono_save_token_info (cfg, image, token, klass);
7805 if (cfg->generic_sharing_context)
7806 context_used = mono_class_check_context_used (klass);
7808 if (generic_class_is_reference_type (cfg, klass)) {
7809 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7811 MonoInst *iargs [2];
7816 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7817 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7821 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7822 MonoMethod *mono_castclass;
7823 MonoInst *iargs [1];
7826 mono_castclass = mono_marshal_get_castclass (klass);
7829 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7830 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7832 g_assert (costs > 0);
7835 cfg->real_offset += 5;
7839 inline_costs += costs;
7841 ins = handle_castclass (cfg, klass, *sp, 0);
7842 CHECK_CFG_EXCEPTION;
7850 if (mono_class_is_nullable (klass)) {
7851 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7858 ins = handle_unbox (cfg, klass, sp, context_used);
7864 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7877 token = read32 (ip + 1);
7878 klass = mini_get_class (method, token, generic_context);
7879 CHECK_TYPELOAD (klass);
7881 mono_save_token_info (cfg, image, token, klass);
7883 if (cfg->generic_sharing_context)
7884 context_used = mono_class_check_context_used (klass);
7886 if (generic_class_is_reference_type (cfg, klass)) {
7892 if (klass == mono_defaults.void_class)
7894 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7896 /* frequent check in generic code: box (struct), brtrue */
7897 if (!mono_class_is_nullable (klass) &&
7898 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7899 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7901 MONO_INST_NEW (cfg, ins, OP_BR);
7902 if (*ip == CEE_BRTRUE_S) {
7905 target = ip + 1 + (signed char)(*ip);
7910 target = ip + 4 + (gint)(read32 (ip));
7913 GET_BBLOCK (cfg, tblock, target);
7914 link_bblock (cfg, bblock, tblock);
7915 ins->inst_target_bb = tblock;
7916 GET_BBLOCK (cfg, tblock, ip);
7918 * This leads to some inconsistency, since the two bblocks are
7919 * not really connected, but it is needed for handling stack
7920 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7921 * FIXME: This should only be needed if sp != stack_start, but that
7922 * doesn't work for some reason (test failure in mcs/tests on x86).
7924 link_bblock (cfg, bblock, tblock);
7925 if (sp != stack_start) {
7926 handle_stack_args (cfg, stack_start, sp - stack_start);
7928 CHECK_UNVERIFIABLE (cfg);
7930 MONO_ADD_INS (bblock, ins);
7931 start_new_bblock = 1;
7935 *sp++ = handle_box (cfg, val, klass, context_used);
7937 CHECK_CFG_EXCEPTION;
7946 token = read32 (ip + 1);
7947 klass = mini_get_class (method, token, generic_context);
7948 CHECK_TYPELOAD (klass);
7950 mono_save_token_info (cfg, image, token, klass);
7952 if (cfg->generic_sharing_context)
7953 context_used = mono_class_check_context_used (klass);
7955 if (mono_class_is_nullable (klass)) {
7958 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7959 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7963 ins = handle_unbox (cfg, klass, sp, context_used);
7973 MonoClassField *field;
7977 if (*ip == CEE_STFLD) {
7984 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7986 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7989 token = read32 (ip + 1);
7990 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7991 field = mono_method_get_wrapper_data (method, token);
7992 klass = field->parent;
7995 field = mono_field_from_token (image, token, &klass, generic_context);
7999 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8000 FIELD_ACCESS_FAILURE;
8001 mono_class_init (klass);
8003 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8004 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8005 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8006 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8009 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8010 if (*ip == CEE_STFLD) {
8011 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8013 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8014 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8015 MonoInst *iargs [5];
8018 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8019 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8020 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8024 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8025 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8026 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8027 g_assert (costs > 0);
8029 cfg->real_offset += 5;
8032 inline_costs += costs;
8034 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8039 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8041 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8042 store->flags |= MONO_INST_FAULT;
8044 #if HAVE_WRITE_BARRIERS
8045 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8046 /* insert call to write barrier */
8047 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8048 MonoInst *iargs [2], *dummy_use;
8051 dreg = alloc_preg (cfg);
8052 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8054 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8056 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8060 store->flags |= ins_flag;
8067 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8068 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8069 MonoInst *iargs [4];
8072 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8073 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8074 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8075 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8076 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8077 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8079 g_assert (costs > 0);
8081 cfg->real_offset += 5;
8085 inline_costs += costs;
8087 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8091 if (sp [0]->type == STACK_VTYPE) {
8094 /* Have to compute the address of the variable */
8096 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8098 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8100 g_assert (var->klass == klass);
8102 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8106 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8108 if (*ip == CEE_LDFLDA) {
8109 dreg = alloc_preg (cfg);
8111 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8112 ins->klass = mono_class_from_mono_type (field->type);
8113 ins->type = STACK_MP;
8118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8119 load->flags |= ins_flag;
8120 load->flags |= MONO_INST_FAULT;
8131 MonoClassField *field;
8132 gpointer addr = NULL;
8133 gboolean is_special_static;
8136 token = read32 (ip + 1);
8138 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8139 field = mono_method_get_wrapper_data (method, token);
8140 klass = field->parent;
8143 field = mono_field_from_token (image, token, &klass, generic_context);
8146 mono_class_init (klass);
8147 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8148 FIELD_ACCESS_FAILURE;
8150 /* if the class is Critical then transparent code cannot access it's fields */
8151 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8152 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8155 * We can only support shared generic static
8156 * field access on architectures where the
8157 * trampoline code has been extended to handle
8158 * the generic class init.
8160 #ifndef MONO_ARCH_VTABLE_REG
8161 GENERIC_SHARING_FAILURE (*ip);
8164 if (cfg->generic_sharing_context)
8165 context_used = mono_class_check_context_used (klass);
8167 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8169 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8170 * to be called here.
8172 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8173 mono_class_vtable (cfg->domain, klass);
8174 CHECK_TYPELOAD (klass);
8176 mono_domain_lock (cfg->domain);
8177 if (cfg->domain->special_static_fields)
8178 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8179 mono_domain_unlock (cfg->domain);
8181 is_special_static = mono_class_field_is_special_static (field);
8183 /* Generate IR to compute the field address */
8184 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8186 * Fast access to TLS data
8187 * Inline version of get_thread_static_data () in
8191 int idx, static_data_reg, array_reg, dreg;
8192 MonoInst *thread_ins;
8194 // offset &= 0x7fffffff;
8195 // idx = (offset >> 24) - 1;
8196 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8198 thread_ins = mono_get_thread_intrinsic (cfg);
8199 MONO_ADD_INS (cfg->cbb, thread_ins);
8200 static_data_reg = alloc_ireg (cfg);
8201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8203 if (cfg->compile_aot) {
8204 int offset_reg, offset2_reg, idx_reg;
8206 /* For TLS variables, this will return the TLS offset */
8207 EMIT_NEW_SFLDACONST (cfg, ins, field);
8208 offset_reg = ins->dreg;
8209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8210 idx_reg = alloc_ireg (cfg);
8211 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8212 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8213 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8214 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8215 array_reg = alloc_ireg (cfg);
8216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8217 offset2_reg = alloc_ireg (cfg);
8218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8219 dreg = alloc_ireg (cfg);
8220 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8222 offset = (gsize)addr & 0x7fffffff;
8223 idx = (offset >> 24) - 1;
8225 array_reg = alloc_ireg (cfg);
8226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8227 dreg = alloc_ireg (cfg);
8228 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8230 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8231 (cfg->compile_aot && is_special_static) ||
8232 (context_used && is_special_static)) {
8233 MonoInst *iargs [2];
8235 g_assert (field->parent);
8236 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8238 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8239 field, MONO_RGCTX_INFO_CLASS_FIELD);
8241 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8243 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8244 } else if (context_used) {
8245 MonoInst *static_data;
8248 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8249 method->klass->name_space, method->klass->name, method->name,
8250 depth, field->offset);
8253 if (mono_class_needs_cctor_run (klass, method))
8254 emit_generic_class_init (cfg, klass);
8257 * The pointer we're computing here is
8259 * super_info.static_data + field->offset
8261 static_data = emit_get_rgctx_klass (cfg, context_used,
8262 klass, MONO_RGCTX_INFO_STATIC_DATA);
8264 if (field->offset == 0) {
8267 int addr_reg = mono_alloc_preg (cfg);
8268 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8270 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8271 MonoInst *iargs [2];
8273 g_assert (field->parent);
8274 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8275 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8276 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8278 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8280 CHECK_TYPELOAD (klass);
8282 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8283 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8284 if (cfg->verbose_level > 2)
8285 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8286 class_inits = g_slist_prepend (class_inits, vtable);
8288 if (cfg->run_cctors) {
8290 /* This makes it so that inlining cannot trigger */
8291 /* .cctors: too many apps depend on them */
8292 /* running with a specific order... */
8293 if (! vtable->initialized)
8295 ex = mono_runtime_class_init_full (vtable, FALSE);
8297 set_exception_object (cfg, ex);
8298 goto exception_exit;
8302 addr = (char*)vtable->data + field->offset;
8304 if (cfg->compile_aot)
8305 EMIT_NEW_SFLDACONST (cfg, ins, field);
8307 EMIT_NEW_PCONST (cfg, ins, addr);
8309 MonoInst *iargs [1];
8310 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8311 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8315 /* Generate IR to do the actual load/store operation */
8317 if (*ip == CEE_LDSFLDA) {
8318 ins->klass = mono_class_from_mono_type (field->type);
8319 ins->type = STACK_PTR;
8321 } else if (*ip == CEE_STSFLD) {
8326 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8327 store->flags |= ins_flag;
8329 gboolean is_const = FALSE;
8330 MonoVTable *vtable = NULL;
8332 if (!context_used) {
8333 vtable = mono_class_vtable (cfg->domain, klass);
8334 CHECK_TYPELOAD (klass);
8336 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8337 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8338 gpointer addr = (char*)vtable->data + field->offset;
8339 int ro_type = field->type->type;
8340 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8341 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8343 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8346 case MONO_TYPE_BOOLEAN:
8348 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8352 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8355 case MONO_TYPE_CHAR:
8357 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8361 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8366 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8370 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8376 case MONO_TYPE_FNPTR:
8377 #ifndef HAVE_MOVING_COLLECTOR
8378 case MONO_TYPE_STRING:
8379 case MONO_TYPE_OBJECT:
8380 case MONO_TYPE_CLASS:
8381 case MONO_TYPE_SZARRAY:
8382 case MONO_TYPE_ARRAY:
8384 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8385 type_to_eval_stack_type ((cfg), field->type, *sp);
8390 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8395 case MONO_TYPE_VALUETYPE:
8405 CHECK_STACK_OVF (1);
8407 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8408 load->flags |= ins_flag;
8421 token = read32 (ip + 1);
8422 klass = mini_get_class (method, token, generic_context);
8423 CHECK_TYPELOAD (klass);
8424 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8425 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8426 #if HAVE_WRITE_BARRIERS
8427 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8428 generic_class_is_reference_type (cfg, klass)) {
8429 MonoInst *dummy_use;
8430 /* insert call to write barrier */
8431 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8432 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8433 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8446 const char *data_ptr;
8448 guint32 field_token;
8454 token = read32 (ip + 1);
8456 klass = mini_get_class (method, token, generic_context);
8457 CHECK_TYPELOAD (klass);
8459 if (cfg->generic_sharing_context)
8460 context_used = mono_class_check_context_used (klass);
8462 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8463 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8464 ins->sreg1 = sp [0]->dreg;
8465 ins->type = STACK_I4;
8466 ins->dreg = alloc_ireg (cfg);
8467 MONO_ADD_INS (cfg->cbb, ins);
8468 *sp = mono_decompose_opcode (cfg, ins);
8473 MonoClass *array_class = mono_array_class_get (klass, 1);
8474 /* FIXME: we cannot get a managed
8475 allocator because we can't get the
8476 open generic class's vtable. We
8477 have the same problem in
8478 handle_alloc(). This
8479 needs to be solved so that we can
8480 have managed allocs of shared
8483 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8484 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8486 MonoMethod *managed_alloc = NULL;
8488 /* FIXME: Decompose later to help abcrem */
8491 args [0] = emit_get_rgctx_klass (cfg, context_used,
8492 array_class, MONO_RGCTX_INFO_VTABLE);
8497 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8499 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8501 if (cfg->opt & MONO_OPT_SHARED) {
8502 /* Decompose now to avoid problems with references to the domainvar */
8503 MonoInst *iargs [3];
8505 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8506 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8509 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8511 /* Decompose later since it is needed by abcrem */
8512 MonoClass *array_type = mono_array_class_get (klass, 1);
8513 mono_class_vtable (cfg->domain, array_type);
8514 CHECK_TYPELOAD (array_type);
8516 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8517 ins->dreg = alloc_preg (cfg);
8518 ins->sreg1 = sp [0]->dreg;
8519 ins->inst_newa_class = klass;
8520 ins->type = STACK_OBJ;
8522 MONO_ADD_INS (cfg->cbb, ins);
8523 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8524 cfg->cbb->has_array_access = TRUE;
8526 /* Needed so mono_emit_load_get_addr () gets called */
8527 mono_get_got_var (cfg);
8537 * we inline/optimize the initialization sequence if possible.
8538 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8539 * for small sizes open code the memcpy
8540 * ensure the rva field is big enough
8542 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8543 MonoMethod *memcpy_method = get_memcpy_method ();
8544 MonoInst *iargs [3];
8545 int add_reg = alloc_preg (cfg);
8547 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8548 if (cfg->compile_aot) {
8549 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8551 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8553 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8554 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8563 if (sp [0]->type != STACK_OBJ)
8566 dreg = alloc_preg (cfg);
8567 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8568 ins->dreg = alloc_preg (cfg);
8569 ins->sreg1 = sp [0]->dreg;
8570 ins->type = STACK_I4;
8571 MONO_ADD_INS (cfg->cbb, ins);
8572 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8573 cfg->cbb->has_array_access = TRUE;
8581 if (sp [0]->type != STACK_OBJ)
8584 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8586 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8587 CHECK_TYPELOAD (klass);
8588 /* we need to make sure that this array is exactly the type it needs
8589 * to be for correctness. the wrappers are lax with their usage
8590 * so we need to ignore them here
8592 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8593 MonoClass *array_class = mono_array_class_get (klass, 1);
8594 mini_emit_check_array_type (cfg, sp [0], array_class);
8595 CHECK_TYPELOAD (array_class);
8599 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8614 case CEE_LDELEM_REF: {
8620 if (*ip == CEE_LDELEM) {
8622 token = read32 (ip + 1);
8623 klass = mini_get_class (method, token, generic_context);
8624 CHECK_TYPELOAD (klass);
8625 mono_class_init (klass);
8628 klass = array_access_to_klass (*ip);
8630 if (sp [0]->type != STACK_OBJ)
8633 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8635 if (sp [1]->opcode == OP_ICONST) {
8636 int array_reg = sp [0]->dreg;
8637 int index_reg = sp [1]->dreg;
8638 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8640 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8641 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8643 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8647 if (*ip == CEE_LDELEM)
8660 case CEE_STELEM_REF:
8667 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8669 if (*ip == CEE_STELEM) {
8671 token = read32 (ip + 1);
8672 klass = mini_get_class (method, token, generic_context);
8673 CHECK_TYPELOAD (klass);
8674 mono_class_init (klass);
8677 klass = array_access_to_klass (*ip);
8679 if (sp [0]->type != STACK_OBJ)
8682 /* storing a NULL doesn't need any of the complex checks in stelemref */
8683 if (generic_class_is_reference_type (cfg, klass) &&
8684 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8685 MonoMethod* helper = mono_marshal_get_stelemref ();
8686 MonoInst *iargs [3];
8688 if (sp [0]->type != STACK_OBJ)
8690 if (sp [2]->type != STACK_OBJ)
8697 mono_emit_method_call (cfg, helper, iargs, NULL);
8699 if (sp [1]->opcode == OP_ICONST) {
8700 int array_reg = sp [0]->dreg;
8701 int index_reg = sp [1]->dreg;
8702 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8704 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8705 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8707 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8708 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8712 if (*ip == CEE_STELEM)
8719 case CEE_CKFINITE: {
8723 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8724 ins->sreg1 = sp [0]->dreg;
8725 ins->dreg = alloc_freg (cfg);
8726 ins->type = STACK_R8;
8727 MONO_ADD_INS (bblock, ins);
8729 *sp++ = mono_decompose_opcode (cfg, ins);
8734 case CEE_REFANYVAL: {
8735 MonoInst *src_var, *src;
8737 int klass_reg = alloc_preg (cfg);
8738 int dreg = alloc_preg (cfg);
8741 MONO_INST_NEW (cfg, ins, *ip);
8744 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8745 CHECK_TYPELOAD (klass);
8746 mono_class_init (klass);
8748 if (cfg->generic_sharing_context)
8749 context_used = mono_class_check_context_used (klass);
8752 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8754 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8755 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8759 MonoInst *klass_ins;
8761 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8762 klass, MONO_RGCTX_INFO_KLASS);
8765 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8766 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8768 mini_emit_class_check (cfg, klass_reg, klass);
8770 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8771 ins->type = STACK_MP;
8776 case CEE_MKREFANY: {
8777 MonoInst *loc, *addr;
8780 MONO_INST_NEW (cfg, ins, *ip);
8783 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8784 CHECK_TYPELOAD (klass);
8785 mono_class_init (klass);
8787 if (cfg->generic_sharing_context)
8788 context_used = mono_class_check_context_used (klass);
8790 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8791 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8794 MonoInst *const_ins;
8795 int type_reg = alloc_preg (cfg);
8797 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8798 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8800 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8801 } else if (cfg->compile_aot) {
8802 int const_reg = alloc_preg (cfg);
8803 int type_reg = alloc_preg (cfg);
8805 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8811 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8815 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8816 ins->type = STACK_VTYPE;
8817 ins->klass = mono_defaults.typed_reference_class;
8824 MonoClass *handle_class;
8826 CHECK_STACK_OVF (1);
8829 n = read32 (ip + 1);
8831 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8832 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8833 handle = mono_method_get_wrapper_data (method, n);
8834 handle_class = mono_method_get_wrapper_data (method, n + 1);
8835 if (handle_class == mono_defaults.typehandle_class)
8836 handle = &((MonoClass*)handle)->byval_arg;
8839 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8843 mono_class_init (handle_class);
8844 if (cfg->generic_sharing_context) {
8845 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8846 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8847 /* This case handles ldtoken
8848 of an open type, like for
8851 } else if (handle_class == mono_defaults.typehandle_class) {
8852 /* If we get a MONO_TYPE_CLASS
8853 then we need to provide the
8855 instantiation of it. */
8856 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8859 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8860 } else if (handle_class == mono_defaults.fieldhandle_class)
8861 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8862 else if (handle_class == mono_defaults.methodhandle_class)
8863 context_used = mono_method_check_context_used (handle);
8865 g_assert_not_reached ();
8868 if ((cfg->opt & MONO_OPT_SHARED) &&
8869 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8870 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8871 MonoInst *addr, *vtvar, *iargs [3];
8872 int method_context_used;
8874 if (cfg->generic_sharing_context)
8875 method_context_used = mono_method_check_context_used (method);
8877 method_context_used = 0;
8879 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8881 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8882 EMIT_NEW_ICONST (cfg, iargs [1], n);
8883 if (method_context_used) {
8884 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8885 method, MONO_RGCTX_INFO_METHOD);
8886 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8888 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8889 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8891 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8895 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8897 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8898 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8899 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8900 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8901 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8902 MonoClass *tclass = mono_class_from_mono_type (handle);
8904 mono_class_init (tclass);
8906 ins = emit_get_rgctx_klass (cfg, context_used,
8907 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8908 } else if (cfg->compile_aot) {
8909 if (method->wrapper_type) {
8910 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8911 /* Special case for static synchronized wrappers */
8912 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8914 /* FIXME: n is not a normal token */
8915 cfg->disable_aot = TRUE;
8916 EMIT_NEW_PCONST (cfg, ins, NULL);
8919 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8922 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8924 ins->type = STACK_OBJ;
8925 ins->klass = cmethod->klass;
8928 MonoInst *addr, *vtvar;
8930 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8933 if (handle_class == mono_defaults.typehandle_class) {
8934 ins = emit_get_rgctx_klass (cfg, context_used,
8935 mono_class_from_mono_type (handle),
8936 MONO_RGCTX_INFO_TYPE);
8937 } else if (handle_class == mono_defaults.methodhandle_class) {
8938 ins = emit_get_rgctx_method (cfg, context_used,
8939 handle, MONO_RGCTX_INFO_METHOD);
8940 } else if (handle_class == mono_defaults.fieldhandle_class) {
8941 ins = emit_get_rgctx_field (cfg, context_used,
8942 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8944 g_assert_not_reached ();
8946 } else if (cfg->compile_aot) {
8947 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8949 EMIT_NEW_PCONST (cfg, ins, handle);
8951 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8952 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8953 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8963 MONO_INST_NEW (cfg, ins, OP_THROW);
8965 ins->sreg1 = sp [0]->dreg;
8967 bblock->out_of_line = TRUE;
8968 MONO_ADD_INS (bblock, ins);
8969 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8970 MONO_ADD_INS (bblock, ins);
8973 link_bblock (cfg, bblock, end_bblock);
8974 start_new_bblock = 1;
8976 case CEE_ENDFINALLY:
8977 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8978 MONO_ADD_INS (bblock, ins);
8980 start_new_bblock = 1;
8983 * Control will leave the method so empty the stack, otherwise
8984 * the next basic block will start with a nonempty stack.
8986 while (sp != stack_start) {
8994 if (*ip == CEE_LEAVE) {
8996 target = ip + 5 + (gint32)read32(ip + 1);
8999 target = ip + 2 + (signed char)(ip [1]);
9002 /* empty the stack */
9003 while (sp != stack_start) {
9008 * If this leave statement is in a catch block, check for a
9009 * pending exception, and rethrow it if necessary.
9010 * We avoid doing this in runtime invoke wrappers, since those are called
9011 * by native code which expects the wrapper to catch all exceptions.
9013 for (i = 0; i < header->num_clauses; ++i) {
9014 MonoExceptionClause *clause = &header->clauses [i];
9017 * Use <= in the final comparison to handle clauses with multiple
9018 * leave statements, like in bug #78024.
9019 * The ordering of the exception clauses guarantees that we find the
9022 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9024 MonoBasicBlock *dont_throw;
9029 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9032 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9034 NEW_BBLOCK (cfg, dont_throw);
9037 * Currently, we always rethrow the abort exception, despite the
9038 * fact that this is not correct. See thread6.cs for an example.
9039 * But propagating the abort exception is more important than
9040 * getting the semantics right.
9042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9043 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9044 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9046 MONO_START_BB (cfg, dont_throw);
9051 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9053 MonoExceptionClause *clause;
9055 for (tmp = handlers; tmp; tmp = tmp->next) {
9057 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9059 link_bblock (cfg, bblock, tblock);
9060 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9061 ins->inst_target_bb = tblock;
9062 ins->inst_eh_block = clause;
9063 MONO_ADD_INS (bblock, ins);
9064 bblock->has_call_handler = 1;
9065 if (COMPILE_LLVM (cfg)) {
9066 MonoBasicBlock *target_bb;
9069 * Link the finally bblock with the target, since it will
9070 * conceptually branch there.
9071 * FIXME: Have to link the bblock containing the endfinally.
9073 GET_BBLOCK (cfg, target_bb, target);
9074 link_bblock (cfg, tblock, target_bb);
9077 g_list_free (handlers);
9080 MONO_INST_NEW (cfg, ins, OP_BR);
9081 MONO_ADD_INS (bblock, ins);
9082 GET_BBLOCK (cfg, tblock, target);
9083 link_bblock (cfg, bblock, tblock);
9084 ins->inst_target_bb = tblock;
9085 start_new_bblock = 1;
9087 if (*ip == CEE_LEAVE)
9096 * Mono specific opcodes
9098 case MONO_CUSTOM_PREFIX: {
9100 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9104 case CEE_MONO_ICALL: {
9106 MonoJitICallInfo *info;
9108 token = read32 (ip + 2);
9109 func = mono_method_get_wrapper_data (method, token);
9110 info = mono_find_jit_icall_by_addr (func);
9113 CHECK_STACK (info->sig->param_count);
9114 sp -= info->sig->param_count;
9116 ins = mono_emit_jit_icall (cfg, info->func, sp);
9117 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9121 inline_costs += 10 * num_calls++;
9125 case CEE_MONO_LDPTR: {
9128 CHECK_STACK_OVF (1);
9130 token = read32 (ip + 2);
9132 ptr = mono_method_get_wrapper_data (method, token);
9133 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9134 MonoJitICallInfo *callinfo;
9135 const char *icall_name;
9137 icall_name = method->name + strlen ("__icall_wrapper_");
9138 g_assert (icall_name);
9139 callinfo = mono_find_jit_icall_by_name (icall_name);
9140 g_assert (callinfo);
9142 if (ptr == callinfo->func) {
9143 /* Will be transformed into an AOTCONST later */
9144 EMIT_NEW_PCONST (cfg, ins, ptr);
9150 /* FIXME: Generalize this */
9151 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9152 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9157 EMIT_NEW_PCONST (cfg, ins, ptr);
9160 inline_costs += 10 * num_calls++;
9161 /* Can't embed random pointers into AOT code */
9162 cfg->disable_aot = 1;
9165 case CEE_MONO_ICALL_ADDR: {
9166 MonoMethod *cmethod;
9169 CHECK_STACK_OVF (1);
9171 token = read32 (ip + 2);
9173 cmethod = mono_method_get_wrapper_data (method, token);
9175 if (cfg->compile_aot) {
9176 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9178 ptr = mono_lookup_internal_call (cmethod);
9180 EMIT_NEW_PCONST (cfg, ins, ptr);
9186 case CEE_MONO_VTADDR: {
9187 MonoInst *src_var, *src;
9193 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9194 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9199 case CEE_MONO_NEWOBJ: {
9200 MonoInst *iargs [2];
9202 CHECK_STACK_OVF (1);
9204 token = read32 (ip + 2);
9205 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9206 mono_class_init (klass);
9207 NEW_DOMAINCONST (cfg, iargs [0]);
9208 MONO_ADD_INS (cfg->cbb, iargs [0]);
9209 NEW_CLASSCONST (cfg, iargs [1], klass);
9210 MONO_ADD_INS (cfg->cbb, iargs [1]);
9211 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9213 inline_costs += 10 * num_calls++;
9216 case CEE_MONO_OBJADDR:
9219 MONO_INST_NEW (cfg, ins, OP_MOVE);
9220 ins->dreg = alloc_preg (cfg);
9221 ins->sreg1 = sp [0]->dreg;
9222 ins->type = STACK_MP;
9223 MONO_ADD_INS (cfg->cbb, ins);
9227 case CEE_MONO_LDNATIVEOBJ:
9229 * Similar to LDOBJ, but instead load the unmanaged
9230 * representation of the vtype to the stack.
9235 token = read32 (ip + 2);
9236 klass = mono_method_get_wrapper_data (method, token);
9237 g_assert (klass->valuetype);
9238 mono_class_init (klass);
9241 MonoInst *src, *dest, *temp;
9244 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9245 temp->backend.is_pinvoke = 1;
9246 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9247 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9249 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9250 dest->type = STACK_VTYPE;
9251 dest->klass = klass;
9257 case CEE_MONO_RETOBJ: {
9259 * Same as RET, but return the native representation of a vtype
9262 g_assert (cfg->ret);
9263 g_assert (mono_method_signature (method)->pinvoke);
9268 token = read32 (ip + 2);
9269 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9271 if (!cfg->vret_addr) {
9272 g_assert (cfg->ret_var_is_local);
9274 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9276 EMIT_NEW_RETLOADA (cfg, ins);
9278 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9280 if (sp != stack_start)
9283 MONO_INST_NEW (cfg, ins, OP_BR);
9284 ins->inst_target_bb = end_bblock;
9285 MONO_ADD_INS (bblock, ins);
9286 link_bblock (cfg, bblock, end_bblock);
9287 start_new_bblock = 1;
9291 case CEE_MONO_CISINST:
9292 case CEE_MONO_CCASTCLASS: {
9297 token = read32 (ip + 2);
9298 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9299 if (ip [1] == CEE_MONO_CISINST)
9300 ins = handle_cisinst (cfg, klass, sp [0]);
9302 ins = handle_ccastclass (cfg, klass, sp [0]);
9308 case CEE_MONO_SAVE_LMF:
9309 case CEE_MONO_RESTORE_LMF:
9310 #ifdef MONO_ARCH_HAVE_LMF_OPS
9311 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9312 MONO_ADD_INS (bblock, ins);
9313 cfg->need_lmf_area = TRUE;
9317 case CEE_MONO_CLASSCONST:
9318 CHECK_STACK_OVF (1);
9320 token = read32 (ip + 2);
9321 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9324 inline_costs += 10 * num_calls++;
9326 case CEE_MONO_NOT_TAKEN:
9327 bblock->out_of_line = TRUE;
9331 CHECK_STACK_OVF (1);
9333 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9334 ins->dreg = alloc_preg (cfg);
9335 ins->inst_offset = (gint32)read32 (ip + 2);
9336 ins->type = STACK_PTR;
9337 MONO_ADD_INS (bblock, ins);
9341 case CEE_MONO_DYN_CALL: {
9344 /* It would be easier to call a trampoline, but that would put an
9345 * extra frame on the stack, confusing exception handling. So
9346 * implement it inline using an opcode for now.
9349 if (!cfg->dyn_call_var) {
9350 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9351 /* prevent it from being register allocated */
9352 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9355 /* Has to use a call inst since local regalloc expects it */
9356 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9357 ins = (MonoInst*)call;
9359 ins->sreg1 = sp [0]->dreg;
9360 ins->sreg2 = sp [1]->dreg;
9361 MONO_ADD_INS (bblock, ins);
9363 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9364 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9368 inline_costs += 10 * num_calls++;
9373 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9383 /* somewhat similar to LDTOKEN */
9384 MonoInst *addr, *vtvar;
9385 CHECK_STACK_OVF (1);
9386 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9388 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9389 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9391 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9392 ins->type = STACK_VTYPE;
9393 ins->klass = mono_defaults.argumenthandle_class;
9406 * The following transforms:
9407 * CEE_CEQ into OP_CEQ
9408 * CEE_CGT into OP_CGT
9409 * CEE_CGT_UN into OP_CGT_UN
9410 * CEE_CLT into OP_CLT
9411 * CEE_CLT_UN into OP_CLT_UN
9413 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9415 MONO_INST_NEW (cfg, ins, cmp->opcode);
9417 cmp->sreg1 = sp [0]->dreg;
9418 cmp->sreg2 = sp [1]->dreg;
9419 type_from_op (cmp, sp [0], sp [1]);
9421 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9422 cmp->opcode = OP_LCOMPARE;
9423 else if (sp [0]->type == STACK_R8)
9424 cmp->opcode = OP_FCOMPARE;
9426 cmp->opcode = OP_ICOMPARE;
9427 MONO_ADD_INS (bblock, cmp);
9428 ins->type = STACK_I4;
9429 ins->dreg = alloc_dreg (cfg, ins->type);
9430 type_from_op (ins, sp [0], sp [1]);
9432 if (cmp->opcode == OP_FCOMPARE) {
9434 * The backends expect the fceq opcodes to do the
9437 cmp->opcode = OP_NOP;
9438 ins->sreg1 = cmp->sreg1;
9439 ins->sreg2 = cmp->sreg2;
9441 MONO_ADD_INS (bblock, ins);
9448 MonoMethod *cil_method;
9449 gboolean needs_static_rgctx_invoke;
9451 CHECK_STACK_OVF (1);
9453 n = read32 (ip + 2);
9454 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9457 mono_class_init (cmethod->klass);
9459 mono_save_token_info (cfg, image, n, cmethod);
9461 if (cfg->generic_sharing_context)
9462 context_used = mono_method_check_context_used (cmethod);
9464 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9466 cil_method = cmethod;
9467 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9468 METHOD_ACCESS_FAILURE;
9470 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9471 if (check_linkdemand (cfg, method, cmethod))
9473 CHECK_CFG_EXCEPTION;
9474 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9475 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9479 * Optimize the common case of ldftn+delegate creation
9481 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9482 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9483 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9485 int invoke_context_used = 0;
9487 invoke = mono_get_delegate_invoke (ctor_method->klass);
9488 if (!invoke || !mono_method_signature (invoke))
9491 if (cfg->generic_sharing_context)
9492 invoke_context_used = mono_method_check_context_used (invoke);
9494 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9495 /* FIXME: SGEN support */
9496 if (invoke_context_used == 0) {
9497 MonoInst *target_ins;
9500 if (cfg->verbose_level > 3)
9501 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9502 target_ins = sp [-1];
9504 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9505 CHECK_CFG_EXCEPTION;
9514 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9515 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9519 inline_costs += 10 * num_calls++;
9522 case CEE_LDVIRTFTN: {
9527 n = read32 (ip + 2);
9528 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9531 mono_class_init (cmethod->klass);
9533 if (cfg->generic_sharing_context)
9534 context_used = mono_method_check_context_used (cmethod);
9536 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9537 if (check_linkdemand (cfg, method, cmethod))
9539 CHECK_CFG_EXCEPTION;
9540 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9541 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9547 args [1] = emit_get_rgctx_method (cfg, context_used,
9548 cmethod, MONO_RGCTX_INFO_METHOD);
9551 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9553 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9556 inline_costs += 10 * num_calls++;
9560 CHECK_STACK_OVF (1);
9562 n = read16 (ip + 2);
9564 EMIT_NEW_ARGLOAD (cfg, ins, n);
9569 CHECK_STACK_OVF (1);
9571 n = read16 (ip + 2);
9573 NEW_ARGLOADA (cfg, ins, n);
9574 MONO_ADD_INS (cfg->cbb, ins);
9582 n = read16 (ip + 2);
9584 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9586 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9590 CHECK_STACK_OVF (1);
9592 n = read16 (ip + 2);
9594 EMIT_NEW_LOCLOAD (cfg, ins, n);
9599 unsigned char *tmp_ip;
9600 CHECK_STACK_OVF (1);
9602 n = read16 (ip + 2);
9605 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9611 EMIT_NEW_LOCLOADA (cfg, ins, n);
9620 n = read16 (ip + 2);
9622 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9624 emit_stloc_ir (cfg, sp, header, n);
9631 if (sp != stack_start)
9633 if (cfg->method != method)
9635 * Inlining this into a loop in a parent could lead to
9636 * stack overflows which is different behavior than the
9637 * non-inlined case, thus disable inlining in this case.
9639 goto inline_failure;
9641 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9642 ins->dreg = alloc_preg (cfg);
9643 ins->sreg1 = sp [0]->dreg;
9644 ins->type = STACK_PTR;
9645 MONO_ADD_INS (cfg->cbb, ins);
9647 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9649 ins->flags |= MONO_INST_INIT;
9654 case CEE_ENDFILTER: {
9655 MonoExceptionClause *clause, *nearest;
9656 int cc, nearest_num;
9660 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9662 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9663 ins->sreg1 = (*sp)->dreg;
9664 MONO_ADD_INS (bblock, ins);
9665 start_new_bblock = 1;
9670 for (cc = 0; cc < header->num_clauses; ++cc) {
9671 clause = &header->clauses [cc];
9672 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9673 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9674 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9680 if ((ip - header->code) != nearest->handler_offset)
9685 case CEE_UNALIGNED_:
9686 ins_flag |= MONO_INST_UNALIGNED;
9687 /* FIXME: record alignment? we can assume 1 for now */
9692 ins_flag |= MONO_INST_VOLATILE;
9696 ins_flag |= MONO_INST_TAILCALL;
9697 cfg->flags |= MONO_CFG_HAS_TAIL;
9698 /* Can't inline tail calls at this time */
9699 inline_costs += 100000;
9706 token = read32 (ip + 2);
9707 klass = mini_get_class (method, token, generic_context);
9708 CHECK_TYPELOAD (klass);
9709 if (generic_class_is_reference_type (cfg, klass))
9710 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9712 mini_emit_initobj (cfg, *sp, NULL, klass);
9716 case CEE_CONSTRAINED_:
9718 token = read32 (ip + 2);
9719 if (method->wrapper_type != MONO_WRAPPER_NONE)
9720 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9722 constrained_call = mono_class_get_full (image, token, generic_context);
9723 CHECK_TYPELOAD (constrained_call);
9728 MonoInst *iargs [3];
9732 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9733 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9734 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9735 /* emit_memset only works when val == 0 */
9736 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9741 if (ip [1] == CEE_CPBLK) {
9742 MonoMethod *memcpy_method = get_memcpy_method ();
9743 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9745 MonoMethod *memset_method = get_memset_method ();
9746 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9756 ins_flag |= MONO_INST_NOTYPECHECK;
9758 ins_flag |= MONO_INST_NORANGECHECK;
9759 /* we ignore the no-nullcheck for now since we
9760 * really do it explicitly only when doing callvirt->call
9766 int handler_offset = -1;
9768 for (i = 0; i < header->num_clauses; ++i) {
9769 MonoExceptionClause *clause = &header->clauses [i];
9770 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9771 handler_offset = clause->handler_offset;
9776 bblock->flags |= BB_EXCEPTION_UNSAFE;
9778 g_assert (handler_offset != -1);
9780 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9781 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9782 ins->sreg1 = load->dreg;
9783 MONO_ADD_INS (bblock, ins);
9785 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9786 MONO_ADD_INS (bblock, ins);
9789 link_bblock (cfg, bblock, end_bblock);
9790 start_new_bblock = 1;
9798 CHECK_STACK_OVF (1);
9800 token = read32 (ip + 2);
9801 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9802 MonoType *type = mono_type_create_from_typespec (image, token);
9803 token = mono_type_size (type, &ialign);
9805 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9806 CHECK_TYPELOAD (klass);
9807 mono_class_init (klass);
9808 token = mono_class_value_size (klass, &align);
9810 EMIT_NEW_ICONST (cfg, ins, token);
9815 case CEE_REFANYTYPE: {
9816 MonoInst *src_var, *src;
9822 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9824 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9825 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9826 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9844 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9854 g_warning ("opcode 0x%02x not handled", *ip);
9858 if (start_new_bblock != 1)
9861 bblock->cil_length = ip - bblock->cil_code;
9862 bblock->next_bb = end_bblock;
9864 if (cfg->method == method && cfg->domainvar) {
9866 MonoInst *get_domain;
9868 cfg->cbb = init_localsbb;
9870 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9871 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9874 get_domain->dreg = alloc_preg (cfg);
9875 MONO_ADD_INS (cfg->cbb, get_domain);
9877 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9878 MONO_ADD_INS (cfg->cbb, store);
9881 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9882 if (cfg->compile_aot)
9883 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9884 mono_get_got_var (cfg);
9887 if (cfg->method == method && cfg->got_var)
9888 mono_emit_load_got_addr (cfg);
9893 cfg->cbb = init_localsbb;
9895 for (i = 0; i < header->num_locals; ++i) {
9896 MonoType *ptype = header->locals [i];
9897 int t = ptype->type;
9898 dreg = cfg->locals [i]->dreg;
9900 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9901 t = mono_class_enum_basetype (ptype->data.klass)->type;
9903 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9904 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9905 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9906 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9907 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9908 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9909 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9910 ins->type = STACK_R8;
9911 ins->inst_p0 = (void*)&r8_0;
9912 ins->dreg = alloc_dreg (cfg, STACK_R8);
9913 MONO_ADD_INS (init_localsbb, ins);
9914 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9915 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9916 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9917 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9919 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9924 if (cfg->init_ref_vars && cfg->method == method) {
9925 /* Emit initialization for ref vars */
9926 // FIXME: Avoid duplication initialization for IL locals.
9927 for (i = 0; i < cfg->num_varinfo; ++i) {
9928 MonoInst *ins = cfg->varinfo [i];
9930 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9931 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9935 /* Add a sequence point for method entry/exit events */
9937 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9938 MONO_ADD_INS (init_localsbb, ins);
9939 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9940 MONO_ADD_INS (cfg->bb_exit, ins);
9945 if (cfg->method == method) {
9947 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9948 bb->region = mono_find_block_region (cfg, bb->real_offset);
9950 mono_create_spvar_for_region (cfg, bb->region);
9951 if (cfg->verbose_level > 2)
9952 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9956 g_slist_free (class_inits);
9957 dont_inline = g_list_remove (dont_inline, method);
9959 if (inline_costs < 0) {
9962 /* Method is too large */
9963 mname = mono_method_full_name (method, TRUE);
9964 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9965 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9967 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9968 mono_basic_block_free (original_bb);
9972 if ((cfg->verbose_level > 2) && (cfg->method == method))
9973 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9975 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9976 mono_basic_block_free (original_bb);
9977 return inline_costs;
9980 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9987 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9991 set_exception_type_from_invalid_il (cfg, method, ip);
9995 g_slist_free (class_inits);
9996 mono_basic_block_free (original_bb);
9997 dont_inline = g_list_remove (dont_inline, method);
9998 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart, used
 * when the value being stored is known to be a constant so the register
 * source can be replaced by an immediate operand.
 * Asserts on any opcode that has no immediate store form.
 */
10003 store_membase_reg_to_store_membase_imm (int opcode)
10006 case OP_STORE_MEMBASE_REG:
10007 return OP_STORE_MEMBASE_IMM;
10008 case OP_STOREI1_MEMBASE_REG:
10009 return OP_STOREI1_MEMBASE_IMM;
10010 case OP_STOREI2_MEMBASE_REG:
10011 return OP_STOREI2_MEMBASE_IMM;
10012 case OP_STOREI4_MEMBASE_REG:
10013 return OP_STOREI4_MEMBASE_IMM;
10014 case OP_STOREI8_MEMBASE_REG:
10015 return OP_STOREI8_MEMBASE_IMM;
/* any other store opcode has no immediate variant */
10017 g_assert_not_reached ();
10023 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. an integer add whose
 * second source is a constant becomes OP_IADD_IMM), so a constant source
 * register can be folded into the instruction.
 * NOTE(review): the case labels and the fall-through return value are elided
 * in this excerpt -- consult the full file for the exact opcode -> _IMM map.
 */
10026 mono_op_to_op_imm (int opcode)
/* 32 bit integer arithmetic / logic / shift forms */
10030 return OP_IADD_IMM;
10032 return OP_ISUB_IMM;
10034 return OP_IDIV_IMM;
10036 return OP_IDIV_UN_IMM;
10038 return OP_IREM_IMM;
10040 return OP_IREM_UN_IMM;
10042 return OP_IMUL_IMM;
10044 return OP_IAND_IMM;
10048 return OP_IXOR_IMM;
10050 return OP_ISHL_IMM;
10052 return OP_ISHR_IMM;
10054 return OP_ISHR_UN_IMM;
/* 64 bit (long) forms */
10057 return OP_LADD_IMM;
10059 return OP_LSUB_IMM;
10061 return OP_LAND_IMM;
10065 return OP_LXOR_IMM;
10067 return OP_LSHL_IMM;
10069 return OP_LSHR_IMM;
10071 return OP_LSHR_UN_IMM;
/* compares against a constant */
10074 return OP_COMPARE_IMM;
10076 return OP_ICOMPARE_IMM;
10078 return OP_LCOMPARE_IMM;
/* register stores -> immediate stores */
10080 case OP_STORE_MEMBASE_REG:
10081 return OP_STORE_MEMBASE_IMM;
10082 case OP_STOREI1_MEMBASE_REG:
10083 return OP_STOREI1_MEMBASE_IMM;
10084 case OP_STOREI2_MEMBASE_REG:
10085 return OP_STOREI2_MEMBASE_IMM;
10086 case OP_STOREI4_MEMBASE_REG:
10087 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific memory-operand forms */
10089 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10091 return OP_X86_PUSH_IMM;
10092 case OP_X86_COMPARE_MEMBASE_REG:
10093 return OP_X86_COMPARE_MEMBASE_IMM;
10095 #if defined(TARGET_AMD64)
10096 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10097 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect call through a constant address -> direct call */
10099 case OP_VOIDCALL_REG:
10100 return OP_VOIDCALL;
/* alloca with a constant size */
10108 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* (indirect load) opcode to the corresponding
 * LOAD*_MEMBASE IR opcode (load through a base register + offset).
 * Asserts on opcodes outside the LDIND family.
 */
10115 ldind_to_load_membase (int opcode)
/* sign- and zero-extending sub-word loads */
10119 return OP_LOADI1_MEMBASE;
10121 return OP_LOADU1_MEMBASE;
10123 return OP_LOADI2_MEMBASE;
10125 return OP_LOADU2_MEMBASE;
10127 return OP_LOADI4_MEMBASE;
10129 return OP_LOADU4_MEMBASE;
/* native int and object reference both use the pointer-sized load */
10131 return OP_LOAD_MEMBASE;
10132 case CEE_LDIND_REF:
10133 return OP_LOAD_MEMBASE;
10135 return OP_LOADI8_MEMBASE;
/* floating point loads */
10137 return OP_LOADR4_MEMBASE;
10139 return OP_LOADR8_MEMBASE;
10141 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* (indirect store) opcode to the corresponding
 * STORE*_MEMBASE_REG IR opcode. Asserts on opcodes outside the STIND family.
 */
10148 stind_to_store_membase (int opcode)
10152 return OP_STOREI1_MEMBASE_REG;
10154 return OP_STOREI2_MEMBASE_REG;
10156 return OP_STOREI4_MEMBASE_REG;
/* native int and object reference share the pointer-sized store */
10158 case CEE_STIND_REF:
10159 return OP_STORE_MEMBASE_REG;
10161 return OP_STOREI8_MEMBASE_REG;
10163 return OP_STORER4_MEMBASE_REG;
10165 return OP_STORER8_MEMBASE_REG;
10167 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode (base register + offset) to the LOAD*_MEM
 * variant that loads from an absolute address, on architectures that can
 * encode an absolute memory operand (currently only x86/amd64).
 * NOTE(review): the fall-through for unsupported opcodes/targets is elided
 * in this excerpt -- presumably returns -1; confirm in the full file.
 */
10174 mono_load_membase_to_load_mem (int opcode)
10176 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10177 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10179 case OP_LOAD_MEMBASE:
10180 return OP_LOAD_MEM;
10181 case OP_LOADU1_MEMBASE:
10182 return OP_LOADU1_MEM;
10183 case OP_LOADU2_MEMBASE:
10184 return OP_LOADU2_MEM;
10185 case OP_LOADI4_MEMBASE:
10186 return OP_LOADI4_MEM;
10187 case OP_LOADU4_MEMBASE:
10188 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit targets */
10189 #if SIZEOF_REGISTER == 8
10190 case OP_LOADI8_MEMBASE:
10191 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is immediately stored back to a
 * stack slot via STORE_OPCODE, return the x86/amd64 read-modify-write
 * *_MEMBASE form that operates directly on memory, fusing the op + store.
 * NOTE(review): the fall-through (no fusable form) is elided in this
 * excerpt -- presumably returns -1; confirm in the full file.
 */
10200 op_to_op_dest_membase (int store_opcode, int opcode)
10202 #if defined(TARGET_X86)
/* on x86 only pointer-sized / 32 bit stores can be fused */
10203 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10208 return OP_X86_ADD_MEMBASE_REG;
10210 return OP_X86_SUB_MEMBASE_REG;
10212 return OP_X86_AND_MEMBASE_REG;
10214 return OP_X86_OR_MEMBASE_REG;
10216 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source variants */
10219 return OP_X86_ADD_MEMBASE_IMM;
10222 return OP_X86_SUB_MEMBASE_IMM;
10225 return OP_X86_AND_MEMBASE_IMM;
10228 return OP_X86_OR_MEMBASE_IMM;
10231 return OP_X86_XOR_MEMBASE_IMM;
10237 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8 byte stores to be fused */
10238 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* memory forms */
10243 return OP_X86_ADD_MEMBASE_REG;
10245 return OP_X86_SUB_MEMBASE_REG;
10247 return OP_X86_AND_MEMBASE_REG;
10249 return OP_X86_OR_MEMBASE_REG;
10251 return OP_X86_XOR_MEMBASE_REG;
10253 return OP_X86_ADD_MEMBASE_IMM;
10255 return OP_X86_SUB_MEMBASE_IMM;
10257 return OP_X86_AND_MEMBASE_IMM;
10259 return OP_X86_OR_MEMBASE_IMM;
10261 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops get the AMD64_* memory forms */
10263 return OP_AMD64_ADD_MEMBASE_REG;
10265 return OP_AMD64_SUB_MEMBASE_REG;
10267 return OP_AMD64_AND_MEMBASE_REG;
10269 return OP_AMD64_OR_MEMBASE_REG;
10271 return OP_AMD64_XOR_MEMBASE_REG;
10274 return OP_AMD64_ADD_MEMBASE_IMM;
10277 return OP_AMD64_SUB_MEMBASE_IMM;
10280 return OP_AMD64_AND_MEMBASE_IMM;
10283 return OP_AMD64_OR_MEMBASE_IMM;
10286 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-flag materialization (setcc) with the byte store that
 * follows it: when the result of CEQ-style opcodes is stored with
 * OP_STOREI1_MEMBASE_REG, emit the x86 SETcc-to-memory form directly.
 * NOTE(review): the surrounding switch and the non-fusable fall-through are
 * elided in this excerpt -- presumably returns -1; confirm in the full file.
 */
10296 op_to_op_store_membase (int store_opcode, int opcode)
10298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10301 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10302 return OP_X86_SETEQ_MEMBASE;
10304 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10305 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding OPCODE's first source register into the
 * instruction itself, returning the x86/amd64 *_MEMBASE variant that reads
 * its first operand from memory (described by LOAD_OPCODE).
 * NOTE(review): the "not fusable" fall-through is elided in this excerpt --
 * presumably returns -1; confirm in the full file.
 */
10313 op_to_op_src1_membase (int load_opcode, int opcode)
10316 /* FIXME: This has sign extension issues */
/* byte compare against an immediate can use the 8 bit memory form */
10318 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10319 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer-sized / 32 bit loads can be folded on x86 */
10322 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10327 return OP_X86_PUSH_MEMBASE;
10328 case OP_COMPARE_IMM:
10329 case OP_ICOMPARE_IMM:
10330 return OP_X86_COMPARE_MEMBASE_IMM;
10333 return OP_X86_COMPARE_MEMBASE_REG;
10337 #ifdef TARGET_AMD64
10338 /* FIXME: This has sign extension issues */
10340 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10341 return OP_X86_COMPARE_MEMBASE8_IMM;
10346 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10347 return OP_X86_PUSH_MEMBASE;
/* the block below is intentionally commented out in the original source */
10349 /* FIXME: This only works for 32 bit immediates
10350 case OP_COMPARE_IMM:
10351 case OP_LCOMPARE_IMM:
10352 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10353 return OP_AMD64_COMPARE_MEMBASE_IMM;
10355 case OP_ICOMPARE_IMM:
10356 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10357 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick 64 vs 32 bit memory form from the load width */
10361 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10362 return OP_AMD64_COMPARE_MEMBASE_REG;
10365 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10366 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding OPCODE's second source register into the
 * instruction, returning the x86/amd64 *_REG_MEMBASE variant whose second
 * operand comes from memory (described by LOAD_OPCODE).
 * NOTE(review): the "not fusable" fall-through is elided in this excerpt --
 * presumably returns -1; confirm in the full file.
 */
10375 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be folded */
10378 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10384 return OP_X86_COMPARE_REG_MEMBASE;
10386 return OP_X86_ADD_REG_MEMBASE;
10388 return OP_X86_SUB_REG_MEMBASE;
10390 return OP_X86_AND_REG_MEMBASE;
10392 return OP_X86_OR_REG_MEMBASE;
10394 return OP_X86_XOR_REG_MEMBASE;
10398 #ifdef TARGET_AMD64
/* amd64: choose 32 vs 64 bit memory form based on the load width */
10401 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10402 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10406 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10407 return OP_AMD64_COMPARE_REG_MEMBASE;
10410 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10411 return OP_X86_ADD_REG_MEMBASE;
10413 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10414 return OP_X86_SUB_REG_MEMBASE;
10416 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10417 return OP_X86_AND_REG_MEMBASE;
10419 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10420 return OP_X86_OR_REG_MEMBASE;
10422 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10423 return OP_X86_XOR_REG_MEMBASE;
10425 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10426 return OP_AMD64_ADD_REG_MEMBASE;
10428 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10429 return OP_AMD64_SUB_REG_MEMBASE;
10431 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10432 return OP_AMD64_AND_REG_MEMBASE;
10434 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10435 return OP_AMD64_OR_REG_MEMBASE;
10437 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10438 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes whose immediate
 * form would be software-emulated on this target: long shifts on 32 bit
 * registers without native long shift ops, and mul/div under
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV. The excluded case
 * labels are elided in this excerpt; everything else defers to
 * mono_op_to_op_imm ().
 */
10446 mono_op_to_op_imm_noemul (int opcode)
10449 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10455 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10463 return mono_op_to_op_imm (opcode);
10467 #ifndef DISABLE_JIT
10470 * mono_handle_global_vregs:
10472 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Promote vregs referenced from more than one basic block to MonoInst
 * variables ("global" vregs), demote single-bblock variables back to local
 * vregs, then compact cfg->varinfo/cfg->vars. Several structural lines are
 * elided in this excerpt; comments below only describe what is visible.
 */
10476 mono_handle_global_vregs (MonoCompile *cfg)
10478 gint32 *vreg_to_bb;
10479 MonoBasicBlock *bb;
/* Per-vreg table: 0 = unseen, block_num+1 = seen in one bb, -1 = global.
 * NOTE(review): 'sizeof (gint32*)' (pointer size, not element size) and the
 * '+ 1' falling outside the multiplication look like they were meant to be
 * sizeof (gint32) * (cfg->next_vreg + 1); the current form only
 * over-allocates, so it is harmless -- confirm against upstream. */
10482 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10484 #ifdef MONO_ARCH_SIMD_INTRINSICS
10485 if (cfg->uses_simd_intrinsics)
10486 mono_simd_simplify_indirection (cfg);
10489 /* Find local vregs used in more than one bb */
10490 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10491 MonoInst *ins = bb->code;
10492 int block_num = bb->block_num;
10494 if (cfg->verbose_level > 2)
10495 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10498 for (; ins; ins = ins->next) {
10499 const char *spec = INS_INFO (ins->opcode);
10500 int regtype = 0, regindex;
10503 if (G_UNLIKELY (cfg->verbose_level > 2))
10504 mono_print_ins (ins);
/* only lowered (machine-level) opcodes are expected at this point */
10506 g_assert (ins->opcode >= MONO_CEE_LAST);
/* walk dest, src1, src2, src3 of the instruction */
10508 for (regindex = 0; regindex < 4; regindex ++) {
10511 if (regindex == 0) {
10512 regtype = spec [MONO_INST_DEST];
10513 if (regtype == ' ')
10516 } else if (regindex == 1) {
10517 regtype = spec [MONO_INST_SRC1];
10518 if (regtype == ' ')
10521 } else if (regindex == 2) {
10522 regtype = spec [MONO_INST_SRC2];
10523 if (regtype == ' ')
10526 } else if (regindex == 3) {
10527 regtype = spec [MONO_INST_SRC3];
10528 if (regtype == ' ')
10533 #if SIZEOF_REGISTER == 4
10534 /* In the LLVM case, the long opcodes are not decomposed */
10535 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10537 * Since some instructions reference the original long vreg,
10538 * and some reference the two component vregs, it is quite hard
10539 * to determine when it needs to be global. So be conservative.
10541 if (!get_vreg_to_inst (cfg, vreg)) {
10542 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10544 if (cfg->verbose_level > 2)
10545 printf ("LONG VREG R%d made global.\n", vreg);
10549 * Make the component vregs volatile since the optimizations can
10550 * get confused otherwise.
10552 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10553 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10557 g_assert (vreg != -1);
10559 prev_bb = vreg_to_bb [vreg];
10560 if (prev_bb == 0) {
10561 /* 0 is a valid block num */
10562 vreg_to_bb [vreg] = block_num + 1;
10563 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers don't need promotion */
10564 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10567 if (!get_vreg_to_inst (cfg, vreg)) {
10568 if (G_UNLIKELY (cfg->verbose_level > 2))
10569 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable of the matching type for the vreg */
10573 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10576 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10579 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10582 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10585 g_assert_not_reached ();
10589 /* Flag as having been used in more than one bb */
10590 vreg_to_bb [vreg] = -1;
10596 /* If a variable is used in only one bblock, convert it into a local vreg */
10597 for (i = 0; i < cfg->num_varinfo; i++) {
10598 MonoInst *var = cfg->varinfo [i];
10599 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10601 switch (var->type) {
10607 #if SIZEOF_REGISTER == 8
10610 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10611 /* Enabling this screws up the fp stack on x86 */
10614 /* Arguments are implicitly global */
10615 /* Putting R4 vars into registers doesn't work currently */
10616 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10618 * Make that the variable's liveness interval doesn't contain a call, since
10619 * that would cause the lvreg to be spilled, making the whole optimization
10622 /* This is too slow for JIT compilation */
10624 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10626 int def_index, call_index, ins_index;
10627 gboolean spilled = FALSE;
10632 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10633 const char *spec = INS_INFO (ins->opcode);
10635 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10636 def_index = ins_index;
/* NOTE(review): both halves of this || test SRC1/sreg1 -- the second
 * clause was presumably meant to test SRC2/sreg2 (copy-paste defect),
 * so uses through sreg2 are missed here; confirm against upstream. */
10638 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10639 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10640 if (call_index > def_index) {
10646 if (MONO_IS_CALL (ins))
10647 call_index = ins_index;
10657 if (G_UNLIKELY (cfg->verbose_level > 2))
10658 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* mark the variable dead; its vreg becomes a plain local vreg */
10659 var->flags |= MONO_INST_IS_DEAD;
10660 cfg->vreg_to_inst [var->dreg] = NULL;
10667 * Compress the varinfo and vars tables so the liveness computation is faster and
10668 * takes up less space.
10671 for (i = 0; i < cfg->num_varinfo; ++i) {
10672 MonoInst *var = cfg->varinfo [i];
10673 if (pos < i && cfg->locals_start == i)
10674 cfg->locals_start = pos;
10675 if (!(var->flags & MONO_INST_IS_DEAD)) {
10677 cfg->varinfo [pos] = cfg->varinfo [i];
10678 cfg->varinfo [pos]->inst_c0 = pos;
10679 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10680 cfg->vars [pos].idx = pos;
10681 #if SIZEOF_REGISTER == 4
10682 if (cfg->varinfo [pos]->type == STACK_I8) {
10683 /* Modify the two component vars too */
10686 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10687 var1->inst_c0 = pos;
10688 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10689 var1->inst_c0 = pos;
10696 cfg->num_varinfo = pos;
10697 if (cfg->locals_start > cfg->num_varinfo)
10698 cfg->locals_start = cfg->num_varinfo;
10702 * mono_spill_global_vars:
10704 * Generate spill code for variables which are not allocated to registers,
10705 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10706 * code is generated which could be optimized by the local optimization passes.
10709 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10711 MonoBasicBlock *bb;
10713 int orig_next_vreg;
10714 guint32 *vreg_to_lvreg;
10716 guint32 i, lvregs_len;
10717 gboolean dest_has_lvreg = FALSE;
10718 guint32 stacktypes [128];
10719 MonoInst **live_range_start, **live_range_end;
10720 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10722 *need_local_opts = FALSE;
10724 memset (spec2, 0, sizeof (spec2));
10726 /* FIXME: Move this function to mini.c */
10727 stacktypes ['i'] = STACK_PTR;
10728 stacktypes ['l'] = STACK_I8;
10729 stacktypes ['f'] = STACK_R8;
10730 #ifdef MONO_ARCH_SIMD_INTRINSICS
10731 stacktypes ['x'] = STACK_VTYPE;
10734 #if SIZEOF_REGISTER == 4
10735 /* Create MonoInsts for longs */
10736 for (i = 0; i < cfg->num_varinfo; i++) {
10737 MonoInst *ins = cfg->varinfo [i];
10739 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10740 switch (ins->type) {
10745 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10748 g_assert (ins->opcode == OP_REGOFFSET);
10750 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10752 tree->opcode = OP_REGOFFSET;
10753 tree->inst_basereg = ins->inst_basereg;
10754 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10756 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10758 tree->opcode = OP_REGOFFSET;
10759 tree->inst_basereg = ins->inst_basereg;
10760 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10770 /* FIXME: widening and truncation */
10773 * As an optimization, when a variable allocated to the stack is first loaded into
10774 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10775 * the variable again.
10777 orig_next_vreg = cfg->next_vreg;
10778 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10779 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10783 * These arrays contain the first and last instructions accessing a given
10785 * Since we emit bblocks in the same order we process them here, and we
10786 * don't split live ranges, these will precisely describe the live range of
10787 * the variable, i.e. the instruction range where a valid value can be found
10788 * in the variables location.
10789 * The live range is computed using the liveness info computed by the liveness pass.
10790 * We can't use vmv->range, since that is an abstract live range, and we need
10791 * one which is instruction precise.
10792 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10794 /* FIXME: Only do this if debugging info is requested */
10795 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10796 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10797 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10798 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10800 /* Add spill loads/stores */
10801 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10804 if (cfg->verbose_level > 2)
10805 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10807 /* Clear vreg_to_lvreg array */
10808 for (i = 0; i < lvregs_len; i++)
10809 vreg_to_lvreg [lvregs [i]] = 0;
10813 MONO_BB_FOR_EACH_INS (bb, ins) {
10814 const char *spec = INS_INFO (ins->opcode);
10815 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10816 gboolean store, no_lvreg;
10817 int sregs [MONO_MAX_SRC_REGS];
10819 if (G_UNLIKELY (cfg->verbose_level > 2))
10820 mono_print_ins (ins);
10822 if (ins->opcode == OP_NOP)
10826 * We handle LDADDR here as well, since it can only be decomposed
10827 * when variable addresses are known.
10829 if (ins->opcode == OP_LDADDR) {
10830 MonoInst *var = ins->inst_p0;
10832 if (var->opcode == OP_VTARG_ADDR) {
10833 /* Happens on SPARC/S390 where vtypes are passed by reference */
10834 MonoInst *vtaddr = var->inst_left;
10835 if (vtaddr->opcode == OP_REGVAR) {
10836 ins->opcode = OP_MOVE;
10837 ins->sreg1 = vtaddr->dreg;
10839 else if (var->inst_left->opcode == OP_REGOFFSET) {
10840 ins->opcode = OP_LOAD_MEMBASE;
10841 ins->inst_basereg = vtaddr->inst_basereg;
10842 ins->inst_offset = vtaddr->inst_offset;
10846 g_assert (var->opcode == OP_REGOFFSET);
10848 ins->opcode = OP_ADD_IMM;
10849 ins->sreg1 = var->inst_basereg;
10850 ins->inst_imm = var->inst_offset;
10853 *need_local_opts = TRUE;
10854 spec = INS_INFO (ins->opcode);
10857 if (ins->opcode < MONO_CEE_LAST) {
10858 mono_print_ins (ins);
10859 g_assert_not_reached ();
10863 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10867 if (MONO_IS_STORE_MEMBASE (ins)) {
10868 tmp_reg = ins->dreg;
10869 ins->dreg = ins->sreg2;
10870 ins->sreg2 = tmp_reg;
10873 spec2 [MONO_INST_DEST] = ' ';
10874 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10875 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10876 spec2 [MONO_INST_SRC3] = ' ';
10878 } else if (MONO_IS_STORE_MEMINDEX (ins))
10879 g_assert_not_reached ();
10884 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10885 printf ("\t %.3s %d", spec, ins->dreg);
10886 num_sregs = mono_inst_get_src_registers (ins, sregs);
10887 for (srcindex = 0; srcindex < 3; ++srcindex)
10888 printf (" %d", sregs [srcindex]);
10895 regtype = spec [MONO_INST_DEST];
10896 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10899 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10900 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10901 MonoInst *store_ins;
10903 MonoInst *def_ins = ins;
10904 int dreg = ins->dreg; /* The original vreg */
10906 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10908 if (var->opcode == OP_REGVAR) {
10909 ins->dreg = var->dreg;
10910 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10912 * Instead of emitting a load+store, use a _membase opcode.
10914 g_assert (var->opcode == OP_REGOFFSET);
10915 if (ins->opcode == OP_MOVE) {
10919 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10920 ins->inst_basereg = var->inst_basereg;
10921 ins->inst_offset = var->inst_offset;
10924 spec = INS_INFO (ins->opcode);
10928 g_assert (var->opcode == OP_REGOFFSET);
10930 prev_dreg = ins->dreg;
10932 /* Invalidate any previous lvreg for this vreg */
10933 vreg_to_lvreg [ins->dreg] = 0;
10937 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10939 store_opcode = OP_STOREI8_MEMBASE_REG;
10942 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10944 if (regtype == 'l') {
10945 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10946 mono_bblock_insert_after_ins (bb, ins, store_ins);
10947 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10948 mono_bblock_insert_after_ins (bb, ins, store_ins);
10949 def_ins = store_ins;
10952 g_assert (store_opcode != OP_STOREV_MEMBASE);
10954 /* Try to fuse the store into the instruction itself */
10955 /* FIXME: Add more instructions */
10956 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10957 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10958 ins->inst_imm = ins->inst_c0;
10959 ins->inst_destbasereg = var->inst_basereg;
10960 ins->inst_offset = var->inst_offset;
10961 spec = INS_INFO (ins->opcode);
10962 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10963 ins->opcode = store_opcode;
10964 ins->inst_destbasereg = var->inst_basereg;
10965 ins->inst_offset = var->inst_offset;
10969 tmp_reg = ins->dreg;
10970 ins->dreg = ins->sreg2;
10971 ins->sreg2 = tmp_reg;
10974 spec2 [MONO_INST_DEST] = ' ';
10975 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10976 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10977 spec2 [MONO_INST_SRC3] = ' ';
10979 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10980 // FIXME: The backends expect the base reg to be in inst_basereg
10981 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10983 ins->inst_basereg = var->inst_basereg;
10984 ins->inst_offset = var->inst_offset;
10985 spec = INS_INFO (ins->opcode);
10987 /* printf ("INS: "); mono_print_ins (ins); */
10988 /* Create a store instruction */
10989 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10991 /* Insert it after the instruction */
10992 mono_bblock_insert_after_ins (bb, ins, store_ins);
10994 def_ins = store_ins;
10997 * We can't assign ins->dreg to var->dreg here, since the
10998 * sregs could use it. So set a flag, and do it after
11001 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11002 dest_has_lvreg = TRUE;
11007 if (def_ins && !live_range_start [dreg]) {
11008 live_range_start [dreg] = def_ins;
11009 live_range_start_bb [dreg] = bb;
11016 num_sregs = mono_inst_get_src_registers (ins, sregs);
11017 for (srcindex = 0; srcindex < 3; ++srcindex) {
11018 regtype = spec [MONO_INST_SRC1 + srcindex];
11019 sreg = sregs [srcindex];
11021 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11022 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11023 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11024 MonoInst *use_ins = ins;
11025 MonoInst *load_ins;
11026 guint32 load_opcode;
11028 if (var->opcode == OP_REGVAR) {
11029 sregs [srcindex] = var->dreg;
11030 //mono_inst_set_src_registers (ins, sregs);
11031 live_range_end [sreg] = use_ins;
11032 live_range_end_bb [sreg] = bb;
11036 g_assert (var->opcode == OP_REGOFFSET);
11038 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11040 g_assert (load_opcode != OP_LOADV_MEMBASE);
11042 if (vreg_to_lvreg [sreg]) {
11043 g_assert (vreg_to_lvreg [sreg] != -1);
11045 /* The variable is already loaded to an lvreg */
11046 if (G_UNLIKELY (cfg->verbose_level > 2))
11047 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11048 sregs [srcindex] = vreg_to_lvreg [sreg];
11049 //mono_inst_set_src_registers (ins, sregs);
11053 /* Try to fuse the load into the instruction */
11054 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11055 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11056 sregs [0] = var->inst_basereg;
11057 //mono_inst_set_src_registers (ins, sregs);
11058 ins->inst_offset = var->inst_offset;
11059 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11060 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11061 sregs [1] = var->inst_basereg;
11062 //mono_inst_set_src_registers (ins, sregs);
11063 ins->inst_offset = var->inst_offset;
11065 if (MONO_IS_REAL_MOVE (ins)) {
11066 ins->opcode = OP_NOP;
11069 //printf ("%d ", srcindex); mono_print_ins (ins);
11071 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11073 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11074 if (var->dreg == prev_dreg) {
11076 * sreg refers to the value loaded by the load
11077 * emitted below, but we need to use ins->dreg
11078 * since it refers to the store emitted earlier.
11082 g_assert (sreg != -1);
11083 vreg_to_lvreg [var->dreg] = sreg;
11084 g_assert (lvregs_len < 1024);
11085 lvregs [lvregs_len ++] = var->dreg;
11089 sregs [srcindex] = sreg;
11090 //mono_inst_set_src_registers (ins, sregs);
11092 if (regtype == 'l') {
11093 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11094 mono_bblock_insert_before_ins (bb, ins, load_ins);
11095 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11096 mono_bblock_insert_before_ins (bb, ins, load_ins);
11097 use_ins = load_ins;
11100 #if SIZEOF_REGISTER == 4
11101 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11103 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11104 mono_bblock_insert_before_ins (bb, ins, load_ins);
11105 use_ins = load_ins;
11109 if (var->dreg < orig_next_vreg) {
11110 live_range_end [var->dreg] = use_ins;
11111 live_range_end_bb [var->dreg] = bb;
11115 mono_inst_set_src_registers (ins, sregs);
11117 if (dest_has_lvreg) {
11118 g_assert (ins->dreg != -1);
11119 vreg_to_lvreg [prev_dreg] = ins->dreg;
11120 g_assert (lvregs_len < 1024);
11121 lvregs [lvregs_len ++] = prev_dreg;
11122 dest_has_lvreg = FALSE;
11126 tmp_reg = ins->dreg;
11127 ins->dreg = ins->sreg2;
11128 ins->sreg2 = tmp_reg;
11131 if (MONO_IS_CALL (ins)) {
11132 /* Clear vreg_to_lvreg array */
11133 for (i = 0; i < lvregs_len; i++)
11134 vreg_to_lvreg [lvregs [i]] = 0;
11136 } else if (ins->opcode == OP_NOP) {
11138 MONO_INST_NULLIFY_SREGS (ins);
11141 if (cfg->verbose_level > 2)
11142 mono_print_ins_index (1, ins);
11145 /* Extend the live range based on the liveness info */
11146 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11147 for (i = 0; i < cfg->num_varinfo; i ++) {
11148 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11150 if (vreg_is_volatile (cfg, vi->vreg))
11151 /* The liveness info is incomplete */
11154 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11155 /* Live from at least the first ins of this bb */
11156 live_range_start [vi->vreg] = bb->code;
11157 live_range_start_bb [vi->vreg] = bb;
11160 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11161 /* Live at least until the last ins of this bb */
11162 live_range_end [vi->vreg] = bb->last_ins;
11163 live_range_end_bb [vi->vreg] = bb;
11169 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11171 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11172 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11174 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11175 for (i = 0; i < cfg->num_varinfo; ++i) {
11176 int vreg = MONO_VARINFO (cfg, i)->vreg;
11179 if (live_range_start [vreg]) {
11180 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11182 ins->inst_c1 = vreg;
11183 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11185 if (live_range_end [vreg]) {
11186 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11188 ins->inst_c1 = vreg;
11189 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11190 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11192 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11198 g_free (live_range_start);
11199 g_free (live_range_end);
11200 g_free (live_range_start_bb);
11201 g_free (live_range_end_bb);
11206 * - use 'iadd' instead of 'int_add'
11207 * - handling ovf opcodes: decompose in method_to_ir.
11208 * - unify iregs/fregs
11209 * -> partly done, the missing parts are:
11210 * - a more complete unification would involve unifying the hregs as well, so
11211 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11212 * would no longer map to the machine hregs, so the code generators would need to
11213 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11214 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11215 * fp/non-fp branches speeds it up by about 15%.
11216 * - use sext/zext opcodes instead of shifts
11218 * - get rid of TEMPLOADs if possible and use vregs instead
11219 * - clean up usage of OP_P/OP_ opcodes
11220 * - cleanup usage of DUMMY_USE
11221 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11223 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11224 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11225 * - make sure handle_stack_args () is called before the branch is emitted
11226 * - when the new IR is done, get rid of all unused stuff
11227 * - COMPARE/BEQ as separate instructions or unify them ?
11228 * - keeping them separate allows specialized compare instructions like
11229 * compare_imm, compare_membase
11230 * - most back ends unify fp compare+branch, fp compare+ceq
11231 * - integrate mono_save_args into inline_method
11232 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11233 * - handle long shift opts on 32 bit platforms somehow: they require
11234 * 3 sregs (2 for arg1 and 1 for arg2)
11235 * - make byref a 'normal' type.
11236 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11237 * variable if needed.
11238 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11239 * like inline_method.
11240 * - remove inlining restrictions
11241 * - fix LNEG and enable cfold of INEG
11242 * - generalize x86 optimizations like ldelema as a peephole optimization
11243 * - add store_mem_imm for amd64
11244 * - optimize the loading of the interruption flag in the managed->native wrappers
11245 * - avoid special handling of OP_NOP in passes
11246 * - move code inserting instructions into one function/macro.
11247 * - try a coalescing phase after liveness analysis
11248 * - add float -> vreg conversion + local optimizations on !x86
11249 * - figure out how to handle decomposed branches during optimizations, ie.
11250 * compare+branch, op_jump_table+op_br etc.
11251 * - promote RuntimeXHandles to vregs
11252 * - vtype cleanups:
11253 * - add a NEW_VARLOADA_VREG macro
11254 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11255 * accessing vtype fields.
11256 * - get rid of I8CONST on 64 bit platforms
11257 * - dealing with the increase in code size due to branches created during opcode
11259 * - use extended basic blocks
11260 * - all parts of the JIT
11261 * - handle_global_vregs () && local regalloc
11262 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11263 * - sources of increase in code size:
11266 * - isinst and castclass
11267 * - lvregs not allocated to global registers even if used multiple times
11268 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11270 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11271 * - add all micro optimizations from the old JIT
11272 * - put tree optimizations into the deadce pass
11273 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11274 * specific function.
11275 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11276 * fcompare + branchCC.
11277 * - create a helper function for allocating a stack slot, taking into account
11278 * MONO_CFG_HAS_SPILLUP.
11280 * - merge the ia64 switch changes.
11281 * - optimize mono_regstate2_alloc_int/float.
11282 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11283 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11284 * parts of the tree could be separated by other instructions, killing the tree
11285 * arguments, or stores killing loads etc. Also, should we fold loads into other
11286 * instructions if the result of the load is used multiple times ?
11287 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11288 * - LAST MERGE: 108395.
11289 * - when returning vtypes in registers, generate IR and append it to the end of the
11290 * last bb instead of doing it in the epilog.
11291 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11299 - When to decompose opcodes:
11300 - earlier: this makes some optimizations hard to implement, since the low level IR
 11301 			 no longer contains the necessary information. But it is easier to do.
11302 - later: harder to implement, enables more optimizations.
11303 - Branches inside bblocks:
11304 - created when decomposing complex opcodes.
11305 - branches to another bblock: harmless, but not tracked by the branch
11306 optimizations, so need to branch to a label at the start of the bblock.
11307 - branches to inside the same bblock: very problematic, trips up the local
 11308 	   reg allocator. Can be fixed by splitting the current bblock, but that is a
11309 complex operation, since some local vregs can become global vregs etc.
11310 - Local/global vregs:
11311 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11312 local register allocator.
11313 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11314 structure, created by mono_create_var (). Assigned to hregs or the stack by
11315 the global register allocator.
11316 - When to do optimizations like alu->alu_imm:
11317 - earlier -> saves work later on since the IR will be smaller/simpler
11318 - later -> can work on more instructions
11319 - Handling of valuetypes:
11320 - When a vtype is pushed on the stack, a new temporary is created, an
11321 instruction computing its address (LDADDR) is emitted and pushed on
11322 the stack. Need to optimize cases when the vtype is used immediately as in
11323 argument passing, stloc etc.
11324 - Instead of the to_end stuff in the old JIT, simply call the function handling
11325 the values on the stack before emitting the last instruction of the bb.
11328 #endif /* DISABLE_JIT */